aboutsummaryrefslogtreecommitdiff
path: root/EASTL
diff options
context:
space:
mode:
Diffstat (limited to 'EASTL')
-rw-r--r--EASTL/.clang-format32
-rw-r--r--EASTL/.gitattributes6
-rw-r--r--EASTL/.github/workflows/c-cpp.yml133
-rw-r--r--EASTL/.gitignore50
-rw-r--r--EASTL/.p4ignore4
-rw-r--r--EASTL/.travis.yml88
-rw-r--r--EASTL/3RDPARTYLICENSES.TXT110
-rw-r--r--EASTL/CMakeLists.txt58
-rw-r--r--EASTL/CONTRIBUTING.md90
-rw-r--r--EASTL/LICENSE29
-rw-r--r--EASTL/README.md73
-rw-r--r--EASTL/_config.yml1
-rw-r--r--EASTL/benchmark/CMakeLists.txt96
-rw-r--r--EASTL/benchmark/source/BenchmarkAlgorithm.cpp1241
-rw-r--r--EASTL/benchmark/source/BenchmarkBitset.cpp366
-rw-r--r--EASTL/benchmark/source/BenchmarkDeque.cpp342
-rw-r--r--EASTL/benchmark/source/BenchmarkHash.cpp469
-rw-r--r--EASTL/benchmark/source/BenchmarkHeap.cpp238
-rw-r--r--EASTL/benchmark/source/BenchmarkList.cpp382
-rw-r--r--EASTL/benchmark/source/BenchmarkMap.cpp382
-rw-r--r--EASTL/benchmark/source/BenchmarkSet.cpp353
-rw-r--r--EASTL/benchmark/source/BenchmarkSort.cpp1399
-rw-r--r--EASTL/benchmark/source/BenchmarkString.cpp531
-rw-r--r--EASTL/benchmark/source/BenchmarkTupleVector.cpp667
-rw-r--r--EASTL/benchmark/source/BenchmarkVector.cpp452
-rw-r--r--EASTL/benchmark/source/EASTLBenchmark.cpp291
-rw-r--r--EASTL/benchmark/source/EASTLBenchmark.h228
-rw-r--r--EASTL/benchmark/source/main.cpp194
-rw-r--r--EASTL/doc/Benchmarks.md851
-rw-r--r--EASTL/doc/BestPractices.md749
-rw-r--r--EASTL/doc/Bonus/tuple_vector_readme.md416
-rw-r--r--EASTL/doc/CMake/EASTL_Project_Integration.md93
-rw-r--r--EASTL/doc/Design.md374
-rw-r--r--EASTL/doc/EASTL-n2271.pdfbin0 -> 345371 bytes
-rw-r--r--EASTL/doc/EASTL.natvis731
-rw-r--r--EASTL/doc/FAQ.md2290
-rw-r--r--EASTL/doc/Glossary.md93
-rw-r--r--EASTL/doc/Gotchas.md134
-rw-r--r--EASTL/doc/Introduction.md18
-rw-r--r--EASTL/doc/Maintenance.md195
-rw-r--r--EASTL/doc/Modules.md89
-rw-r--r--EASTL/doc/html/EASTL Benchmarks.html330
-rw-r--r--EASTL/doc/html/EASTL Best Practices.html1001
-rw-r--r--EASTL/doc/html/EASTL Design.html424
-rw-r--r--EASTL/doc/html/EASTL FAQ.html2385
-rw-r--r--EASTL/doc/html/EASTL Glossary.html490
-rw-r--r--EASTL/doc/html/EASTL Gotchas.html175
-rw-r--r--EASTL/doc/html/EASTL Introduction.html47
-rw-r--r--EASTL/doc/html/EASTL Maintenance.html292
-rw-r--r--EASTL/doc/html/EASTL Modules.html666
-rw-r--r--EASTL/doc/html/EASTLDoc.css86
-rw-r--r--EASTL/doc/quick-reference.pdfbin0 -> 166175 bytes
-rw-r--r--EASTL/include/EASTL/algorithm.h4342
-rw-r--r--EASTL/include/EASTL/allocator.h397
-rw-r--r--EASTL/include/EASTL/allocator_malloc.h130
-rw-r--r--EASTL/include/EASTL/any.h652
-rw-r--r--EASTL/include/EASTL/array.h589
-rw-r--r--EASTL/include/EASTL/atomic.h1772
-rw-r--r--EASTL/include/EASTL/bit.h172
-rw-r--r--EASTL/include/EASTL/bitset.h2234
-rw-r--r--EASTL/include/EASTL/bitvector.h1474
-rw-r--r--EASTL/include/EASTL/bonus/adaptors.h88
-rw-r--r--EASTL/include/EASTL/bonus/call_traits.h117
-rw-r--r--EASTL/include/EASTL/bonus/compressed_pair.h460
-rw-r--r--EASTL/include/EASTL/bonus/fixed_ring_buffer.h50
-rw-r--r--EASTL/include/EASTL/bonus/fixed_tuple_vector.h210
-rw-r--r--EASTL/include/EASTL/bonus/intrusive_sdlist.h694
-rw-r--r--EASTL/include/EASTL/bonus/intrusive_slist.h321
-rw-r--r--EASTL/include/EASTL/bonus/list_map.h932
-rw-r--r--EASTL/include/EASTL/bonus/lru_cache.h424
-rw-r--r--EASTL/include/EASTL/bonus/overloaded.h81
-rw-r--r--EASTL/include/EASTL/bonus/ring_buffer.h1581
-rw-r--r--EASTL/include/EASTL/bonus/sort_extra.h204
-rw-r--r--EASTL/include/EASTL/bonus/tuple_vector.h1598
-rw-r--r--EASTL/include/EASTL/chrono.h759
-rw-r--r--EASTL/include/EASTL/compare.h45
-rw-r--r--EASTL/include/EASTL/core_allocator.h70
-rw-r--r--EASTL/include/EASTL/core_allocator_adapter.h368
-rw-r--r--EASTL/include/EASTL/deque.h2718
-rw-r--r--EASTL/include/EASTL/finally.h93
-rw-r--r--EASTL/include/EASTL/fixed_allocator.h455
-rw-r--r--EASTL/include/EASTL/fixed_function.h218
-rw-r--r--EASTL/include/EASTL/fixed_hash_map.h828
-rw-r--r--EASTL/include/EASTL/fixed_hash_set.h790
-rw-r--r--EASTL/include/EASTL/fixed_list.h388
-rw-r--r--EASTL/include/EASTL/fixed_map.h580
-rw-r--r--EASTL/include/EASTL/fixed_set.h578
-rw-r--r--EASTL/include/EASTL/fixed_slist.h389
-rw-r--r--EASTL/include/EASTL/fixed_string.h805
-rw-r--r--EASTL/include/EASTL/fixed_substring.h275
-rw-r--r--EASTL/include/EASTL/fixed_vector.h625
-rw-r--r--EASTL/include/EASTL/functional.h1255
-rw-r--r--EASTL/include/EASTL/hash_map.h636
-rw-r--r--EASTL/include/EASTL/hash_set.h486
-rw-r--r--EASTL/include/EASTL/heap.h685
-rw-r--r--EASTL/include/EASTL/initializer_list.h96
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch.h65
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_add_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_and_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h430
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h430
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h19
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_cpu_pause.h25
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_exchange.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_fetch_add.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_fetch_and.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_fetch_or.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_fetch_sub.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_fetch_xor.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_load.h125
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_memory_barrier.h47
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_or_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_signal_fence.h21
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_store.h113
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_sub_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_thread_fence.h49
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arch_xor_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm.h89
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h156
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h97
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h142
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h37
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86.h158
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h96
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h96
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h69
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h52
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h91
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h90
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h90
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h90
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h90
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h90
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h164
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h104
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h96
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h171
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h96
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h42
-rw-r--r--EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h96
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic.h252
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_asserts.h75
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_base_width.h346
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_casts.h190
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_flag.h170
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_flag_standalone.h69
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_integral.h343
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros.h67
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h156
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h70
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h245
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h245
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h30
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h22
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h75
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h38
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h34
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h68
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h34
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h98
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_memory_order.h44
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_pointer.h281
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_pop_compiler_options.h11
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_push_compiler_options.h17
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_size_aligned.h197
-rw-r--r--EASTL/include/EASTL/internal/atomic/atomic_standalone.h470
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler.h120
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_barrier.h36
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h430
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h430
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h32
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_exchange.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_load.h139
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h47
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h49
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_store.h113
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h49
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h173
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h154
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h30
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h182
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h182
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h31
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h90
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h38
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h89
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h38
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h259
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h104
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h121
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h33
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h194
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h162
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h22
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h125
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h101
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h104
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h118
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h121
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h34
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h107
-rw-r--r--EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h121
-rw-r--r--EASTL/include/EASTL/internal/char_traits.h464
-rw-r--r--EASTL/include/EASTL/internal/config.h1938
-rw-r--r--EASTL/include/EASTL/internal/copy_help.h221
-rw-r--r--EASTL/include/EASTL/internal/enable_shared.h83
-rw-r--r--EASTL/include/EASTL/internal/fill_help.h484
-rw-r--r--EASTL/include/EASTL/internal/fixed_pool.h1631
-rw-r--r--EASTL/include/EASTL/internal/function.h163
-rw-r--r--EASTL/include/EASTL/internal/function_detail.h673
-rw-r--r--EASTL/include/EASTL/internal/function_help.h51
-rw-r--r--EASTL/include/EASTL/internal/functional_base.h420
-rw-r--r--EASTL/include/EASTL/internal/generic_iterator.h219
-rw-r--r--EASTL/include/EASTL/internal/hashtable.h3125
-rw-r--r--EASTL/include/EASTL/internal/in_place_t.h82
-rw-r--r--EASTL/include/EASTL/internal/integer_sequence.h118
-rw-r--r--EASTL/include/EASTL/internal/intrusive_hashtable.h989
-rw-r--r--EASTL/include/EASTL/internal/mem_fn.h304
-rw-r--r--EASTL/include/EASTL/internal/memory_base.h37
-rw-r--r--EASTL/include/EASTL/internal/move_help.h162
-rw-r--r--EASTL/include/EASTL/internal/pair_fwd_decls.h16
-rw-r--r--EASTL/include/EASTL/internal/piecewise_construct_t.h46
-rw-r--r--EASTL/include/EASTL/internal/red_black_tree.h2366
-rw-r--r--EASTL/include/EASTL/internal/smart_ptr.h267
-rw-r--r--EASTL/include/EASTL/internal/thread_support.h160
-rw-r--r--EASTL/include/EASTL/internal/tuple_fwd_decls.h56
-rw-r--r--EASTL/include/EASTL/internal/type_compound.h715
-rw-r--r--EASTL/include/EASTL/internal/type_detected.h180
-rw-r--r--EASTL/include/EASTL/internal/type_fundamental.h346
-rw-r--r--EASTL/include/EASTL/internal/type_pod.h1948
-rw-r--r--EASTL/include/EASTL/internal/type_properties.h457
-rw-r--r--EASTL/include/EASTL/internal/type_transformations.h792
-rw-r--r--EASTL/include/EASTL/internal/type_void_t.h43
-rw-r--r--EASTL/include/EASTL/intrusive_hash_map.h98
-rw-r--r--EASTL/include/EASTL/intrusive_hash_set.h100
-rw-r--r--EASTL/include/EASTL/intrusive_list.h1323
-rw-r--r--EASTL/include/EASTL/intrusive_ptr.h426
-rw-r--r--EASTL/include/EASTL/iterator.h1250
-rw-r--r--EASTL/include/EASTL/linked_array.h336
-rw-r--r--EASTL/include/EASTL/linked_ptr.h426
-rw-r--r--EASTL/include/EASTL/list.h2183
-rw-r--r--EASTL/include/EASTL/map.h788
-rw-r--r--EASTL/include/EASTL/memory.h1726
-rw-r--r--EASTL/include/EASTL/meta.h247
-rw-r--r--EASTL/include/EASTL/numeric.h344
-rw-r--r--EASTL/include/EASTL/numeric_limits.h1819
-rw-r--r--EASTL/include/EASTL/optional.h728
-rw-r--r--EASTL/include/EASTL/priority_queue.h491
-rw-r--r--EASTL/include/EASTL/queue.h373
-rw-r--r--EASTL/include/EASTL/random.h254
-rw-r--r--EASTL/include/EASTL/ratio.h320
-rw-r--r--EASTL/include/EASTL/safe_ptr.h485
-rw-r--r--EASTL/include/EASTL/scoped_array.h237
-rw-r--r--EASTL/include/EASTL/scoped_ptr.h256
-rw-r--r--EASTL/include/EASTL/segmented_vector.h523
-rw-r--r--EASTL/include/EASTL/set.h675
-rw-r--r--EASTL/include/EASTL/shared_array.h434
-rw-r--r--EASTL/include/EASTL/shared_ptr.h1717
-rw-r--r--EASTL/include/EASTL/slist.h1946
-rw-r--r--EASTL/include/EASTL/sort.h2022
-rw-r--r--EASTL/include/EASTL/span.h441
-rw-r--r--EASTL/include/EASTL/stack.h352
-rw-r--r--EASTL/include/EASTL/string.h4296
-rw-r--r--EASTL/include/EASTL/string_hash_map.h189
-rw-r--r--EASTL/include/EASTL/string_map.h167
-rw-r--r--EASTL/include/EASTL/string_view.h849
-rw-r--r--EASTL/include/EASTL/tuple.h978
-rw-r--r--EASTL/include/EASTL/type_traits.h1041
-rw-r--r--EASTL/include/EASTL/unique_ptr.h735
-rw-r--r--EASTL/include/EASTL/unordered_map.h55
-rw-r--r--EASTL/include/EASTL/unordered_set.h53
-rw-r--r--EASTL/include/EASTL/utility.h968
-rw-r--r--EASTL/include/EASTL/variant.h1588
-rw-r--r--EASTL/include/EASTL/vector.h2084
-rw-r--r--EASTL/include/EASTL/vector_map.h906
-rw-r--r--EASTL/include/EASTL/vector_multimap.h843
-rw-r--r--EASTL/include/EASTL/vector_multiset.h764
-rw-r--r--EASTL/include/EASTL/vector_set.h793
-rw-r--r--EASTL/include/EASTL/version.h15
-rw-r--r--EASTL/include/EASTL/weak_ptr.h17
-rw-r--r--EASTL/scripts/CMake/CommonCppFlags.cmake83
-rwxr-xr-xEASTL/scripts/build.sh27
-rw-r--r--EASTL/source/allocator_eastl.cpp56
-rw-r--r--EASTL/source/assert.cpp116
-rw-r--r--EASTL/source/atomic.cpp25
-rw-r--r--EASTL/source/fixed_pool.cpp70
-rw-r--r--EASTL/source/hashtable.cpp177
-rw-r--r--EASTL/source/intrusive_list.cpp87
-rw-r--r--EASTL/source/numeric_limits.cpp598
-rw-r--r--EASTL/source/red_black_tree.cpp518
-rw-r--r--EASTL/source/string.cpp464
-rw-r--r--EASTL/source/thread_support.cpp129
-rw-r--r--EASTL/test/CMakeLists.txt102
-rw-r--r--EASTL/test/packages/EABase/.gitignore49
-rw-r--r--EASTL/test/packages/EABase/.p4ignore4
-rw-r--r--EASTL/test/packages/EABase/.travis.yml68
-rw-r--r--EASTL/test/packages/EABase/CMakeLists.txt32
-rw-r--r--EASTL/test/packages/EABase/CONTRIBUTING.md73
-rw-r--r--EASTL/test/packages/EABase/LICENSE27
-rw-r--r--EASTL/test/packages/EABase/README.md26
-rw-r--r--EASTL/test/packages/EABase/doc/EABase.html309
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/config/eacompiler.h1778
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/config/eacompilertraits.h2561
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/config/eaplatform.h738
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/eabase.h1011
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/eahave.h877
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/earesult.h62
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/eastdarg.h99
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/eaunits.h54
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/int128.h1268
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/nullptr.h102
-rw-r--r--EASTL/test/packages/EABase/include/Common/EABase/version.h36
-rw-r--r--EASTL/test/packages/EABase/test/CMakeLists.txt67
-rw-r--r--EASTL/test/packages/EABase/test/source/CEntryPoint.cpp4
-rw-r--r--EASTL/test/packages/EABase/test/source/TestEABase.cpp3742
-rw-r--r--EASTL/test/packages/EABase/test/source/TestEABase.h40
-rw-r--r--EASTL/test/packages/EABase/test/source/TestEABaseC.c1213
-rw-r--r--EASTL/test/packages/EABase/test/source/TestEABaseSeparate.cpp34
-rw-r--r--EASTL/test/source/ConceptImpls.h192
-rw-r--r--EASTL/test/source/EASTLTest.cpp273
-rw-r--r--EASTL/test/source/EASTLTest.h1588
-rw-r--r--EASTL/test/source/EASTLTestAllocator.cpp492
-rw-r--r--EASTL/test/source/EASTLTestAllocator.h26
-rw-r--r--EASTL/test/source/GetTypeName.h119
-rw-r--r--EASTL/test/source/TestAlgorithm.cpp2761
-rw-r--r--EASTL/test/source/TestAllocator.cpp405
-rw-r--r--EASTL/test/source/TestAny.cpp472
-rw-r--r--EASTL/test/source/TestArray.cpp360
-rw-r--r--EASTL/test/source/TestAtomicAsm.cpp4921
-rw-r--r--EASTL/test/source/TestAtomicBasic.cpp4083
-rw-r--r--EASTL/test/source/TestBitVector.cpp469
-rw-r--r--EASTL/test/source/TestBitcast.cpp52
-rw-r--r--EASTL/test/source/TestBitset.cpp1327
-rw-r--r--EASTL/test/source/TestCharTraits.cpp39
-rw-r--r--EASTL/test/source/TestChrono.cpp220
-rw-r--r--EASTL/test/source/TestCppCXTypeTraits.cpp35
-rw-r--r--EASTL/test/source/TestDeque.cpp1146
-rw-r--r--EASTL/test/source/TestExtra.cpp1554
-rw-r--r--EASTL/test/source/TestFinally.cpp107
-rw-r--r--EASTL/test/source/TestFixedFunction.cpp614
-rw-r--r--EASTL/test/source/TestFixedHash.cpp744
-rw-r--r--EASTL/test/source/TestFixedList.cpp563
-rw-r--r--EASTL/test/source/TestFixedMap.cpp185
-rw-r--r--EASTL/test/source/TestFixedSList.cpp313
-rw-r--r--EASTL/test/source/TestFixedSet.cpp207
-rw-r--r--EASTL/test/source/TestFixedString.cpp500
-rw-r--r--EASTL/test/source/TestFixedTupleVector.cpp1594
-rw-r--r--EASTL/test/source/TestFixedVector.cpp581
-rw-r--r--EASTL/test/source/TestFunctional.cpp1529
-rw-r--r--EASTL/test/source/TestHash.cpp1505
-rw-r--r--EASTL/test/source/TestHeap.cpp295
-rw-r--r--EASTL/test/source/TestIntrusiveHash.cpp773
-rw-r--r--EASTL/test/source/TestIntrusiveList.cpp403
-rw-r--r--EASTL/test/source/TestIntrusiveSDList.cpp315
-rw-r--r--EASTL/test/source/TestIntrusiveSList.cpp38
-rw-r--r--EASTL/test/source/TestIterator.cpp579
-rw-r--r--EASTL/test/source/TestList.cpp1090
-rw-r--r--EASTL/test/source/TestListMap.cpp222
-rw-r--r--EASTL/test/source/TestLruCache.cpp340
-rw-r--r--EASTL/test/source/TestMap.cpp305
-rw-r--r--EASTL/test/source/TestMap.h1418
-rw-r--r--EASTL/test/source/TestMemory.cpp775
-rw-r--r--EASTL/test/source/TestMeta.cpp120
-rw-r--r--EASTL/test/source/TestNumericLimits.cpp159
-rw-r--r--EASTL/test/source/TestOptional.cpp695
-rw-r--r--EASTL/test/source/TestRandom.cpp168
-rw-r--r--EASTL/test/source/TestRatio.cpp107
-rw-r--r--EASTL/test/source/TestRingBuffer.cpp1139
-rw-r--r--EASTL/test/source/TestSList.cpp928
-rw-r--r--EASTL/test/source/TestSegmentedVector.cpp89
-rw-r--r--EASTL/test/source/TestSet.cpp256
-rw-r--r--EASTL/test/source/TestSet.h906
-rw-r--r--EASTL/test/source/TestSmartPtr.cpp2230
-rw-r--r--EASTL/test/source/TestSort.cpp961
-rw-r--r--EASTL/test/source/TestSpan.cpp481
-rw-r--r--EASTL/test/source/TestString.cpp142
-rw-r--r--EASTL/test/source/TestString.inl2101
-rw-r--r--EASTL/test/source/TestStringHashMap.cpp303
-rw-r--r--EASTL/test/source/TestStringMap.cpp207
-rw-r--r--EASTL/test/source/TestStringView.cpp115
-rw-r--r--EASTL/test/source/TestStringView.inl599
-rw-r--r--EASTL/test/source/TestTuple.cpp587
-rw-r--r--EASTL/test/source/TestTupleVector.cpp1540
-rw-r--r--EASTL/test/source/TestTypeTraits.cpp2439
-rw-r--r--EASTL/test/source/TestUtility.cpp915
-rw-r--r--EASTL/test/source/TestVariant.cpp1823
-rw-r--r--EASTL/test/source/TestVariant2.cpp82
-rw-r--r--EASTL/test/source/TestVector.cpp1821
-rw-r--r--EASTL/test/source/TestVectorMap.cpp235
-rw-r--r--EASTL/test/source/TestVectorSet.cpp170
-rw-r--r--EASTL/test/source/main.cpp166
423 files changed, 200452 insertions, 0 deletions
diff --git a/EASTL/.clang-format b/EASTL/.clang-format
new file mode 100644
index 0000000..1680c89
--- /dev/null
+++ b/EASTL/.clang-format
@@ -0,0 +1,32 @@
+#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
+Language : Cpp
+BasedOnStyle : Google
+Standard : Auto
+#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
+AccessModifierOffset : -4
+AlignTrailingComments : true
+AllowAllParametersOfDeclarationOnNextLine : false
+AllowShortBlocksOnASingleLine : true
+AllowShortFunctionsOnASingleLine : true
+AllowShortIfStatementsOnASingleLine : false
+AllowShortLoopsOnASingleLine : false
+BinPackParameters : false
+BreakBeforeBraces : Allman
+BreakBeforeTernaryOperators : false
+BreakConstructorInitializersBeforeComma : true
+ColumnLimit : 120
+Cpp11BracedListStyle : true
+DerivePointerAlignment : true
+DerivePointerBinding : false
+IndentWidth : 4
+KeepEmptyLinesAtTheStartOfBlocks : true
+MaxEmptyLinesToKeep : 2
+NamespaceIndentation : All
+PointerBindsToType : true
+SpacesBeforeTrailingComments : 1
+SpacesInAngles : false
+SpacesInSquareBrackets : false
+TabWidth : 4
+UseTab : ForIndentation
+#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
+#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
diff --git a/EASTL/.gitattributes b/EASTL/.gitattributes
new file mode 100644
index 0000000..5e47b8d
--- /dev/null
+++ b/EASTL/.gitattributes
@@ -0,0 +1,6 @@
+# Auto detect text files and perform LF normalization
+# http://git-scm.com/docs/gitattributes
+* text=auto
+.appveyor.yml -text eol=crlf
+.appveyor-mingw.yml -text eol=crlf
+ci-*.cmd -text eol=crlf \ No newline at end of file
diff --git a/EASTL/.github/workflows/c-cpp.yml b/EASTL/.github/workflows/c-cpp.yml
new file mode 100644
index 0000000..0be723e
--- /dev/null
+++ b/EASTL/.github/workflows/c-cpp.yml
@@ -0,0 +1,133 @@
+name: EASTL Build & Test Pipeline
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ checkout:
+ name: Checkout EASTL and submodules
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ path: EASTL/
+ - run: cd EASTL/ && git submodule update --init
+ - name: Upload checked out code
+ uses: actions/upload-artifact@v2.3.1
+ with:
+ name: Code
+ path: EASTL/
+
+ build:
+ needs: checkout
+
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ windows-latest, ubuntu-latest ]
+ compiler: [ clang, gcc, msvc ]
+ configuration: [ Debug, Release ]
+ std_iter_compatibility: [ std_iter_category_disabled, std_iter_category_enabled ]
+ exclude:
+ - os: windows-latest
+ compiler: gcc
+ - os: windows-latest
+ compiler: clang
+ - os: ubuntu-latest
+ compiler: msvc
+ include:
+ - os: windows-latest
+ compiler: msvc
+ cxxflags: '/std:c++20 /Zc:char8_t'
+ - os: ubuntu-latest
+ compiler: clang
+ cc: 'clang-14'
+ cxx: 'clang++-14'
+ cxxflags: '-std=c++20'
+ - os: ubuntu-latest
+ compiler: gcc
+ cc: 'gcc-12'
+ cxx: 'g++-12'
+ cxxflags: '-std=c++2a'
+
+ name: Build EASTL
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - name: Download a Build Artifact
+ uses: actions/download-artifact@v2.1.0
+ with:
+ name: Code
+ path: Code/
+
+ - run: mkdir build
+ - run: cd build && cmake ../Code -DEASTL_BUILD_BENCHMARK:BOOL=ON -DEASTL_BUILD_TESTS:BOOL=ON -DEASTL_STD_ITERATOR_CATEGORY_ENABLED:BOOL=${{ contains(matrix.std_iter_compatibility, 'enabled') && 'ON' || 'OFF' }}
+ env:
+ CXXFLAGS: ${{ matrix.cxxflags }}
+ CXX: ${{ matrix.cxx }}
+ CC: ${{ matrix.cc }}
+ - run: cd build && cmake --build . --config ${{ matrix.configuration }}
+ - name: Upload binaries
+ uses: actions/upload-artifact@v2.3.1
+ with:
+ name: Binaries-${{ matrix.os }}-${{ matrix.compiler }}-${{ matrix.configuration }}
+ path: build/
+
+ test:
+ needs: build
+ name: Run EASTL tests
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ windows-latest, ubuntu-latest ]
+ compiler: [ clang, msvc, gcc ]
+ configuration: [ Debug, Release ]
+ exclude:
+ - os: windows-latest
+ compiler: gcc
+ - os: windows-latest
+ compiler: clang
+ - os: ubuntu-latest
+ compiler: msvc
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - name: Download a Build Artifact
+ uses: actions/download-artifact@v2.1.0
+ with:
+ name: Binaries-${{ matrix.os }}-${{ matrix.compiler }}-${{ matrix.configuration }}
+ path: Binaries/
+ - if: matrix.os == 'ubuntu-latest'
+ run: chmod 755 ./Binaries/test/EASTLTest
+ - run: cd Binaries/test && ctest -C ${{ matrix.configuration }} -V
+
+ benchmark:
+ needs: build
+ name: Run EASTL benchmarks
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ windows-latest, ubuntu-latest ]
+ compiler: [ clang, msvc, gcc ]
+ configuration: [ Release ]
+ exclude:
+ - os: windows-latest
+ compiler: gcc
+ - os: windows-latest
+ compiler: clang
+ - os: ubuntu-latest
+ compiler: msvc
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - name: Download a Build Artifact
+ uses: actions/download-artifact@v2.1.0
+ with:
+ name: Binaries-${{ matrix.os }}-${{ matrix.compiler }}-${{ matrix.configuration }}
+ path: Binaries/
+ - if: matrix.os == 'ubuntu-latest'
+ run: chmod 755 ./Binaries/benchmark/EASTLBenchmarks
+ - run: cd Binaries/benchmark && ctest -C ${{ matrix.configuration }} -V
diff --git a/EASTL/.gitignore b/EASTL/.gitignore
new file mode 100644
index 0000000..92749f4
--- /dev/null
+++ b/EASTL/.gitignore
@@ -0,0 +1,50 @@
+tags
+cscope.out
+**/*.swp
+**/*.swo
+.swp
+*.swp
+.swo
+.TMP
+-.d
+eastl_build_out
+build_bench
+bench.bat
+build.bat
+.p4config
+
+## CMake generated files
+CMakeCache.txt
+cmake_install.cmake
+
+## Patch files
+*.patch
+
+## For Visual Studio Generated projects
+*.sln
+**/*.vcxproj
+**/*.vcxproj.filters
+*.VC.opendb
+*.sdf
+**/*.suo
+**/*.user
+.vs/*
+**/Debug/*
+CMakeFiles/*
+EASTL.dir/**
+RelWithDebInfo/*
+Release/*
+Win32/*
+x64/*
+MinSizeRel/*
+build*/*
+Testing/*
+%ALLUSERSPROFILE%/*
+
+# Buck
+/buck-out/
+/.buckd/
+/buckaroo/
+.buckconfig.local
+BUCKAROO_DEPS
+.vscode/settings.json
diff --git a/EASTL/.p4ignore b/EASTL/.p4ignore
new file mode 100644
index 0000000..4bddd61
--- /dev/null
+++ b/EASTL/.p4ignore
@@ -0,0 +1,4 @@
+/.git/
+tags
+.gitignore
+cscope.out
diff --git a/EASTL/.travis.yml b/EASTL/.travis.yml
new file mode 100644
index 0000000..11b4a22
--- /dev/null
+++ b/EASTL/.travis.yml
@@ -0,0 +1,88 @@
+dist: xenial
+language: cpp
+
+cache:
+ - ccache: true
+
+os:
+ - linux
+ - osx
+ - windows
+
+compiler:
+ - gcc
+ - clang
+ - msvc
+
+env:
+ - EASTL_CONFIG=Debug
+ - EASTL_CONFIG=Release
+
+addons:
+ apt:
+ update: true
+ sources:
+ - george-edison55-precise-backports
+ - sourceline: 'ppa:ubuntu-toolchain-r/test'
+ - sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-11 main'
+ key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+ packages:
+ - cmake
+ - cmake-data
+ - g++-9
+ - clang-11
+
+matrix:
+ include:
+ - compiler: clang "release build with clang to trigger MOJI check"
+ env: EASTL_CONFIG=Release USE_MOJI_CHECK=yes
+ os: linux
+ - compiler: msvc
+ env: EASTL_CONFIG=Release CXXFLAGS="/std:c++latest /Zc:char8_t"
+ os: windows
+
+ exclude:
+ - os: osx
+ compiler: gcc
+ - os: osx
+ compiler: msvc
+ - os: linux
+ compiler: msvc
+ - os: windows
+ compiler: clang
+ - os: windows
+ compiler: gcc
+
+# Handle git submodules yourself
+git:
+ submodules: false
+
+before_install:
+ - git submodule update --init
+ - if [[ "$CXX" == "g++" ]]; then export CC="gcc-9" ;fi
+ - if [[ "$CXX" == "g++" ]]; then export CXX="g++-9" ;fi
+ - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CC="clang-11" ;fi
+ - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CXX="clang++-11" ;fi
+ - if [[ "$CXX" == "g++-9" && "${TRAVIS_OS_NAME}" != "windows" ]]; then g++-9 -v ;fi
+ - if [[ "$CXX" == "clang++-11" && "${TRAVIS_OS_NAME}" != "windows" ]]; then clang++-11 -v ;fi
+
+install:
+# MOJI check; exit 1 if non-ascii characters detected in C++
+ - if [[ -n "$USE_MOJI_CHECK" && -n `git grep -P "[^[:ascii:]]" source test` ]]; then echo "Moji Detected" && exit 1 ;fi
+ - if [[ -n "$USE_MOJI_CHECK" ]]; then exit 0 ;fi
+
+before_script:
+ - mkdir build_$EASTL_CONFIG
+ - cd build_$EASTL_CONFIG
+ - cmake .. -DEASTL_BUILD_BENCHMARK:BOOL=ON -DEASTL_BUILD_TESTS:BOOL=ON
+ - cmake --build . --config $EASTL_CONFIG
+
+script:
+ # Run Tests
+ - cd $TRAVIS_BUILD_DIR/build_$EASTL_CONFIG/test
+ - ctest -C $EASTL_CONFIG -V || exit 1
+
+ # Run Benchmarks
+ - cd $TRAVIS_BUILD_DIR/build_$EASTL_CONFIG/benchmark
+ - ctest -C $EASTL_CONFIG -V || exit 1
+
diff --git a/EASTL/3RDPARTYLICENSES.TXT b/EASTL/3RDPARTYLICENSES.TXT
new file mode 100644
index 0000000..41fe473
--- /dev/null
+++ b/EASTL/3RDPARTYLICENSES.TXT
@@ -0,0 +1,110 @@
+Additional licenses also apply to this software package as detailed below.
+
+
+
+HP STL comes with the following license:
+
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 1994
+// Hewlett-Packard Company
+//
+// Permission to use, copy, modify, distribute and sell this software
+// and its documentation for any purpose is hereby granted without fee,
+// provided that the above copyright notice appear in all copies and
+// that both that copyright notice and this permission notice appear
+// in supporting documentation. Hewlett-Packard Company makes no
+// representations about the suitability of this software for any
+// purpose. It is provided "as is" without express or implied warranty.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+libc++ comes with the following license:
+
+==============================================================================
+libc++ License
+==============================================================================
+
+The libc++ library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2015 by the contributors listed at
+http://llvm.org/svn/llvm-project/libcxx/trunk/CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2014 by the contributors listed at
+http://llvm.org/svn/llvm-project/libcxx/trunk/CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+
+*No express or implied license to use PlayStation®4 libraries included.
+PlayStation®4 development tools and libraries are subject to separate license
+with Sony Interactive Entertainment LLC.
+
+==============================================================================
+
diff --git a/EASTL/CMakeLists.txt b/EASTL/CMakeLists.txt
new file mode 100644
index 0000000..25e7373
--- /dev/null
+++ b/EASTL/CMakeLists.txt
@@ -0,0 +1,58 @@
+#-------------------------------------------------------------------------------------------
+# Copyright (C) Electronic Arts Inc. All rights reserved.
+#-------------------------------------------------------------------------------------------
+cmake_minimum_required(VERSION 3.1)
+project(EASTL CXX)
+
+#-------------------------------------------------------------------------------------------
+# Options
+#-------------------------------------------------------------------------------------------
+option(EASTL_BUILD_BENCHMARK "Enable generation of build files for benchmark" OFF)
+option(EASTL_BUILD_TESTS "Enable generation of build files for tests" OFF)
+option(EASTL_STD_ITERATOR_CATEGORY_ENABLED "Enable compatibility with std:: iterator categories" OFF)
+
+#-------------------------------------------------------------------------------------------
+# Compiler Flags
+#-------------------------------------------------------------------------------------------
+set (CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/scripts/CMake")
+include(CommonCppFlags)
+
+#-------------------------------------------------------------------------------------------
+# Library definition
+#-------------------------------------------------------------------------------------------
+file(GLOB EASTL_SOURCES "source/*.cpp")
+add_library(EASTL ${EASTL_SOURCES})
+
+if(EASTL_BUILD_BENCHMARK)
+ add_subdirectory(benchmark)
+endif()
+
+if(EASTL_BUILD_TESTS)
+ add_subdirectory(test)
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Defines
+#-------------------------------------------------------------------------------------------
+add_definitions(-D_CHAR16T)
+add_definitions(-D_CRT_SECURE_NO_WARNINGS)
+add_definitions(-D_SCL_SECURE_NO_WARNINGS)
+add_definitions(-DEASTL_OPENSOURCE=1)
+if (EASTL_STD_ITERATOR_CATEGORY_ENABLED)
+ add_definitions(-DEASTL_STD_ITERATOR_CATEGORY_ENABLED=1)
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Include dirs
+#-------------------------------------------------------------------------------------------
+target_include_directories(EASTL PUBLIC include)
+
+#-------------------------------------------------------------------------------------------
+# Dependencies
+#-------------------------------------------------------------------------------------------
+if (NOT TARGET EABase)
+ add_subdirectory(test/packages/EABase)
+endif()
+
+target_link_libraries(EASTL EABase)
+
diff --git a/EASTL/CONTRIBUTING.md b/EASTL/CONTRIBUTING.md
new file mode 100644
index 0000000..036520e
--- /dev/null
+++ b/EASTL/CONTRIBUTING.md
@@ -0,0 +1,90 @@
+## Contributing
+
+Before you can contribute, EA must have a Contributor License Agreement (CLA) on file that has been signed by each contributor.
+You can sign here: [Go to CLA](https://electronicarts.na1.echosign.com/public/esignWidget?wid=CBFCIBAA3AAABLblqZhByHRvZqmltGtliuExmuV-WNzlaJGPhbSRg2ufuPsM3P0QmILZjLpkGslg24-UJtek*)
+
+If you want to be recognized for your contributions to EASTL or have a project using EASTL be recognized; you can submit a pull request to the appropriate sections in [README.md](README.md).
+Some examples of what the format and information will look like is as follows.
+* John Smith - jsmith@domain.com
+* John Smith
+* Frostbite - Electronic Arts
+* My Project - [link to said project]
+
+### Pull Request Policy
+
+All code contributions to EASTL are submitted as [Github pull requests](https://help.github.com/articles/using-pull-requests/). All pull requests will be reviewed by an EASTL maintainer according to the guidelines found in the next section.
+
+Your pull request should:
+
+* merge cleanly
+* come with tests
+ * tests should be minimal and stable
+ * fail before your fix is applied
+* pass the test suite
+* code formatting is encoded in clang format
+ * limit using clang format on new code
+ * do not deviate from style already established in the files
+
+### Getting the Repository
+
+EASTL uses git submodules for its dependencies as they are seperate git repositories. Recursive clones will continue until HD space is exhausted unless they are manually limited.
+It is recommended to use the following to get the source:
+
+```bash
+git clone https://github.com/electronicarts/EASTL
+cd EASTL
+git submodule update --init
+```
+
+### Running the Unit Tests
+
+EASTL uses CMake as its build system.
+
+* Create and navigate to "your_build_folder":
+ * mkdir your_build_folder && cd your_build_folder
+* Generate build scripts:
+ * cmake eastl_source_folder -DEASTL_BUILD_TESTS:BOOL=ON
+* Build unit tests for "your_config":
+ * cmake --build . --config your_config
+* Run the unit tests for "your_config" from the test folder:
+ * cd test && ctest -C your_config
+
+Here is an example batch file.
+```batch
+set build_folder=out
+mkdir %build_folder%
+pushd %build_folder%
+call cmake .. -DEASTL_BUILD_TESTS:BOOL=ON -DEASTL_BUILD_BENCHMARK:BOOL=OFF
+call cmake --build . --config Release
+call cmake --build . --config Debug
+call cmake --build . --config RelWithDebInfo
+call cmake --build . --config MinSizeRel
+pushd test
+call ctest -C Release
+call ctest -C Debug
+call ctest -C RelWithDebInfo
+call ctest -C MinSizeRel
+popd
+popd
+```
+
+Here is an example bash file
+```bash
+build_folder=out
+mkdir $build_folder
+pushd $build_folder
+cmake .. -DEASTL_BUILD_TESTS:BOOL=ON -DEASTL_BUILD_BENCHMARK:BOOL=OFF
+cmake --build . --config Release
+cmake --build . --config Debug
+cmake --build . --config RelWithDebInfo
+cmake --build . --config MinSizeRel
+pushd test
+ctest -C Release
+ctest -C Debug
+ctest -C RelWithDebInfo
+ctest -C MinSizeRel
+popd
+popd
+```
+
+The value of EASTL_BUILD_BENCHMARK can be toggled to `ON` in order to build projects that include the benchmark program.
diff --git a/EASTL/LICENSE b/EASTL/LICENSE
new file mode 100644
index 0000000..1b112db
--- /dev/null
+++ b/EASTL/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2019, Electronic Arts
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/EASTL/README.md b/EASTL/README.md
new file mode 100644
index 0000000..8548d9e
--- /dev/null
+++ b/EASTL/README.md
@@ -0,0 +1,73 @@
+# EA Standard Template Library
+
+[![Build Status](https://travis-ci.org/electronicarts/EASTL.svg?branch=master)](https://travis-ci.org/electronicarts/EASTL)
+
+EASTL stands for Electronic Arts Standard Template Library. It is a C++ template library of containers, algorithms, and iterators useful for runtime and tool development across multiple platforms. It is a fairly extensive and robust implementation of such a library and has an emphasis on high performance above all other considerations.
+
+
+## Usage
+
+If you are familiar with the C++ STL or have worked with other templated container/algorithm libraries, you probably don't need to read this. If you have no familiarity with C++ templates at all, then you probably will need more than this document to get you up to speed. In this case, you need to understand that templates, when used properly, are powerful vehicles for the ease of creation of optimized C++ code. A description of C++ templates is outside the scope of this documentation, but there is plenty of such documentation on the Internet.
+
+EASTL is suitable for any tools and shipping applications where the functionality of EASTL is useful. Modern compilers are capable of producing good code with templates and many people are using them in both current generation and future generation applications on multiple platforms from embedded systems to servers and mainframes.
+
+## Package Managers
+
+You can download and install EASTL using the [Conan](https://github.com/conan-io/conan) package manager:
+
+ conan install eastl/3.15.00@
+
+The EASTL package in conan is kept up to date by Conan team members and community contributors. If the version is out-of-date, please [create an issue or pull request](https://github.com/conan-io/conan-center-index) on the Conan Center Index repository.
+
+
+You can download and install EASTL using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
+
+ git clone https://github.com/Microsoft/vcpkg.git
+ cd vcpkg
+ ./bootstrap-vcpkg.sh
+ ./vcpkg integrate install
+ vcpkg install eastl
+
+The EASTL port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
+
+
+## Documentation
+
+Please see [EASTL Introduction](doc/Introduction.md).
+
+
+## Compiling sources
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on compiling and testing the source.
+
+## Credits And Maintainers
+
+EASTL was created by Paul Pedriana and he maintained the project for roughly 10 years.
+
+EASTL was subsequently maintained by Roberto Parolin for more than 8 years.
+He was the driver and proponent for getting EASTL opensourced.
+Rob was a mentor to all members of the team and taught us everything we ever wanted to know about C++ spookyness.
+
+After Rob, maintenance of EASTL passed to Max Winkler for roughly a year, until landing with its current maintainer Liam Mitchell.
+
+Significant EASTL contributions were made by (in alphabetical order):
+
+* Avery Lee
+* Colin Andrews
+* JP Flouret
+* Liam Mitchell
+* Matt Newport
+* Max Winkler
+* Paul Pedriana
+* Roberto Parolin
+* Simon Everett
+
+## Contributors
+
+## Projects And Products Using EASTL
+
+* Frostbite - Electronic Arts - [https://www.ea.com/frostbite]
+
+## License
+
+Modified BSD License (3-Clause BSD license) see the file LICENSE in the project root.
diff --git a/EASTL/_config.yml b/EASTL/_config.yml
new file mode 100644
index 0000000..2f7efbe
--- /dev/null
+++ b/EASTL/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-minimal \ No newline at end of file
diff --git a/EASTL/benchmark/CMakeLists.txt b/EASTL/benchmark/CMakeLists.txt
new file mode 100644
index 0000000..9ef8c66
--- /dev/null
+++ b/EASTL/benchmark/CMakeLists.txt
@@ -0,0 +1,96 @@
+#-------------------------------------------------------------------------------------------
+# Copyright (C) Electronic Arts Inc. All rights reserved.
+#-------------------------------------------------------------------------------------------
+
+#-------------------------------------------------------------------------------------------
+# CMake info
+#-------------------------------------------------------------------------------------------
+cmake_minimum_required(VERSION 3.1)
+project(EASTLBenchmarks CXX)
+include(CTest)
+
+#-------------------------------------------------------------------------------------------
+# Defines
+#-------------------------------------------------------------------------------------------
+add_definitions(-D_CHAR16T)
+
+#-------------------------------------------------------------------------------------------
+# Include directories
+#-------------------------------------------------------------------------------------------
+include_directories(source)
+include_directories(../test/source)
+
+#-------------------------------------------------------------------------------------------
+# Compiler Flags
+#-------------------------------------------------------------------------------------------
+set (CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/../scripts/CMake")
+include(CommonCppFlags)
+
+# Libstdc++ calls new internally, since DLLs have no weak symbols, runtime symbol resolution fails and EASTL's new is not called.
+# Linking against static libstdc++ fixes this.
+# See https://github.com/electronicarts/EASTL/issues/40 for more info.
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND MINGW)
+ set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -static-libstdc++")
+ set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -static-libstdc++")
+ set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} -static-libstdc++")
+endif()
+
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND CMAKE_BUILD_TYPE MATCHES "MinSizeRel" AND MINGW)
+ message(FATAL_ERROR "FIXME: MinSizeRel on MingW-w64's Clang fails to link.")
+endif()
+
+# The benchmark suite fails to compile if char8_t is enabled, so disable it.
+if (EASTL_NO_CHAR8T_FLAG)
+ add_compile_options(${EASTL_NO_CHAR8T_FLAG})
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Source files
+#-------------------------------------------------------------------------------------------
+file(GLOB EASTLBENCHMARK_SOURCES "source/*.cpp" "../test/source/EASTLTestAllocator.cpp" "../test/source/EASTLTest.cpp")
+set(SOURCES ${EASTLBENCHMARK_SOURCES})
+
+#-------------------------------------------------------------------------------------------
+# Defines
+#-------------------------------------------------------------------------------------------
+add_definitions(-D_CRT_SECURE_NO_WARNINGS)
+add_definitions(-D_SCL_SECURE_NO_WARNINGS)
+add_definitions(-DEASTL_THREAD_SUPPORT_AVAILABLE=0)
+add_definitions(-DEASTL_OPENSOURCE=1)
+add_definitions(-D_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS) # silence std::hash_map deprecation warnings
+if (EASTL_STD_ITERATOR_CATEGORY_ENABLED)
+ add_definitions(-DEASTL_STD_ITERATOR_CATEGORY_ENABLED=1)
+endif()
+
+if(NOT EASTL_BUILD_TESTS)
+ add_subdirectory(../test/packages/EAStdC ../test/EAStdC)
+ add_subdirectory(../test/packages/EAAssert ../test/EAAssert)
+ add_subdirectory(../test/packages/EAThread ../test/EAThread)
+ add_subdirectory(../test/packages/EATest ../test/EATest)
+ add_subdirectory(../test/packages/EAMain ../test/EAMain)
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Executable definition
+#-------------------------------------------------------------------------------------------
+add_executable(EASTLBenchmarks ${EASTLBENCHMARK_SOURCES})
+
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
+
+set(EASTLBenchmark_Libraries
+ EABase
+ EAAssert
+ EAMain
+ EAThread
+ EAStdC
+ EASTL
+ EATest)
+target_link_libraries(EASTLBenchmarks ${EASTLBenchmark_Libraries} Threads::Threads)
+
+#-------------------------------------------------------------------------------------------
+# Run Unit tests and verify the results.
+#-------------------------------------------------------------------------------------------
+add_test(EASTLBenchmarkRuns EASTLBenchmarks)
+set_tests_properties (EASTLBenchmarkRuns PROPERTIES PASS_REGULAR_EXPRESSION "RETURNCODE=0")
+
diff --git a/EASTL/benchmark/source/BenchmarkAlgorithm.cpp b/EASTL/benchmark/source/BenchmarkAlgorithm.cpp
new file mode 100644
index 0000000..57e155e
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkAlgorithm.cpp
@@ -0,0 +1,1241 @@
+/////////////////////////////////////////////////////////////////////////////
+// BenchmarkAlgorithm.cpp
+//
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EAStdC/EAMemory.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/sort.h>
+#include <EASTL/vector.h>
+#include <EASTL/slist.h>
+#include <EASTL/list.h>
+#include <EASTL/string.h>
+#include <EASTL/random.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+#include <vector>
+#include <list>
+#include <algorithm>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+#ifdef _MSC_VER
+ #pragma warning(disable: 4996) // Function call with parameters that may be unsafe
+#endif
+
+
+using namespace EA;
+
+
+typedef std::vector<unsigned char> StdVectorUChar;
+typedef eastl::vector<unsigned char> EaVectorUChar;
+
+typedef std::vector<signed char> StdVectorSChar;
+typedef eastl::vector<signed char> EaVectorSChar;
+
+typedef std::vector<uint32_t> StdVectorUint32;
+typedef eastl::vector<uint32_t> EaVectorUint32;
+
+typedef std::vector<uint64_t> StdVectorUint64;
+typedef eastl::vector<uint64_t> EaVectorUint64;
+
+typedef std::vector<TestObject> StdVectorTO;
+typedef eastl::vector<TestObject> EaVectorTO;
+
+
+// We make a fake version of C++11 std::next, as some C++ compilers don't currently
+// provide the C++11 next algorithm in their standard libraries.
+namespace std__
+{
+ template<typename InputIterator>
+ inline InputIterator
+ next(InputIterator it, typename std::iterator_traits<InputIterator>::difference_type n = 1)
+ {
+ std::advance(it, n);
+ return it;
+ }
+}
+
+
+namespace
+{
+ void TestFindEndStd(EA::StdC::Stopwatch& stopwatch, const std::string& sTest, const char* pSearchStringBegin, const char* pSearchStringEnd)
+ {
+ stopwatch.Restart();
+ std::string::const_iterator it = std::find_end(sTest.begin(), sTest.end(), pSearchStringBegin, pSearchStringEnd);
+ stopwatch.Stop();
+ if(it != sTest.end())
+ sprintf(Benchmark::gScratchBuffer, "%c", *it);
+ }
+
+ void TestFindEndEa(EA::StdC::Stopwatch& stopwatch, const eastl::string& sTest, const char* pSearchStringBegin, const char* pSearchStringEnd)
+ {
+ stopwatch.Restart();
+ eastl::string::const_iterator it = eastl::find_end(sTest.begin(), sTest.end(), pSearchStringBegin, pSearchStringEnd);
+ stopwatch.Stop();
+ if(it != sTest.end())
+ sprintf(Benchmark::gScratchBuffer, "%c", *it);
+ }
+
+
+
+ void TestSearchStd(EA::StdC::Stopwatch& stopwatch, const std::string& sTest, const char* pSearchStringBegin, const char* pSearchStringEnd)
+ {
+ stopwatch.Restart();
+ std::string::const_iterator it = std::search(sTest.begin(), sTest.end(), pSearchStringBegin, pSearchStringEnd);
+ stopwatch.Stop();
+ if(it != sTest.end())
+ sprintf(Benchmark::gScratchBuffer, "%c", *it);
+ }
+
+ void TestSearchEa(EA::StdC::Stopwatch& stopwatch, const eastl::string& sTest, const char* pSearchStringBegin, const char* pSearchStringEnd)
+ {
+ stopwatch.Restart();
+ eastl::string::const_iterator it = eastl::search(sTest.begin(), sTest.end(), pSearchStringBegin, pSearchStringEnd);
+ stopwatch.Stop();
+ if(it != sTest.end())
+ sprintf(Benchmark::gScratchBuffer, "%c", *it);
+ }
+
+
+
+ void TestSearchNStd(EA::StdC::Stopwatch& stopwatch, const std::string& sTest, int n, char c)
+ {
+ stopwatch.Restart();
+ std::string::const_iterator it = std::search_n(sTest.begin(), sTest.end(), n, c);
+ stopwatch.Stop();
+ if(it != sTest.end())
+ sprintf(Benchmark::gScratchBuffer, "%c", *it);
+ }
+
+ void TestSearchNEa(EA::StdC::Stopwatch& stopwatch, const eastl::string& sTest, int n, char c)
+ {
+ stopwatch.Restart();
+ eastl::string::const_iterator it = eastl::search_n(sTest.begin(), sTest.end(), n, c);
+ stopwatch.Stop();
+ if(it != sTest.end())
+ sprintf(Benchmark::gScratchBuffer, "%c", *it);
+ }
+
+
+
+ template <typename Container>
+ void TestUniqueStd(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ typename Container::iterator it = std::unique(c.begin(), c.end());
+ stopwatch.Stop();
+ c.erase(it, c.end());
+ }
+
+ template <typename Container>
+ void TestUniqueEa(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ typename Container::iterator it = eastl::unique(c.begin(), c.end());
+ stopwatch.Stop();
+ c.erase(it, c.end());
+ }
+
+
+
+ template <typename Container>
+ void TestMinElementStd(EA::StdC::Stopwatch& stopwatch, const Container& c)
+ {
+ stopwatch.Restart();
+ const typename Container::const_iterator it = std::min_element(c.begin(), c.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &it);
+ }
+
+ template <typename Container>
+ void TestMinElementEa(EA::StdC::Stopwatch& stopwatch, const Container& c)
+ {
+ stopwatch.Restart();
+ const typename Container::const_iterator it = eastl::min_element(c.begin(), c.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &it);
+ }
+
+
+
+ template <typename Container>
+ void TestCountStd(EA::StdC::Stopwatch& stopwatch, const Container& c)
+ {
+ stopwatch.Restart();
+ const typename Container::difference_type n = std::count(c.begin(), c.end(), (typename Container::value_type)999999);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%d", (int)n);
+ }
+
+ template <typename Container>
+ void TestCountEa(EA::StdC::Stopwatch& stopwatch, const Container& c)
+ {
+ stopwatch.Restart();
+ const typename Container::difference_type n = eastl::count(c.begin(), c.end(), (typename Container::value_type)999999);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%d", (int)n);
+ }
+
+
+
+ template <typename Container>
+ void TestAdjacentFindStd(EA::StdC::Stopwatch& stopwatch, const Container& c)
+ {
+ stopwatch.Restart();
+ const typename Container::const_iterator it = std::adjacent_find(c.begin(), c.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &it);
+ }
+
+ template <typename Container>
+ void TestAdjacentFindEa(EA::StdC::Stopwatch& stopwatch, const Container& c)
+ {
+ stopwatch.Restart();
+ const typename Container::const_iterator it = eastl::adjacent_find(c.begin(), c.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &it);
+ }
+
+
+
+ template <typename Container>
+ void TestLowerBoundStd(EA::StdC::Stopwatch& stopwatch, const Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd)
+ {
+
+ stopwatch.Restart();
+ while(pBegin != pEnd)
+ {
+ typename Container::const_iterator it = std::lower_bound(c.begin(), c.end(), *pBegin++);
+ Benchmark::DoNothing(&it);
+ }
+ stopwatch.Stop();
+ }
+
+ template <typename Container>
+ void TestLowerBoundEa(EA::StdC::Stopwatch& stopwatch, Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd)
+ {
+ stopwatch.Restart();
+ while(pBegin != pEnd)
+ {
+ typename Container::const_iterator it = eastl::lower_bound(c.begin(), c.end(), *pBegin++);
+ Benchmark::DoNothing(&it);
+ }
+ stopwatch.Stop();
+ }
+
+
+
+ template <typename Container>
+ void TestUpperBoundStd(EA::StdC::Stopwatch& stopwatch, const Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd)
+ {
+ stopwatch.Restart();
+ while(pBegin != pEnd)
+ {
+ typename Container::const_iterator it = std::upper_bound(c.begin(), c.end(), *pBegin++);
+ Benchmark::DoNothing(&it);
+ }
+ stopwatch.Stop();
+ }
+
+ template <typename Container>
+ void TestUpperBoundEa(EA::StdC::Stopwatch& stopwatch, Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd)
+ {
+ stopwatch.Restart();
+ while(pBegin != pEnd)
+ {
+ typename Container::const_iterator it = eastl::upper_bound(c.begin(), c.end(), *pBegin++);
+ Benchmark::DoNothing(&it);
+ }
+ stopwatch.Stop();
+ }
+
+
+
+ template <typename Container>
+ void TestEqualRangeStd(EA::StdC::Stopwatch& stopwatch, const Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd)
+ {
+ stopwatch.Restart();
+ while(pBegin != pEnd)
+ {
+ std::pair<const typename Container::const_iterator,
+ const typename Container::const_iterator> itPair = std::equal_range(c.begin(), c.end(), *pBegin++);
+
+ Benchmark::DoNothing(&itPair);
+ }
+ stopwatch.Stop();
+ }
+
+ template <typename Container>
+ void TestEqualRangeEa(EA::StdC::Stopwatch& stopwatch, Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd)
+ {
+ stopwatch.Restart();
+ while(pBegin != pEnd)
+ {
+ eastl::pair<const typename Container::const_iterator,
+ const typename Container::const_iterator> itPair = eastl::equal_range(c.begin(), c.end(), *pBegin++);
+ Benchmark::DoNothing(&itPair);
+ }
+ stopwatch.Stop();
+ }
+
+
+
+ template <typename Iterator1, typename Iterator2>
+ void TestLexicographicalCompareStd(EA::StdC::Stopwatch& stopwatch, Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2)
+ {
+ stopwatch.Restart();
+ const bool bResult = std::lexicographical_compare(first1, last1, first2, last2);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%d", bResult ? (int)1 : (int)0);
+ }
+
+ template <typename Iterator1, typename Iterator2>
+ void TestLexicographicalCompareEa(EA::StdC::Stopwatch& stopwatch, Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2)
+ {
+ stopwatch.Restart();
+ const bool bResult = eastl::lexicographical_compare(first1, last1, first2, last2);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%d", bResult ? (int)1 : (int)0);
+ }
+
+
+
+ template <typename Iterator, typename OutputIterator>
+ void TestCopyStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, OutputIterator result)
+ {
+ stopwatch.Restart();
+ std::copy(first, last, result);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%d", (int)*first);
+ }
+
+ template <typename Iterator, typename OutputIterator>
+ void TestCopyEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, OutputIterator result)
+ {
+ stopwatch.Restart();
+ eastl::copy(first, last, result);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%d", (int)*first);
+ }
+
+
+
+ template <typename Iterator, typename OutputIterator>
+ void TestCopyBackwardStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, OutputIterator result)
+ {
+ stopwatch.Restart();
+ std::copy_backward(first, last, result);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%d", (int)*first);
+ }
+
+ template <typename Iterator, typename OutputIterator>
+ void TestCopyBackwardEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, OutputIterator result)
+ {
+ stopwatch.Restart();
+ eastl::copy_backward(first, last, result);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%d", (int)*first);
+ }
+
+
+
+ template <typename Iterator, typename Value>
+ void TestFillStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, const Value& v)
+ {
+ stopwatch.Restart();
+ std::fill(first, last, v);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*first);
+ }
+
+ template <typename Iterator, typename Value>
+ void TestFillEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, const Value& v)
+ {
+ stopwatch.Restart();
+ eastl::fill(first, last, v);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*first);
+ }
+
+
+
+ template <typename Iterator, typename Value>
+ void TestFillNStd(EA::StdC::Stopwatch& stopwatch, Iterator first, int n, const Value& v)
+ {
+ stopwatch.Restart();
+ std::fill_n(first, n, v);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*first);
+ }
+
+ template <typename Iterator, typename Value>
+ void TestFillNEa(EA::StdC::Stopwatch& stopwatch, Iterator first, int n, const Value& v)
+ {
+ stopwatch.Restart();
+ eastl::fill_n(first, n, v);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*first);
+ }
+
+
+
+ template <typename Iterator>
+ void TestReverseStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last)
+ {
+ stopwatch.Restart();
+ std::reverse(first, last);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*first);
+ }
+
+ template <typename Iterator>
+ void TestReverseEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last)
+ {
+ stopwatch.Restart();
+ eastl::reverse(first, last);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*first);
+ }
+
+
+
+ template <typename Iterator>
+ void TestRotateStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator middle, Iterator last)
+ {
+ stopwatch.Restart();
+ std::rotate(first, middle, last); // C++11 specifies that rotate has a return value, but not all std implementations return it.
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*first);
+ }
+
+ template <typename Iterator>
+ void TestRotateEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator middle, Iterator last)
+ {
+ stopwatch.Restart();
+ eastl::rotate(first, middle, last);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*first);
+ }
+
+ template <typename Iterator>
+ void TestMergeStd(EA::StdC::Stopwatch& stopwatch, Iterator firstIn1, Iterator lastIn1, Iterator firstIn2, Iterator lastIn2, Iterator out)
+ {
+ stopwatch.Restart();
+ std::merge(firstIn1, lastIn1, firstIn2, lastIn2, out);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*out);
+ }
+
+ template <typename Iterator>
+ void TestMergeEa(EA::StdC::Stopwatch& stopwatch, Iterator firstIn1, Iterator lastIn1, Iterator firstIn2, Iterator lastIn2, Iterator out)
+ {
+ stopwatch.Restart();
+ eastl::merge(firstIn1, lastIn1, firstIn2, lastIn2, out);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p", &*out);
+ }
+} // namespace
+
+
+
+
+void BenchmarkAlgorithm1(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2)
+{
+ {
+ std::string sTestStd;
+ eastl::string sTestEa;
+ const char* pSearchString1Begin = "AAA";
+ const char* pSearchString1End = pSearchString1Begin + strlen(pSearchString1Begin);
+ const char* pSearchString2Begin = "BBB"; // This is something that doesn't exist searched string.
+ const char* pSearchString2End = pSearchString2Begin + strlen(pSearchString2Begin);
+ const char* pSearchString3Begin = "CCC";
+ const char* pSearchString3End = pSearchString3Begin + strlen(pSearchString3Begin);
+
+ for(int i = 0; i < 10000; i++)
+ sTestStd += "This is a test of the find_end algorithm. ";
+ sTestEa.assign(sTestStd.data(), (eastl_size_t)sTestStd.length());
+
+ for(int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test find_end
+ ///////////////////////////////
+
+ sTestStd.insert(sTestStd.size() * 15 / 16, pSearchString1Begin);
+ sTestEa.insert (sTestEa.size() * 15 / 16, pSearchString1Begin);
+ TestFindEndStd(stopwatch1, sTestStd, pSearchString1Begin, pSearchString1End);
+ TestFindEndEa (stopwatch2, sTestEa, pSearchString1Begin, pSearchString1End);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/find_end/string/end", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ sTestStd.insert(sTestStd.size() / 2, pSearchString2Begin);
+ sTestEa.insert (sTestEa.size() / 2, pSearchString2Begin);
+ TestFindEndStd(stopwatch1, sTestStd, pSearchString2Begin, pSearchString2End);
+ TestFindEndEa (stopwatch2, sTestEa, pSearchString2Begin, pSearchString2End);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/find_end/string/middle", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFindEndStd(stopwatch1, sTestStd, pSearchString3Begin, pSearchString3End);
+ TestFindEndEa (stopwatch2, sTestEa, pSearchString3Begin, pSearchString3End);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/find_end/string/none", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test search
+ ///////////////////////////////
+ TestSearchStd(stopwatch1, sTestStd, pSearchString1Begin, pSearchString1End);
+ TestSearchEa (stopwatch2, sTestEa, pSearchString1Begin, pSearchString1End);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/search/string<char>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test search_n
+ ///////////////////////////////
+ TestSearchNStd(stopwatch1, sTestStd, 3, 'A');
+ TestSearchNEa (stopwatch2, sTestEa, 3, 'A');
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/search_n/string<char>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test adjacent_find
+ ///////////////////////////////
+
+ }
+ }
+}
+
+
+void BenchmarkAlgorithm2(EASTLTest_Rand& rng, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2)
+{
+ {
+ StdVectorUint32 stdVectorUint32;
+ EaVectorUint32 eaVectorUint32;
+
+ StdVectorUint64 stdVectorUint64;
+ EaVectorUint64 eaVectorUint64;
+
+ StdVectorTO stdVectorTO;
+ EaVectorTO eaVectorTO;
+
+ for(int i = 0; i < 2; i++)
+ {
+ stdVectorUint32.clear();
+ eaVectorUint32.clear();
+
+ for(int j = 0; j < 100000; j++)
+ {
+ stdVectorUint32.push_back(j);
+ eaVectorUint32.push_back(j);
+ stdVectorUint64.push_back(j);
+ eaVectorUint64.push_back(j);
+ stdVectorTO.push_back(TestObject(j));
+ eaVectorTO.push_back(TestObject(j));
+
+ if((rng() % 16) == 0)
+ {
+ stdVectorUint32.push_back(i);
+ eaVectorUint32.push_back(i);
+ stdVectorUint64.push_back(j);
+ eaVectorUint64.push_back(j);
+ stdVectorTO.push_back(TestObject(j));
+ eaVectorTO.push_back(TestObject(j));
+
+ if((rng() % 16) == 0)
+ {
+ stdVectorUint32.push_back(i);
+ eaVectorUint32.push_back(i);
+ stdVectorUint64.push_back(j);
+ eaVectorUint64.push_back(j);
+ stdVectorTO.push_back(TestObject(j));
+ eaVectorTO.push_back(TestObject(j));
+ }
+ }
+ }
+
+
+ ///////////////////////////////
+ // Test unique
+ ///////////////////////////////
+
+ TestUniqueStd(stopwatch1, stdVectorUint32);
+ TestUniqueEa (stopwatch2, eaVectorUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/unique/vector<uint32_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestUniqueStd(stopwatch1, stdVectorUint64);
+ TestUniqueEa (stopwatch2, eaVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/unique/vector<uint64_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestUniqueStd(stopwatch1, stdVectorTO);
+ TestUniqueEa (stopwatch2, eaVectorTO);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/unique/vector<TestObject>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test min_element
+ ///////////////////////////////
+
+ TestMinElementStd(stopwatch1, stdVectorTO);
+ TestMinElementEa (stopwatch2, eaVectorTO);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/min_element/vector<TestObject>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test count
+ ///////////////////////////////
+
+ TestCountStd(stopwatch1, stdVectorUint64);
+ TestCountEa (stopwatch2, eaVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/count/vector<uint64_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test adjacent_find
+ ///////////////////////////////
+
+ // Due to the above unique testing, the container should container unique elements. Let's change that.
+ stdVectorTO[stdVectorTO.size() - 2] = stdVectorTO[stdVectorTO.size() - 1];
+ eaVectorTO[eaVectorTO.size() - 2] = eaVectorTO[eaVectorTO.size() - 1];
+ TestAdjacentFindStd(stopwatch1, stdVectorTO);
+ TestAdjacentFindEa (stopwatch2, eaVectorTO);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/adj_find/vector<TestObject>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test lower_bound
+ ///////////////////////////////
+
+ // Sort the containers for the following tests.
+ std::sort(stdVectorTO.begin(), stdVectorTO.end());
+ eaVectorTO.assign(&stdVectorTO[0], &stdVectorTO[0] + stdVectorTO.size());
+
+ TestLowerBoundStd(stopwatch1, stdVectorTO, &stdVectorTO[0], &stdVectorTO[0] + stdVectorTO.size());
+ TestLowerBoundEa (stopwatch2, eaVectorTO, &eaVectorTO[0], &eaVectorTO[0] + eaVectorTO.size());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/lower_bound/vector<TestObject>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test upper_bound
+ ///////////////////////////////
+
+ std::sort(stdVectorUint32.begin(), stdVectorUint32.end());
+ eaVectorUint32.assign(&stdVectorUint32[0], &stdVectorUint32[0] + stdVectorUint32.size());
+
+ TestUpperBoundStd(stopwatch1, stdVectorUint32, &stdVectorUint32[0], &stdVectorUint32[0] + stdVectorUint32.size());
+ TestUpperBoundEa (stopwatch2, eaVectorUint32, &eaVectorUint32[0], &eaVectorUint32[0] + eaVectorUint32.size());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/upper_bound/vector<uint32_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test equal_range
+ ///////////////////////////////
+
+ // VS2010 (and later versions?) is extremely slow executing this in debug builds. It can take minutes for a
+ // single TestEqualRangeStd call to complete. It's so slow that it's nearly pointless to execute.
+ #if !defined(_MSC_VER) || (_MSC_VER < 1600) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2)
+ std::sort(stdVectorUint64.begin(), stdVectorUint64.end());
+ eaVectorUint64.assign(&stdVectorUint64[0], &stdVectorUint64[0] + stdVectorUint64.size());
+
+ TestEqualRangeStd(stopwatch1, stdVectorUint64, &stdVectorUint64[0], &stdVectorUint64[0] + stdVectorUint64.size());
+ TestEqualRangeEa (stopwatch2, eaVectorUint64, &eaVectorUint64[0], &eaVectorUint64[0] + eaVectorUint64.size());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/equal_range/vector<uint64_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ #endif
+ }
+ }
+}
+
+
+void BenchmarkAlgorithm3(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2)
+{
+ {
+ StdVectorUChar stdVectorUChar1(100000);
+ StdVectorUChar stdVectorUChar2(100000);
+ EaVectorUChar eaVectorUChar1(100000);
+ EaVectorUChar eaVectorUChar2(100000);
+
+ StdVectorSChar stdVectorSChar1(100000);
+ StdVectorSChar stdVectorSChar2(100000);
+ EaVectorSChar eaVectorSChar1(100000);
+ EaVectorSChar eaVectorSChar2(100000);
+
+ StdVectorTO stdVectorTO1(100000);
+ StdVectorTO stdVectorTO2(100000);
+ EaVectorTO eaVectorTO1(100000);
+ EaVectorTO eaVectorTO2(100000);
+
+ // All these containers should have values of zero in them.
+
+ for(int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test lexicographical_compare
+ ///////////////////////////////
+
+ TestLexicographicalCompareStd(stopwatch1, stdVectorUChar1.begin(), stdVectorUChar1.end(), stdVectorUChar2.begin(), stdVectorUChar2.end());
+ TestLexicographicalCompareEa (stopwatch2, eaVectorUChar1.begin(), eaVectorUChar2.end(), eaVectorUChar2.begin(), eaVectorUChar2.end());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/lex_cmp/vector<uchar>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestLexicographicalCompareStd(stopwatch1, &stdVectorSChar1[0], &stdVectorSChar1[0] + stdVectorSChar1.size(), &stdVectorSChar2[0], &stdVectorSChar2[0] + stdVectorSChar2.size());
+ TestLexicographicalCompareEa (stopwatch2, &eaVectorSChar1[0], &eaVectorSChar1[0] + eaVectorSChar1.size(), &eaVectorSChar2[0], &eaVectorSChar2[0] + eaVectorSChar2.size());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/lex_cmp/schar[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestLexicographicalCompareStd(stopwatch1, stdVectorTO1.begin(), stdVectorTO1.end(), stdVectorTO2.begin(), stdVectorTO2.end());
+ TestLexicographicalCompareEa (stopwatch2, eaVectorTO1.begin(), eaVectorTO1.end(), eaVectorTO2.begin(), eaVectorTO2.end());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/lex_cmp/vector<TestObject>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+
+}
+
+
+void BenchmarkAlgorithm4(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2)
+{
+ {
+ std::vector<uint32_t> stdVectorUint321(10000);
+ std::vector<uint32_t> stdVectorUint322(10000);
+ eastl::vector<uint32_t> eaVectorUint321(10000);
+ eastl::vector<uint32_t> eaVectorUint322(10000);
+
+ std::vector<uint64_t> stdVectorUint64(100000);
+ eastl::vector<uint64_t> eaVectorUint64(100000);
+
+
+ for(int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test copy
+ ///////////////////////////////
+
+ TestCopyStd(stopwatch1, stdVectorUint321.begin(), stdVectorUint321.end(), stdVectorUint322.begin());
+ TestCopyEa (stopwatch2, eaVectorUint321.begin(), eaVectorUint321.end(), eaVectorUint322.begin());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/copy/vector<uint32_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test copy_backward
+ ///////////////////////////////
+
+ TestCopyBackwardStd(stopwatch1, stdVectorUint321.begin(), stdVectorUint321.end(), stdVectorUint322.end());
+ TestCopyBackwardEa (stopwatch2, eaVectorUint321.begin(), eaVectorUint321.end(), eaVectorUint322.end());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/copy_backward/vector<uint32_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test fill
+ ///////////////////////////////
+
+ TestFillStd(stopwatch1, stdVectorUint64.begin(), stdVectorUint64.end(), UINT64_C(37));
+ TestFillEa (stopwatch2, eaVectorUint64.begin(), eaVectorUint64.end(), UINT64_C(37));
+ TestFillStd(stopwatch1, stdVectorUint64.begin(), stdVectorUint64.end(), UINT64_C(37)); // Intentionally do this a second time, as we are finding
+ TestFillEa (stopwatch2, eaVectorUint64.begin(), eaVectorUint64.end(), UINT64_C(37)); // the results are inconsistent otherwise.
+ if(EA::StdC::Memcheck64(&eaVectorUint64[0], UINT64_C(37), eaVectorUint64.size()))
+ EA::UnitTest::Report("eastl algorithm 64 bit fill failure.");
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill/vector<uint64_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test fill_n
+ ///////////////////////////////
+
+ TestFillNStd(stopwatch1, stdVectorUint64.begin(), (int)stdVectorUint64.size(), UINT64_C(37));
+ TestFillNEa (stopwatch2, eaVectorUint64.begin(), (int) eaVectorUint64.size(), UINT64_C(37));
+ TestFillNStd(stopwatch1, stdVectorUint64.begin(), (int)stdVectorUint64.size(), UINT64_C(37)); // Intentionally do this a second time, as we are finding
+ TestFillNEa (stopwatch2, eaVectorUint64.begin(), (int) eaVectorUint64.size(), UINT64_C(37)); // the results are inconsistent otherwise.
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill_n/vector<uint64_t>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+}
+
+
+void BenchmarkAlgorithm5(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2)
+{
+ {
+ std::vector<void*> stdVectorVoid(100000);
+ eastl::vector<void*> eaVectorVoid(100000);
+
+ std::vector<char> stdVectorChar(100000);
+ eastl::vector<char> eaVectorChar(100000);
+
+ std::vector<bool> stdVectorBool(100000);
+ eastl::vector<bool> eaVectorBool(100000);
+
+ for(int i = 0; i < 2; i++)
+ {
+ TestFillStd(stopwatch1, stdVectorVoid.begin(), stdVectorVoid.end(), (void*)NULL);
+ TestFillEa (stopwatch2, eaVectorVoid.begin(), eaVectorVoid.end(), (void*)NULL);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill/vector<void*>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFillStd(stopwatch1, &stdVectorChar[0], &stdVectorChar[0] + stdVectorChar.size(), 'd'); // Intentionally use ' ' and not casted to any type.
+ TestFillEa (stopwatch2, eaVectorChar.data(), eaVectorChar.data() + eaVectorChar.size(), 'd');
+ TestFillStd(stopwatch1, &stdVectorChar[0], &stdVectorChar[0] + stdVectorChar.size(), 'd'); // Intentionally do this a second time, as we are finding
+ TestFillEa (stopwatch2, eaVectorChar.data(), eaVectorChar.data() + eaVectorChar.size(), 'd'); // the results are inconsistent otherwise.
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill/char[]/'d'", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFillStd(stopwatch1, stdVectorChar.begin(), stdVectorChar.end(), (char)'d');
+ TestFillEa (stopwatch2, eaVectorChar.begin(), eaVectorChar.end(), (char)'d');
+ TestFillStd(stopwatch1, stdVectorChar.begin(), stdVectorChar.end(), (char)'d'); // Intentionally do this a second time, as we are finding
+ TestFillEa (stopwatch2, eaVectorChar.begin(), eaVectorChar.end(), (char)'d'); // the results are inconsistent otherwise.
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill/vector<char>/'d'", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFillStd(stopwatch1, stdVectorChar.begin(), stdVectorChar.end(), (char)0);
+ TestFillEa (stopwatch2, eaVectorChar.begin(), eaVectorChar.end(), (char)0);
+ TestFillStd(stopwatch1, stdVectorChar.begin(), stdVectorChar.end(), (char)0); // Intentionally do this a second time, as we are finding
+ TestFillEa (stopwatch2, eaVectorChar.begin(), eaVectorChar.end(), (char)0); // the results are inconsistent otherwise.
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill/vector<char>/0", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFillStd(stopwatch1, eaVectorBool.data(), eaVectorBool.data() + eaVectorBool.size(), false); // Intentionally use eaVectorBool for the array.
+ TestFillEa (stopwatch2, eaVectorBool.data(), eaVectorBool.data() + eaVectorBool.size(), false);
+ TestFillStd(stopwatch1, eaVectorBool.data(), eaVectorBool.data() + eaVectorBool.size(), false);
+ TestFillEa (stopwatch2, eaVectorBool.data(), eaVectorBool.data() + eaVectorBool.size(), false);
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill/bool[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test fill_n
+ ///////////////////////////////
+
+ TestFillNStd(stopwatch1, eaVectorChar.data(), (int) eaVectorChar.size(), 'd'); // Intentionally use eaVectorBool for the array.
+ TestFillNEa (stopwatch2, eaVectorChar.data(), (int) eaVectorChar.size(), 'd');
+ TestFillNStd(stopwatch1, eaVectorChar.data(), (int) eaVectorChar.size(), 'd'); // Intentionally do this a second time, as we are finding
+ TestFillNEa (stopwatch2, eaVectorChar.data(), (int) eaVectorChar.size(), 'd'); // the results are inconsistent otherwise.
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill_n/char[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFillNStd(stopwatch1, eaVectorBool.data(), (int) eaVectorBool.size(), false); // Intentionally use eaVectorBool for the array.
+ TestFillNEa (stopwatch2, eaVectorBool.data(), (int) eaVectorBool.size(), false);
+ TestFillNStd(stopwatch1, eaVectorBool.data(), (int) eaVectorBool.size(), false); // Intentionally do this a second time, as we are finding
+ TestFillNEa (stopwatch2, eaVectorBool.data(), (int) eaVectorBool.size(), false); // the results are inconsistent otherwise.
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/fill_n/bool[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+}
+
+
+void BenchmarkAlgorithm6(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2)
+{
+ // We allocate this on the heap because some platforms don't always have enough stack space for this.
+ std::vector<LargePOD>* pstdVectorLP1 = new std::vector<LargePOD>(100);
+ std::vector<LargePOD>* pstdVectorLP2 = new std::vector<LargePOD>(100);
+ eastl::vector<LargePOD>* peaVectorLP1 = new eastl::vector<LargePOD>(100);
+ eastl::vector<LargePOD>* peaVectorLP2 = new eastl::vector<LargePOD>(100);
+
+ // Aliases.
+ std::vector<LargePOD>& stdVectorLP1 = *pstdVectorLP1;
+ std::vector<LargePOD>& stdVectorLP2 = *pstdVectorLP2;
+ eastl::vector<LargePOD>& eaVectorLP1 = *peaVectorLP1;
+ eastl::vector<LargePOD>& eaVectorLP2 = *peaVectorLP2;
+
+ for(int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test copy
+ ///////////////////////////////
+
+ TestCopyStd(stopwatch1, stdVectorLP1.begin(), stdVectorLP1.end(), stdVectorLP2.begin());
+ TestCopyEa (stopwatch2, eaVectorLP1.begin(), eaVectorLP1.end(), eaVectorLP2.begin());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/copy/vector<LargePOD>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test copy_backward
+ ///////////////////////////////
+
+ TestCopyBackwardStd(stopwatch1, stdVectorLP1.begin(), stdVectorLP1.end(), stdVectorLP2.end());
+ TestCopyBackwardEa (stopwatch2, eaVectorLP1.begin(), eaVectorLP1.end(), eaVectorLP2.end());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/copy_backward/vector<LargePOD>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+
+ delete pstdVectorLP1;
+ delete pstdVectorLP2;
+ delete peaVectorLP1;
+ delete peaVectorLP2;
+}
+
+
+void BenchmarkAlgorithm7(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2)
+{
+ {
+ std::list<TestObject> stdListTO(10000);
+ eastl::list<TestObject> eaListTO(10000);
+
+ std::vector<TestObject> stdVectorTO(10000);
+ eastl::vector<TestObject> eaVectorTO(10000);
+
+ for(int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test reverse
+ ///////////////////////////////
+
+ TestReverseStd(stopwatch1, stdListTO.begin(), stdListTO.end());
+ TestReverseEa (stopwatch2, eaListTO.begin(), eaListTO.end());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/reverse/list<TestObject>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestReverseStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.end());
+ TestReverseEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.end());
+
+ if(i == 1)
+ Benchmark::AddResult("algorithm/reverse/vector<TestObject>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+
+ {
+ // Create some containers and seed them with incremental values (i.e. 0, 1, 2, 3...).
+ eastl::slist<int32_t> eaSlistIntLarge(10000);
+ eastl::generate(eaSlistIntLarge.begin(), eaSlistIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+
+ std::vector< SizedPOD<32> > stdVectorLargePod32(10000);
+ for(std::vector< SizedPOD<32> >::iterator it = stdVectorLargePod32.begin(); it != stdVectorLargePod32.end(); ++it)
+ memset(&*it, 0, sizeof(SizedPOD<32>));
+ eastl::vector< SizedPOD<32> > eaVectorLargePod32(10000);
+ for(eastl::vector< SizedPOD<32> >::iterator it = eaVectorLargePod32.begin(); it != eaVectorLargePod32.end(); ++it)
+ memset(&*it, 0, sizeof(SizedPOD<32>));
+
+ std::list<int32_t> stdListIntLarge(10000);
+ eastl::generate(stdListIntLarge.begin(), stdListIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+ eastl::list<int32_t> eaListIntLarge(10000);
+ eastl::generate(eaListIntLarge.begin(), eaListIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+
+ std::vector<int32_t> stdVectorIntLarge(10000);
+ eastl::generate(stdVectorIntLarge.begin(), stdVectorIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+ eastl::vector<int32_t> eaVectorIntLarge(10000);
+ eastl::generate(eaVectorIntLarge.begin(), eaVectorIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+
+ std::list<int32_t> stdListIntSmall(10);
+ eastl::generate(stdListIntLarge.begin(), stdListIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+ eastl::list<int32_t> eaListIntSmall(10);
+ eastl::generate(eaListIntLarge.begin(), eaListIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+
+ std::vector<int32_t> stdVectorIntSmall(10);
+ eastl::generate(stdVectorIntLarge.begin(), stdVectorIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+ eastl::vector<int32_t> eaVectorIntSmall(10);
+ eastl::generate(eaVectorIntLarge.begin(), eaVectorIntLarge.end(), GenerateIncrementalIntegers<int32_t>());
+
+
+
+ std::list<TestObject> stdListTOLarge(10000);
+ eastl::generate(stdListTOLarge.begin(), stdListTOLarge.end(), GenerateIncrementalIntegers<TestObject>());
+
+ eastl::list<TestObject> eaListTOLarge(10000);
+ eastl::generate(eaListTOLarge.begin(), eaListTOLarge.end(), GenerateIncrementalIntegers<TestObject>());
+
+
+ std::vector<TestObject> stdVectorTOLarge(10000);
+ eastl::generate(stdVectorTOLarge.begin(), stdVectorTOLarge.end(), GenerateIncrementalIntegers<TestObject>());
+
+ eastl::vector<TestObject> eaVectorTOLarge(10000);
+ eastl::generate(eaVectorTOLarge.begin(), eaVectorTOLarge.end(), GenerateIncrementalIntegers<TestObject>());
+
+
+ std::list<TestObject> stdListTOSmall(10);
+ eastl::generate(stdListTOSmall.begin(), stdListTOSmall.end(), GenerateIncrementalIntegers<TestObject>());
+
+ eastl::list<TestObject> eaListTOSmall(10);
+ eastl::generate(eaListTOSmall.begin(), eaListTOSmall.end(), GenerateIncrementalIntegers<TestObject>());
+
+
+ std::vector<TestObject> stdVectorTOSmall(10);
+ eastl::generate(stdVectorTOSmall.begin(), stdVectorTOSmall.end(), GenerateIncrementalIntegers<TestObject>());
+
+ eastl::vector<TestObject> eaVectorTOSmall(10);
+ eastl::generate(eaVectorTOSmall.begin(), eaVectorTOSmall.end(), GenerateIncrementalIntegers<TestObject>());
+
+
+ for(int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test reverse
+ ///////////////////////////////
+
+ // There is no guaranteed Standard Library forward_list or slist.
+ TestRotateEa (stopwatch2, eaSlistIntLarge.begin(), eastl::next( eaSlistIntLarge.begin(), (eaSlistIntLarge.size() / 2) - 1), eaSlistIntLarge.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/slist<int32_t> large", stopwatch1.GetUnits(), 0 /* untested */, stopwatch2.GetElapsedTime());
+
+
+
+ TestRotateStd(stopwatch1, stdVectorLargePod32.begin(), std__::next(stdVectorLargePod32.begin(), (stdVectorLargePod32.size() / 2) - 1), stdVectorLargePod32.end());
+ TestRotateEa (stopwatch2, eaVectorLargePod32.begin(), eastl::next( eaVectorLargePod32.begin(), (eaVectorLargePod32.size() / 2) - 1), eaVectorLargePod32.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/vector<SizedPOD<32>> large", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+
+ TestRotateStd(stopwatch1, stdListIntLarge.begin(), std__::next(stdListIntLarge.begin(), (stdListIntLarge.size() / 2) - 1), stdListIntLarge.end());
+ TestRotateEa (stopwatch2, eaListIntLarge.begin(), eastl::next( eaListIntLarge.begin(), (eaListIntLarge.size() / 2) - 1), eaListIntLarge.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/list<int32_t> large", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestRotateStd(stopwatch1, stdVectorIntLarge.begin(), std__::next(stdVectorIntLarge.begin(), (stdVectorIntLarge.size() / 2) - 1), stdVectorIntLarge.end());
+ TestRotateEa (stopwatch2, eaVectorIntLarge.begin(), eastl::next( eaVectorIntLarge.begin(), (eaVectorIntLarge.size() / 2) - 1), eaVectorIntLarge.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/vector<int32_t large>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ TestRotateStd(stopwatch1, stdListIntSmall.begin(), std__::next(stdListIntSmall.begin(), (stdListIntSmall.size() / 2) - 1), stdListIntSmall.end());
+ TestRotateEa (stopwatch2, eaListIntSmall.begin(), eastl::next( eaListIntSmall.begin(), (eaListIntSmall.size() / 2) - 1), eaListIntSmall.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/list<int32_t> small", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestRotateStd(stopwatch1, stdVectorIntSmall.begin(), std__::next(stdVectorIntSmall.begin(), (stdVectorIntSmall.size() / 2) - 1), stdVectorIntSmall.end());
+ TestRotateEa (stopwatch2, eaVectorIntSmall.begin(), eastl::next( eaVectorIntSmall.begin(), (eaVectorIntSmall.size() / 2) - 1), eaVectorIntSmall.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/vector<int32_t small>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ TestRotateStd(stopwatch1, stdListTOLarge.begin(), std__::next(stdListTOLarge.begin(), (stdListTOLarge.size() / 2) - 1), stdListTOLarge.end());
+ TestRotateEa (stopwatch2, eaListTOLarge.begin(), eastl::next( eaListTOLarge.begin(), (eaListTOLarge.size() / 2) - 1), eaListTOLarge.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/list<TestObject large>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestRotateStd(stopwatch1, stdVectorTOLarge.begin(), std__::next(stdVectorTOLarge.begin(), (stdVectorTOLarge.size() / 2) - 1), stdVectorTOLarge.end());
+ TestRotateEa (stopwatch2, eaVectorTOLarge.begin(), eastl::next( eaVectorTOLarge.begin(), (eaVectorTOLarge.size() / 2) - 1), eaVectorTOLarge.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/vector<TestObject large>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ TestRotateStd(stopwatch1, stdListTOSmall.begin(), std__::next(stdListTOSmall.begin(), (stdListTOSmall.size() / 2) - 1), stdListTOSmall.end());
+ TestRotateEa (stopwatch2, eaListTOSmall.begin(), eastl::next( eaListTOSmall.begin(), (eaListTOSmall.size() / 2) - 1), eaListTOSmall.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/list<TestObject small>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestRotateStd(stopwatch1, stdVectorTOSmall.begin(), std__::next(stdVectorTOSmall.begin(), (stdVectorTOSmall.size() / 2) - 1), stdVectorTOSmall.end());
+ TestRotateEa (stopwatch2, eaVectorTOSmall.begin(), eastl::next( eaVectorTOSmall.begin(), (eaVectorTOSmall.size() / 2) - 1), eaVectorTOSmall.end());
+ if(i == 1)
+ Benchmark::AddResult("algorithm/rotate/vector<TestObject small>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+}
+
+void BenchmarkAlgorithm8(EASTLTest_Rand& rng, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2)
+{
+ const uint32_t ElementCount = 10000;
+
+ eastl::vector<int> srcVecA(ElementCount);
+ eastl::vector<int> srcVecB(ElementCount);
+
+ std::vector<int> stdVecAInt(ElementCount);
+ std::vector<int> stdVecBInt(ElementCount);
+ std::vector<int> stdVecOutInt(2 * ElementCount);
+ std::vector<TestObject> stdVecATestObject(ElementCount);
+ std::vector<TestObject> stdVecBTestObject(ElementCount);
+ std::vector<TestObject> stdVecOutTestObject(2 * ElementCount);
+
+ eastl::vector<int> eaVecAInt(ElementCount);
+ eastl::vector<int> eaVecBInt(ElementCount);
+ eastl::vector<int> eaVecOutInt(2 * ElementCount);
+ eastl::vector<TestObject> eaVecATestObject(ElementCount);
+ eastl::vector<TestObject> eaVecBTestObject(ElementCount);
+ eastl::vector<TestObject> eaVecOutTestObject(2 * ElementCount);
+
+ // Note:
+ // In some cases the compiler may generate branch free code for the loop body of merge.
+ // In this situation the performance of merging data that has a random merge selection (i.e. the chance that the smallest
+ // element is taken from the first or second list is essentially random) is the same as merging data where the choice of
+ // which list has the smallest element is predictable.
+ // However, if the compiler doesn't generate branch free code, then the performance of merge will suffer from branch
+ // misprediction when merging random data and will benefit greatly when misprediction is rare.
+ // This benchmark is aimed at highlighting what sort of code is being generated, and also showing the impact of
+ // predictability of the comparisons performed during merge. The branch predictablity /can/ have a large impact
+ // on merge sort performance.
+
+ // 'unpred' is the case where the comparison is unpredictable
+ // 'pred' is the case where the comparison is mostly predictable
+ const char* patternDescriptions[][2] =
+ {
+ {
+ "algorithm/merge/vector<int> (unpred)",
+ "algorithm/merge/vector<int> (pred)",
+ },
+ {
+ "algorithm/merge/vector<TestObject> (unpred)",
+ "algorithm/merge/vector<TestObject> (pred)",
+ },
+ };
+
+ enum Pattern
+ {
+ P_Random,
+ P_Predictable,
+ P_Count
+ };
+
+ for (int pattern = 0; pattern < P_Count; pattern++)
+ {
+ if (pattern == P_Random)
+ {
+ eastl::generate(srcVecA.begin(), srcVecA.end(), [&]{ return int(rng()); });
+ eastl::sort(srcVecA.begin(), srcVecA.end());
+ eastl::generate(srcVecB.begin(), srcVecB.end(), [&] { return int(rng()); });
+ eastl::sort(srcVecB.begin(), srcVecB.end());
+ }
+ else if (pattern == P_Predictable)
+ {
+ // The data pattern means that a simple/naive algorithm will select 'runLen' values
+ // from one list, and then 'runLen' values from the other list (alternating back and forth).
+ // Of course, a merge algorithm that is more complicated might have a different order of
+ // comparison.
+ const int runLen = 32;
+ for (int i = 0; i < ElementCount; i++)
+ {
+ int baseValue = ((i / runLen) * 2 * runLen) + (i % (runLen));
+ srcVecA[i] = baseValue;
+ srcVecB[i] = baseValue + runLen;
+ }
+ }
+
+ ///////////////////////////////
+ // Test merge
+ ///////////////////////////////
+ for (int i = 0; i < 2; i++)
+ {
+ eastl::copy(srcVecA.begin(), srcVecA.end(), stdVecAInt.begin());
+ eastl::copy(srcVecB.begin(), srcVecB.end(), stdVecBInt.begin());
+ eastl::copy(srcVecA.begin(), srcVecA.end(), eaVecAInt.begin());
+ eastl::copy(srcVecB.begin(), srcVecB.end(), eaVecBInt.begin());
+ TestMergeStd(stopwatch1, stdVecAInt.begin(), stdVecAInt.end(), stdVecBInt.begin(), stdVecBInt.end(), stdVecOutInt.begin());
+ TestMergeEa(stopwatch2, eaVecAInt.begin(), eaVecAInt.end(), eaVecBInt.begin(), eaVecBInt.end(), eaVecOutInt.begin());
+
+ if (i == 1)
+ {
+ Benchmark::AddResult(patternDescriptions[0][pattern], stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+
+ for (int j = 0; j < ElementCount; j++)
+ {
+ stdVecATestObject[j] = TestObject(srcVecA[j]);
+ stdVecBTestObject[j] = TestObject(srcVecB[j]);
+ eaVecATestObject[j] = TestObject(srcVecA[j]);
+ eaVecBTestObject[j] = TestObject(srcVecB[j]);
+ }
+ TestMergeStd(stopwatch1, stdVecATestObject.begin(), stdVecATestObject.end(), stdVecBTestObject.begin(), stdVecBTestObject.end(), stdVecOutTestObject.begin());
+ TestMergeEa(stopwatch2, eaVecATestObject.begin(), eaVecATestObject.end(), eaVecBTestObject.begin(), eaVecBTestObject.end(), eaVecOutTestObject.begin());
+
+ if (i == 1)
+ {
+ Benchmark::AddResult(patternDescriptions[1][pattern], stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+ }
+
+}
+
+
+
+void BenchmarkAlgorithm()
+{
+ EASTLTest_Printf("Algorithm\n");
+
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ BenchmarkAlgorithm1(rng, stopwatch1, stopwatch2);
+ BenchmarkAlgorithm2(rng, stopwatch1, stopwatch2);
+ BenchmarkAlgorithm3(rng, stopwatch1, stopwatch2);
+ BenchmarkAlgorithm4(rng, stopwatch1, stopwatch2);
+ BenchmarkAlgorithm5(rng, stopwatch1, stopwatch2);
+ BenchmarkAlgorithm6(rng, stopwatch1, stopwatch2);
+ BenchmarkAlgorithm7(rng, stopwatch1, stopwatch2);
+ BenchmarkAlgorithm8(rng, stopwatch1, stopwatch2);
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkBitset.cpp b/EASTL/benchmark/source/BenchmarkBitset.cpp
new file mode 100644
index 0000000..680622b
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkBitset.cpp
@@ -0,0 +1,366 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifdef _MSC_VER
+ // Microsoft STL generates warnings.
+ #pragma warning(disable: 4267) // 'initializing' : conversion from 'size_t' to 'const int', possible loss of data
+#endif
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/bitset.h>
+
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <bitset>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+using namespace EA;
+
+
+namespace
+{
+ template <typename Bitset>
+ void TestSet(EA::StdC::Stopwatch& stopwatch, Bitset& b)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 100000; i++)
+ {
+ b.set();
+ Benchmark::DoNothing(&b);
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Bitset>
+ void TestSetIndex(EA::StdC::Stopwatch& stopwatch, Bitset& b, size_t index)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 100000; i++)
+ {
+ b.set(index);
+ Benchmark::DoNothing(&b);
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Bitset>
+ void TestReset(EA::StdC::Stopwatch& stopwatch, Bitset& b)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 100000; i++)
+ {
+ b.reset();
+ Benchmark::DoNothing(&b);
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Bitset>
+ void TestFlip(EA::StdC::Stopwatch& stopwatch, Bitset& b)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 100000; i++)
+ {
+ b.flip();
+ Benchmark::DoNothing(&b);
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Bitset>
+ void TestTest(EA::StdC::Stopwatch& stopwatch, Bitset& b, unsigned nANDValue)
+ {
+ stopwatch.Restart();
+ for(unsigned i = 0; i < 100000; i++)
+ Benchmark::DoNothing(b.test(i & nANDValue)); // We use & instead of % because the former is always fast due to forced power of 2.
+ stopwatch.Stop();
+ }
+
+
+ template <typename Bitset>
+ void TestCount(EA::StdC::Stopwatch& stopwatch, Bitset& b)
+ {
+ size_t temp = 0;
+ stopwatch.Restart();
+ for(int i = 0; i < 100000; i++)
+ {
+ temp += b.count();
+ Benchmark::DoNothing(&temp);
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Bitset>
+ void TestRightShift(EA::StdC::Stopwatch& stopwatch, Bitset& b, size_t n)
+ {
+ size_t temp = 0;
+ stopwatch.Restart();
+ for(int i = 0; i < 100000; i++)
+ {
+ b >>= n;
+ Benchmark::DoNothing(&temp);
+ }
+ stopwatch.Stop();
+ }
+
+} // namespace
+
+
+
+void BenchmarkBitset()
+{
+ EASTLTest_Printf("Bitset\n");
+
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ {
+ std::bitset<15> stdBitset15;
+ eastl::bitset<15> eaBitset15;
+
+ std::bitset<35> stdBitset35;
+ eastl::bitset<35> eaBitset35;
+
+ std::bitset<75> stdBitset75;
+ eastl::bitset<75> eaBitset75;
+
+ std::bitset<1500> stdBitset1500;
+ eastl::bitset<1500> eaBitset1500;
+
+
+ for(int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test set()
+ ///////////////////////////////
+
+ TestSet(stopwatch1, stdBitset15);
+ TestSet(stopwatch2, eaBitset15);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<15>/set()", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSet(stopwatch1, stdBitset35);
+ TestSet(stopwatch2, eaBitset35);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<35>/set()", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSet(stopwatch1, stdBitset75);
+ TestSet(stopwatch2, eaBitset75);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<75>/set()", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSet(stopwatch1, stdBitset1500);
+ TestSet(stopwatch2, eaBitset1500);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<1500>/set()", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test set(index)
+ ///////////////////////////////
+
+ TestSetIndex(stopwatch1, stdBitset15, 13);
+ TestSetIndex(stopwatch2, eaBitset15, 13);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<15>/set(i)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSetIndex(stopwatch1, stdBitset35, 33);
+ TestSetIndex(stopwatch2, eaBitset35, 33);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<35>/set(i)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSetIndex(stopwatch1, stdBitset75, 73);
+ TestSetIndex(stopwatch2, eaBitset75, 73);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<75>/set(i)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSetIndex(stopwatch1, stdBitset1500, 730);
+ TestSetIndex(stopwatch2, eaBitset1500, 730);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<1500>/set(i)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test reset()
+ ///////////////////////////////
+
+ TestReset(stopwatch1, stdBitset15);
+ TestReset(stopwatch2, eaBitset15);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<15>/reset", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestReset(stopwatch1, stdBitset35);
+ TestReset(stopwatch2, eaBitset35);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<35>/reset", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestReset(stopwatch1, stdBitset75);
+ TestReset(stopwatch2, eaBitset75);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<75>/reset", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestReset(stopwatch1, stdBitset1500);
+ TestReset(stopwatch2, eaBitset1500);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<1500>/reset", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test flip
+ ///////////////////////////////
+
+ TestFlip(stopwatch1, stdBitset15);
+ TestFlip(stopwatch2, eaBitset15);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<15>/flip", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFlip(stopwatch1, stdBitset35);
+ TestFlip(stopwatch2, eaBitset35);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<35>/flip", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFlip(stopwatch1, stdBitset75);
+ TestFlip(stopwatch2, eaBitset75);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<75>/flip", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFlip(stopwatch1, stdBitset1500);
+ TestFlip(stopwatch2, eaBitset1500);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<1500>/flip", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test test
+ ///////////////////////////////
+
+ TestTest(stopwatch1, stdBitset15, 7);
+ TestTest(stopwatch2, eaBitset15, 7);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<15>/test", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestTest(stopwatch1, stdBitset35, 31);
+ TestTest(stopwatch2, eaBitset35, 31);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<35>/test", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestTest(stopwatch1, stdBitset75, 63);
+ TestTest(stopwatch2, eaBitset75, 63);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<75>/test", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestTest(stopwatch1, stdBitset1500, 1023);
+ TestTest(stopwatch2, eaBitset1500, 1023);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<1500>/test", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test count
+ ///////////////////////////////
+
+ TestCount(stopwatch1, stdBitset15);
+ TestCount(stopwatch2, eaBitset15);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<15>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestCount(stopwatch1, stdBitset35);
+ TestCount(stopwatch2, eaBitset35);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<35>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestCount(stopwatch1, stdBitset75);
+ TestCount(stopwatch2, eaBitset75);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<75>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestCount(stopwatch1, stdBitset1500);
+ TestCount(stopwatch2, eaBitset1500);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<1500>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test >>=
+ ///////////////////////////////
+
+ TestRightShift(stopwatch1, stdBitset15, 1);
+ TestRightShift(stopwatch2, eaBitset15, 1);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<15>/>>=/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(),
+ GetStdSTLType() == kSTLPort ? "STLPort is broken, neglects wraparound check." : NULL);
+
+ TestRightShift(stopwatch1, stdBitset35, 1);
+ TestRightShift(stopwatch2, eaBitset35, 1);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<35>/>>=/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(),
+ GetStdSTLType() == kSTLPort ? "STLPort is broken, neglects wraparound check." : NULL);
+
+ TestRightShift(stopwatch1, stdBitset75, 1);
+ TestRightShift(stopwatch2, eaBitset75, 1);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<75>/>>=/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(),
+ GetStdSTLType() == kSTLPort ? "STLPort is broken, neglects wraparound check." : NULL);
+
+ TestRightShift(stopwatch1, stdBitset1500, 1);
+ TestRightShift(stopwatch2, eaBitset1500, 1);
+
+ if(i == 1)
+ Benchmark::AddResult("bitset<1500>/>>=/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(),
+ GetStdSTLType() == kSTLPort ? "STLPort is broken, neglects wraparound check." : NULL);
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkDeque.cpp b/EASTL/benchmark/source/BenchmarkDeque.cpp
new file mode 100644
index 0000000..d3c69de
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkDeque.cpp
@@ -0,0 +1,342 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/deque.h>
+#include <EASTL/vector.h>
+#include <EASTL/sort.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #pragma warning(disable: 4350) // behavior change: X called instead of Y
+#endif
+#include <algorithm>
+#include <vector>
+#include <deque>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+using namespace EA;
+
+
+namespace
+{
+ struct ValuePair
+ {
+ uint32_t key;
+ uint32_t v;
+ };
+
+ struct VPCompare
+ {
+ bool operator()(const ValuePair& vp1, const ValuePair& vp2) const
+ {
+ return (vp1.key == vp2.key) ? (vp1.v < vp2.v) : (vp1.key < vp2.key);
+ }
+ };
+
+ bool operator<(const ValuePair& vp1, const ValuePair& vp2)
+ {
+ return (vp1.key == vp2.key) ? (vp1.v < vp2.v) : (vp1.key < vp2.key);
+ }
+
+ bool operator==(const ValuePair& vp1, const ValuePair& vp2)
+ {
+ return (vp1.key == vp2.key) && (vp1.v == vp2.v);
+ }
+}
+
+
+EASTL_DECLARE_POD(ValuePair)
+EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(ValuePair)
+EASTL_DECLARE_TRIVIAL_COPY(ValuePair)
+EASTL_DECLARE_TRIVIAL_ASSIGN(ValuePair)
+EASTL_DECLARE_TRIVIAL_DESTRUCTOR(ValuePair)
+EASTL_DECLARE_TRIVIAL_RELOCATE(ValuePair)
+
+
+
+typedef std::deque<ValuePair> StdDeque;
+typedef eastl::deque<ValuePair, EASTLAllocatorType, 128> EaDeque; // What value do we pick for the subarray size to make the comparison fair? Using the default isn't ideal because it results in this test measuring speed efficiency and ignoring memory efficiency.
+
+
+
+namespace
+{
+ template <typename Container>
+ void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector<uint32_t>& intVector)
+ {
+ stopwatch.Restart();
+ for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ {
+ const ValuePair vp = { intVector[j], intVector[j] };
+ c.push_back(vp);
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestPushFront(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector<uint32_t>& intVector)
+ {
+ stopwatch.Restart();
+ for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ {
+ const ValuePair vp = { intVector[j], intVector[j] };
+ c.push_front(vp);
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ uint64_t temp = 0;
+ stopwatch.Restart();
+ for(typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++)
+ temp += c[j].key;
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(temp & 0xffffffff));
+ }
+
+
+ template <typename Container>
+ void TestIteration(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::iterator it = c.begin(), itEnd = c.end();
+ stopwatch.Restart();
+ while(it != itEnd)
+ ++it;
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(*it).key);
+
+ /* Alternative way to measure:
+ const eastl_size_t n = c.size();
+ stopwatch.Restart();
+ for(eastl_size_t i = 0; i < n; ++i)
+ ++it;
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(*it).key);
+ */
+ }
+
+
+ template <typename Container>
+ void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ // Intentionally use eastl find in order to measure just
+ // vector access speed and not be polluted by sort speed.
+ const ValuePair vp = { 0xffffffff, 0 };
+ stopwatch.Restart();
+ typename Container::iterator it = eastl::find(c.begin(), c.end(), vp);
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(*it).key);
+ }
+
+
+ template <typename Container>
+ void TestSort(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ // Intentionally use eastl sort in order to measure just
+ // vector access speed and not be polluted by sort speed.
+ VPCompare vpCompare;
+ stopwatch.Restart();
+ eastl::quick_sort(c.begin(), c.end(), vpCompare);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c[0].key);
+ }
+
+
+ template <typename Container>
+ void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ const ValuePair vp = { 0xffffffff, 0 };
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = 2000, it = c.begin(); j < jEnd; ++j)
+ {
+ it = c.insert(it, vp);
+
+ if(it == c.end()) // Try to safely increment the iterator three times.
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestErase(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = 2000, it = c.begin(); j < jEnd; ++j)
+ {
+ it = c.erase(it);
+
+ if(it == c.end()) // Try to safely increment the iterator three times.
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ }
+
+} // namespace
+
+
+
+void BenchmarkDeque()
+{
+ EASTLTest_Printf("Deque\n");
+
+ EA::UnitTest::RandGenT<uint32_t> rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ { // Exercise some declarations
+ int nErrorCount = 0;
+ ValuePair vp1 = { 0, 0 }, vp2 = { 0, 0 };
+ VPCompare c1, c2;
+
+ VERIFY(c1.operator()(vp1, vp2) == c2.operator()(vp1, vp2));
+ VERIFY((vp1 < vp2) || (vp1 == vp2) || !(vp1 == vp2));
+ }
+
+ {
+ eastl::vector<uint32_t> intVector(100000);
+ eastl::generate(intVector.begin(), intVector.end(), rng);
+
+ for(int i = 0; i < 2; i++)
+ {
+ StdDeque stdDeque;
+ EaDeque eaDeque;
+
+
+ ///////////////////////////////
+ // Test push_back
+ ///////////////////////////////
+
+ TestPushBack(stopwatch1, stdDeque, intVector);
+ TestPushBack(stopwatch2, eaDeque, intVector);
+
+ if(i == 1)
+ Benchmark::AddResult("deque<ValuePair>/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test push_front
+ ///////////////////////////////
+
+ TestPushFront(stopwatch1, stdDeque, intVector);
+ TestPushFront(stopwatch2, eaDeque, intVector);
+
+ if(i == 1)
+ Benchmark::AddResult("deque<ValuePair>/push_front", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test operator[]
+ ///////////////////////////////
+
+ TestBracket(stopwatch1, stdDeque);
+ TestBracket(stopwatch2, eaDeque);
+
+ if(i == 1)
+ Benchmark::AddResult("deque<ValuePair>/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test iteration
+ ///////////////////////////////
+
+ TestIteration(stopwatch1, stdDeque);
+ TestIteration(stopwatch2, eaDeque);
+
+ if(i == 1)
+ Benchmark::AddResult("deque<ValuePair>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find()
+ ///////////////////////////////
+
+ TestFind(stopwatch1, stdDeque);
+ TestFind(stopwatch2, eaDeque);
+
+ if(i == 1)
+ Benchmark::AddResult("deque<ValuePair>/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test sort
+ ///////////////////////////////
+
+ // Currently VC++ complains about our sort function decrementing std::iterator that is already at begin(). In the strictest sense,
+ // that's a valid complaint, but we aren't testing std STL here. We will want to revise our sort function eventually.
+ #if !defined(_MSC_VER) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2)
+ TestSort(stopwatch1, stdDeque);
+ TestSort(stopwatch2, eaDeque);
+
+ if(i == 1)
+ Benchmark::AddResult("deque<ValuePair>/sort", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ #endif
+
+
+ ///////////////////////////////
+ // Test insert
+ ///////////////////////////////
+
+ TestInsert(stopwatch1, stdDeque);
+ TestInsert(stopwatch2, eaDeque);
+
+ if(i == 1)
+ Benchmark::AddResult("deque<ValuePair>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase
+ ///////////////////////////////
+
+ TestErase(stopwatch1, stdDeque);
+ TestErase(stopwatch2, eaDeque);
+
+ if(i == 1)
+ Benchmark::AddResult("deque<ValuePair>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkHash.cpp b/EASTL/benchmark/source/BenchmarkHash.cpp
new file mode 100644
index 0000000..35470e7
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkHash.cpp
@@ -0,0 +1,469 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/vector.h>
+#include <EASTL/hash_map.h>
+#include <EASTL/string.h>
+#include <EASTL/algorithm.h>
+
+
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <unordered_map>
+#include <string>
+#include <algorithm>
+#include <stdio.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+
+using namespace EA;
+
+
+// HashString8
+//
+// We define a string
+//
+template <typename String>
+struct HashString8
+{
+ // Defined for EASTL, STLPort, SGI, etc. and Metrowerks-related hash tables:
+ size_t operator()(const String& s) const
+ {
+ const uint8_t* p = (const uint8_t*) s.c_str();
+ uint32_t c, stringHash = UINT32_C(2166136261);
+ while((c = *p++) != 0)
+ stringHash = (stringHash * 16777619) ^ c;
+ return stringHash;
+ }
+
+ // Defined for Dinkumware-related (e.g. MS STL) hash tables:
+ bool operator()(const String& s1, const String& s2) const
+ {
+ return s1 < s2;
+ }
+
+ // Defined for Dinkumware-related (e.g. MS STL) hash tables:
+ enum {
+ bucket_size = 7,
+ min_buckets = 8
+ };
+};
+
+
+using StdMapUint32TO = std::unordered_map<uint32_t, TestObject>;
+using StdMapStrUint32 = std::unordered_map<std::string, uint32_t, HashString8<std::string>>;
+
+using EaMapUint32TO = eastl::hash_map<uint32_t, TestObject>;
+using EaMapStrUint32 = eastl::hash_map<eastl::string, uint32_t, HashString8<eastl::string>>;
+
+
+namespace
+{
+ template <typename Container, typename Value>
+ void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ c.insert(pArrayBegin, pArrayEnd);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestIteration(EA::StdC::Stopwatch& stopwatch, const Container& c, const Value& findValue)
+ {
+ stopwatch.Restart();
+ typename Container::const_iterator it = eastl::find(c.begin(), c.end(), findValue); // It shouldn't matter what find implementation we use here, as it merely iterates values.
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%p", &*it);
+ }
+
+
+ template <typename Container, typename Value>
+ void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ Benchmark::DoNothing(&c[pArrayBegin->first]);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ typename Container::iterator it = c.find(pArrayBegin->first);
+ Benchmark::DoNothing(&it);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestFindAsStd(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ typename Container::iterator it = c.find(pArrayBegin->first.c_str());
+ Benchmark::DoNothing(&it);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestFindAsEa(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ typename Container::iterator it = c.find_as(pArrayBegin->first.c_str());
+ Benchmark::DoNothing(&it);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestCount(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ typename Container::size_type temp = 0;
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ temp += c.count(pArrayBegin->first);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp);
+ }
+
+
+ template <typename Container, typename Value>
+ void TestEraseValue(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ c.erase(pArrayBegin->first);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ }
+
+
+ template <typename Container>
+ void TestErasePosition(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = c.size() / 3, it = c.begin(); j < jEnd; ++j)
+ {
+ // The erase fucntion is supposed to return an iterator, but the C++ standard was
+ // not initially clear about it and some STL implementations don't do it correctly.
+ #if (defined(_MSC_VER) || defined(_CPPLIB_VER)) // _CPPLIB_VER is something defined by Dinkumware STL.
+ it = c.erase(it);
+ #else
+ // This pathway may execute at a slightly different speed than the
+ // standard behaviour, but that's fine for the benchmark because the
+ // benchmark is measuring the speed of erasing while iterating, and
+ // however it needs to get done by the given STL is how it is measured.
+ const typename Container::iterator itErase(it++);
+ c.erase(itErase);
+ #endif
+
+ ++it;
+ ++it;
+ }
+
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p %p", &c, &it);
+ }
+
+
+ template <typename Container>
+ void TestEraseRange(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it1 = c.begin();
+ typename Container::iterator it2 = c.begin();
+
+ for(j = 0, jEnd = c.size() / 3; j < jEnd; ++j)
+ ++it2;
+
+ stopwatch.Restart();
+ c.erase(it1, it2);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p %p %p", &c, &it1, &it2);
+ }
+
+
+ template <typename Container>
+ void TestClear(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ c.clear();
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ }
+
+
+} // namespace
+
+
+
+void BenchmarkHash()
+{
+ EASTLTest_Printf("HashMap\n");
+
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ {
+ eastl::vector< std::pair<uint32_t, TestObject> > stdVectorUT(10000);
+ eastl::vector< eastl::pair<uint32_t, TestObject> > eaVectorUT(10000);
+
+ eastl::vector< std::pair< std::string, uint32_t> > stdVectorSU(10000);
+ eastl::vector< eastl::pair<eastl::string, uint32_t> > eaVectorSU(10000);
+
+ for(eastl_size_t i = 0, iEnd = stdVectorUT.size(); i < iEnd; i++)
+ {
+ const uint32_t n1 = rng.RandLimit((uint32_t)(iEnd / 2));
+ const uint32_t n2 = rng.RandValue();
+
+ stdVectorUT[i] = std::pair<uint32_t, TestObject>(n1, TestObject(n2));
+ eaVectorUT[i] = eastl::pair<uint32_t, TestObject>(n1, TestObject(n2));
+
+ char str_n1[32];
+ sprintf(str_n1, "%u", (unsigned)n1);
+
+ stdVectorSU[i] = std::pair< std::string, uint32_t>( std::string(str_n1), n2);
+ eaVectorSU[i] = eastl::pair<eastl::string, uint32_t>(eastl::string(str_n1), n2);
+ }
+
+ for(int i = 0; i < 2; i++)
+ {
+ StdMapUint32TO stdMapUint32TO;
+ EaMapUint32TO eaMapUint32TO;
+
+ StdMapStrUint32 stdMapStrUint32;
+ EaMapStrUint32 eaMapStrUint32;
+
+
+ ///////////////////////////////
+ // Test insert(const value_type&)
+ ///////////////////////////////
+
+ TestInsert(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size());
+ TestInsert(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestInsert(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size());
+ TestInsert(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test iteration
+ ///////////////////////////////
+
+ TestIteration(stopwatch1, stdMapUint32TO, StdMapUint32TO::value_type(9999999, TestObject(9999999)));
+ TestIteration(stopwatch2, eaMapUint32TO, EaMapUint32TO::value_type(9999999, TestObject(9999999)));
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestIteration(stopwatch1, stdMapStrUint32, StdMapStrUint32::value_type( std::string("9999999"), 9999999));
+ TestIteration(stopwatch2, eaMapStrUint32, EaMapStrUint32::value_type(eastl::string("9999999"), 9999999));
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test operator[]
+ ///////////////////////////////
+
+ TestBracket(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size());
+ TestBracket(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestBracket(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size());
+ TestBracket(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find
+ ///////////////////////////////
+
+ TestFind(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size());
+ TestFind(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFind(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size());
+ TestFind(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find_as
+ ///////////////////////////////
+
+ TestFindAsStd(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size());
+ TestFindAsEa(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/find_as/char*", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test count
+ ///////////////////////////////
+
+ TestCount(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size());
+ TestCount(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestCount(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size());
+ TestCount(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size());
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase(const key_type& key)
+ ///////////////////////////////
+
+ TestEraseValue(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + (stdVectorUT.size() / 2));
+ TestEraseValue(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + (eaVectorUT.size() / 2));
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/erase val", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestEraseValue(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + (stdVectorSU.size() / 2));
+ TestEraseValue(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + (eaVectorSU.size() / 2));
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/erase val", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase(iterator position)
+ ///////////////////////////////
+
+ TestErasePosition(stopwatch1, stdMapUint32TO);
+ TestErasePosition(stopwatch2, eaMapUint32TO);
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/erase pos", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestErasePosition(stopwatch1, stdMapStrUint32);
+ TestErasePosition(stopwatch2, eaMapStrUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/erase pos", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase(iterator first, iterator last)
+ ///////////////////////////////
+
+ TestEraseRange(stopwatch1, stdMapUint32TO);
+ TestEraseRange(stopwatch2, eaMapUint32TO);
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/erase range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestEraseRange(stopwatch1, stdMapStrUint32);
+ TestEraseRange(stopwatch2, eaMapStrUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/erase range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test clear()
+ ///////////////////////////////
+
+ // Clear the containers of whatever they happen to have. We want the containers to have full data.
+ TestClear(stopwatch1, stdMapUint32TO);
+ TestClear(stopwatch2, eaMapUint32TO);
+ TestClear(stopwatch1, stdMapStrUint32);
+ TestClear(stopwatch2, eaMapStrUint32);
+
+ // Re-set the containers with full data.
+ TestInsert(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size());
+ TestInsert(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size());
+ TestInsert(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size());
+ TestInsert(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size());
+
+ // Now clear the data again, this time measuring it.
+ TestClear(stopwatch1, stdMapUint32TO);
+ TestClear(stopwatch2, eaMapUint32TO);
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<uint32_t, TestObject>/clear", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestClear(stopwatch1, stdMapStrUint32);
+ TestClear(stopwatch2, eaMapStrUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("hash_map<string, uint32_t>/clear", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkHeap.cpp b/EASTL/benchmark/source/BenchmarkHeap.cpp
new file mode 100644
index 0000000..635cf31
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkHeap.cpp
@@ -0,0 +1,238 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/heap.h>
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #pragma warning(disable: 4350) // behavior change: X called instead of Y
+#endif
+#include <algorithm>
+#include <vector>
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+using namespace EA;
+
+
+namespace
+{
+ template <typename Iterator>
+ void TestMakeHeapStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last)
+ {
+ stopwatch.Restart();
+ std::make_heap(first, last);
+ stopwatch.Stop();
+ }
+
+ template <typename Iterator>
+ void TestMakeHeapEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last)
+ {
+ stopwatch.Restart();
+ eastl::make_heap(first, last);
+ stopwatch.Stop();
+ }
+
+
+
+ template <typename Iterator1, typename Iterator2>
+ void TestPushHeapStd(EA::StdC::Stopwatch& stopwatch, Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2)
+ {
+ stopwatch.Restart();
+ while(first2 != last2)
+ {
+ *last1++ = *first2++;
+ std::push_heap(first1, last1);
+ }
+ stopwatch.Stop();
+ }
+
+ template <typename Iterator1, typename Iterator2>
+ void TestPushHeapEa(EA::StdC::Stopwatch& stopwatch, Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2)
+ {
+ stopwatch.Restart();
+ while(first2 != last2)
+ {
+ *last1++ = *first2++;
+ eastl::push_heap(first1, last1);
+ }
+ stopwatch.Stop();
+ }
+
+
+
+ template <typename Iterator>
+ void TestPopHeapStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, Iterator popEnd)
+ {
+ stopwatch.Restart();
+ while(last != popEnd)
+ std::pop_heap(first, last--);
+ stopwatch.Stop();
+ }
+
+ template <typename Iterator>
+ void TestPopHeapEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, Iterator popEnd)
+ {
+ stopwatch.Restart();
+ while(last != popEnd)
+ eastl::pop_heap(first, last--);
+ stopwatch.Stop();
+ }
+
+
+
+ template <typename Iterator>
+ void TestSortHeapStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last)
+ {
+ stopwatch.Restart();
+ std::sort_heap(first, last);
+ stopwatch.Stop();
+ }
+
+ template <typename Iterator>
+ void TestSortHeapEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last)
+ {
+ stopwatch.Restart();
+ eastl::sort_heap(first, last);
+ stopwatch.Stop();
+ }
+
+} // namespace
+
+
+
+void BenchmarkHeap()
+{
+ EASTLTest_Printf("Heap (Priority Queue)\n");
+
+ EA::UnitTest::RandGenT<uint32_t> rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ {
+ const int kArraySize = 100000;
+
+ // uint32[]
+ uint32_t* const pIntArrayS = new uint32_t[kArraySize * 2]; // * 2 because we will be adding more items via push_heap.
+ uint32_t* const pIntArrayE = new uint32_t[kArraySize * 2]; // S means Std; E means EA.
+ uint32_t* const pIntArray2 = new uint32_t[kArraySize]; // This will be used for pop_heap.
+
+ eastl::generate(pIntArrayS, pIntArrayS + kArraySize, rng);
+ eastl::copy(pIntArrayS, pIntArrayS + kArraySize, pIntArrayE);
+ eastl::copy(pIntArrayS, pIntArrayS + kArraySize, pIntArray2);
+
+
+ // vector<TestObject>
+ std::vector<TestObject> stdVectorTO(kArraySize * 2);
+ std::vector<TestObject> stdVectorTO2(kArraySize);
+ eastl::vector<TestObject> eaVectorTO(kArraySize * 2);
+ eastl::vector<TestObject> eaVectorTO2(kArraySize);
+
+ for(int k = 0; k < kArraySize; k++)
+ {
+ stdVectorTO[k] = TestObject(pIntArrayS[k]);
+ stdVectorTO2[k] = TestObject(pIntArrayS[k]);
+ eaVectorTO[k] = TestObject(pIntArrayS[k]);
+ eaVectorTO2[k] = TestObject(pIntArrayS[k]);
+ }
+
+
+ for(int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test make_heap
+ ///////////////////////////////
+
+ TestMakeHeapStd(stopwatch1, pIntArrayS, pIntArrayS + kArraySize);
+ TestMakeHeapEa (stopwatch2, pIntArrayE, pIntArrayE + kArraySize);
+
+ if(i == 1)
+ Benchmark::AddResult("heap (uint32_t[])/make_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestMakeHeapStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.begin() + kArraySize);
+ TestMakeHeapEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.begin() + kArraySize);
+
+ if(i == 1)
+ Benchmark::AddResult("heap (vector<TestObject>)/make_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test push_heap
+ ///////////////////////////////
+
+ TestPushHeapStd(stopwatch1, pIntArrayS, pIntArrayS + kArraySize, pIntArray2, pIntArray2 + kArraySize);
+ TestPushHeapEa (stopwatch2, pIntArrayE, pIntArrayE + kArraySize, pIntArray2, pIntArray2 + kArraySize);
+
+ if(i == 1)
+ Benchmark::AddResult("heap (uint32_t[])/push_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestPushHeapStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.begin() + kArraySize, stdVectorTO2.begin(), stdVectorTO2.begin() + kArraySize);
+ TestPushHeapEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.begin() + kArraySize, eaVectorTO2.begin(), eaVectorTO2.begin() + kArraySize);
+
+ if(i == 1)
+ Benchmark::AddResult("heap (vector<TestObject>)/push_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test pop_heap
+ ///////////////////////////////
+
+ TestPopHeapStd(stopwatch1, pIntArrayS, pIntArrayS + (kArraySize * 2), pIntArrayS + kArraySize); // * 2 because we used push_heap above to add more items.
+ TestPopHeapEa (stopwatch2, pIntArrayE, pIntArrayE + (kArraySize * 2), pIntArrayE + kArraySize);
+
+ if(i == 1)
+ Benchmark::AddResult("heap (uint32_t[])/pop_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestPopHeapStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.begin() + (kArraySize * 2), stdVectorTO.begin() + kArraySize); // * 2 because we used push_heap above to add more items.
+ TestPopHeapEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.begin() + (kArraySize * 2), eaVectorTO.begin() + kArraySize);
+
+ if(i == 1)
+ Benchmark::AddResult("heap (vector<TestObject>)/pop_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test sort_heap
+ ///////////////////////////////
+
+ TestSortHeapStd(stopwatch1, pIntArrayS, pIntArrayS + kArraySize);
+ TestSortHeapEa (stopwatch2, pIntArrayE, pIntArrayE + kArraySize);
+
+ if(i == 1)
+ Benchmark::AddResult("heap (uint32_t[])/sort_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSortHeapStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.begin() + kArraySize);
+ TestSortHeapEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.begin() + kArraySize);
+
+ if(i == 1)
+ Benchmark::AddResult("heap (vector<TestObject>)/sort_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+
+ delete[] pIntArrayS;
+ delete[] pIntArrayE;
+ delete[] pIntArray2;
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkList.cpp b/EASTL/benchmark/source/BenchmarkList.cpp
new file mode 100644
index 0000000..1d22ad8
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkList.cpp
@@ -0,0 +1,382 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/list.h>
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/random.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #pragma warning(disable: 4555) // expression has no effect; expected expression with side-effect
+ #pragma warning(disable: 4350) // behavior change: X called instead of Y
+#endif
+#include <list>
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+using namespace EA;
+using namespace eastl;
+
+
+
+typedef std::list<TestObject> StdListTO;
+typedef eastl::list<TestObject> EaListTO;
+
+
+
+namespace
+{
+ void DoNothing(void*)
+ {
+ // Empty
+ }
+
+
+ template <typename ContainerSource, typename Container>
+ void TestCtorIterator(EA::StdC::Stopwatch& stopwatch, const ContainerSource& cs, Container*) // Dummy Container argument because of GCC 2.X limitations.
+ {
+ stopwatch.Restart();
+ Container c(cs.begin(), cs.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX);
+ }
+
+
+ template <typename Container>
+ void TestCtorN(EA::StdC::Stopwatch& stopwatch, Container*) // Dummy Container argument because of GCC 2.X limitations.
+ {
+ stopwatch.Restart();
+ Container c(10000);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX);
+ }
+
+
+ template <typename Container>
+ void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c, const TestObject* pTOBegin, const TestObject* const pTOEnd)
+ {
+ stopwatch.Restart();
+ while(pTOBegin != pTOEnd)
+ c.push_back(*pTOBegin++);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX);
+ }
+
+
+ template <typename Container>
+ void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c, const TestObject* pTOBegin, const TestObject* const pTOEnd)
+ {
+ typename Container::iterator it = c.begin();
+ stopwatch.Restart();
+ while(pTOBegin != pTOEnd)
+ {
+ it = c.insert(it, *pTOBegin++);
+
+ if(++it == c.end()) // Try to safely increment the iterator a couple times
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX);
+ }
+
+
+ template <typename Container>
+ void TestSize(EA::StdC::Stopwatch& stopwatch, Container& c, void (*pFunction)(...))
+ {
+ stopwatch.Restart();
+ for(int i = 0; (i < 10000) && c.size(); i++)
+ (*pFunction)(&c);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c, const TestObject& to)
+ {
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ stopwatch.Restart();
+ typename Container::iterator it = eastl::find(c.begin(), c.end(), to);
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%d", (*it).mX);
+ }
+
+
+ template <typename Container>
+ void TestReverse(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ stopwatch.Restart();
+ c.reverse();
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX);
+ }
+
+
+ template <typename Container>
+ void TestRemove(EA::StdC::Stopwatch& stopwatch, Container& c, const TestObject* pTOBegin, const TestObject* const pTOEnd)
+ {
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ stopwatch.Restart();
+ while(pTOBegin != pTOEnd)
+ c.remove(*pTOBegin++);
+ stopwatch.Stop();
+ if(!c.empty())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX);
+ }
+
+
+ template <typename Container>
+ void TestSplice(EA::StdC::Stopwatch& stopwatch, Container& c, Container& cSource)
+ {
+ typename Container::iterator it = c.begin();
+ int i = 0, iEnd = (int)cSource.size() - 5;
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ stopwatch.Restart();
+ while(i++ != iEnd)
+ c.splice(it, cSource, cSource.begin());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX);
+ }
+
+
+ template <typename Container>
+ void TestErase(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::iterator it = c.begin();
+ int i = 0, iEnd = (int)c.size() - 5;
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ stopwatch.Restart();
+ while(i++ != iEnd)
+ {
+ it = c.erase(it);
+
+ if(it == c.end()) // Try to safely increment the iterator a couple times
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX);
+ }
+
+} // namespace
+
+
+
+
+void BenchmarkList()
+{
+ EASTLTest_Printf("List\n");
+
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ EaListTO eaListTO_1(1);
+ EaListTO eaListTO_10(10);
+ EaListTO eaListTO_100(100);
+ StdListTO stdListTO_1(1);
+ StdListTO stdListTO_10(10);
+ StdListTO stdListTO_100(100);
+
+ {
+ char buffer[32];
+ sprintf(buffer, "%p", &DoNothing);
+ }
+
+ {
+ eastl::vector<TestObject> toVector(100000);
+ for(eastl_size_t i = 0, iEnd = toVector.size(); i < iEnd; ++i)
+ toVector[i] = TestObject((int)i);
+ random_shuffle(toVector.begin(), toVector.end(), rng);
+
+
+ for(int i = 0; i < 2; i++)
+ {
+ StdListTO stdListTO;
+ EaListTO eaListTO;
+
+
+ ///////////////////////////////
+ // Test list(InputIterator first, InputIterator last)
+ ///////////////////////////////
+
+ TestCtorIterator(stopwatch1, toVector, &stdListTO);
+ TestCtorIterator(stopwatch2, toVector, &eaListTO);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/ctor(it)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test list(size_type n)
+ ///////////////////////////////
+
+ TestCtorN(stopwatch1, &stdListTO);
+ TestCtorN(stopwatch2, &eaListTO);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/ctor(n)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+
+ ///////////////////////////////
+ // Test push_back()
+ ///////////////////////////////
+
+ TestPushBack(stopwatch1, stdListTO, toVector.data(), toVector.data() + toVector.size());
+ TestPushBack(stopwatch2, eaListTO, toVector.data(), toVector.data() + toVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+
+ ///////////////////////////////
+ // Test insert()
+ ///////////////////////////////
+
+ TestInsert(stopwatch1, stdListTO, toVector.data(), toVector.data() + toVector.size());
+ TestInsert(stopwatch2, eaListTO, toVector.data(), toVector.data() + toVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+
+ ///////////////////////////////
+ // Test size()
+ ///////////////////////////////
+
+ TestSize(stopwatch1, stdListTO_1, Benchmark::DoNothing);
+ TestSize(stopwatch2, eaListTO_1, Benchmark::DoNothing);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/size/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSize(stopwatch1, stdListTO_10, Benchmark::DoNothing);
+ TestSize(stopwatch2, eaListTO_10, Benchmark::DoNothing);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/size/10", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()
+ #if !EASTL_LIST_SIZE_CACHE
+ , "EASTL is configured to not cache the list size."
+ #endif
+ );
+
+ TestSize(stopwatch1, stdListTO_100, Benchmark::DoNothing);
+ TestSize(stopwatch2, eaListTO_100, Benchmark::DoNothing);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/size/100", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()
+ #if !EASTL_LIST_SIZE_CACHE
+ , "EASTL is configured to not cache the list size."
+ #endif
+ );
+
+
+
+ ///////////////////////////////
+ // Test find()
+ ///////////////////////////////
+
+ TestFind(stopwatch1, stdListTO, TestObject(99999999));
+ TestFind(stopwatch2, eaListTO, TestObject(99999999));
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+
+ ///////////////////////////////
+ // Test reverse()
+ ///////////////////////////////
+
+ TestReverse(stopwatch1, stdListTO);
+ TestReverse(stopwatch2, eaListTO);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/reverse", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+
+ ///////////////////////////////
+ // Test remove()
+ ///////////////////////////////
+
+ random_shuffle(toVector.begin(), toVector.end(), rng);
+ TestRemove(stopwatch1, stdListTO, &toVector[0], &toVector[20]);
+ TestRemove(stopwatch2, eaListTO, &toVector[0], &toVector[20]);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/remove", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+
+ ///////////////////////////////
+ // Test splice()
+ ///////////////////////////////
+ StdListTO listCopyStd(stdListTO);
+ EaListTO listCopyEa(eaListTO);
+
+ TestSplice(stopwatch1, stdListTO, listCopyStd);
+ TestSplice(stopwatch2, eaListTO, listCopyEa);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/splice", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+
+ ///////////////////////////////
+ // Test erase()
+ ///////////////////////////////
+
+ TestErase(stopwatch1, stdListTO);
+ TestErase(stopwatch2, eaListTO);
+
+ if(i == 1)
+ Benchmark::AddResult("list<TestObject>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkMap.cpp b/EASTL/benchmark/source/BenchmarkMap.cpp
new file mode 100644
index 0000000..d2fc35e
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkMap.cpp
@@ -0,0 +1,382 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/map.h>
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <map>
+#include <algorithm>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+using namespace EA;
+
+
+typedef std::map<TestObject, uint32_t> StdMapTOUint32;
+typedef eastl::map<TestObject, uint32_t> EaMapTOUint32;
+
+
+namespace
+{
+ template <typename Container, typename Value>
+ void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd, const Value& highValue)
+ {
+ stopwatch.Restart();
+ c.insert(pArrayBegin, pArrayEnd);
+ stopwatch.Stop();
+ c.insert(highValue);
+ }
+
+
+ template <typename Container, typename Value>
+ void TestIteration(EA::StdC::Stopwatch& stopwatch, const Container& c, const Value& findValue)
+ {
+ stopwatch.Restart();
+ typename Container::const_iterator it = eastl::find(c.begin(), c.end(), findValue); // It shouldn't matter what find implementation we use here, as it merely iterates values.
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%p", &*it);
+ }
+
+
+ template <typename Container, typename Value>
+ void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ Benchmark::DoNothing(c[pArrayBegin->first]);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ Benchmark::DoNothing(c.find(pArrayBegin->first)->second);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestCount(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ typename Container::size_type temp = 0;
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ temp += c.count(pArrayBegin->first);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp);
+ }
+
+
+ template <typename Container, typename Value>
+ void TestLowerBound(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ Benchmark::DoNothing(c.lower_bound(pArrayBegin->first)->second);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestUpperBound(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ Benchmark::DoNothing(c.upper_bound(pArrayBegin->first)->second);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestEqualRange(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ Benchmark::DoNothing(c.equal_range(pArrayBegin->first).second->second);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename Value>
+ void TestEraseValue(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ c.erase(pArrayBegin->first);
+ ++pArrayBegin;
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ }
+
+
+ template <typename Container>
+ void TestErasePosition(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = c.size() / 3, it = c.begin(); j < jEnd; ++j)
+ {
+ // The erase fucntion is supposed to return an iterator, but the C++ standard was
+ // not initially clear about it and some STL implementations don't do it correctly.
+ #if (((defined(_MSC_VER) || defined(_CPPLIB_VER)) && !defined(_HAS_STRICT_CONFORMANCE))) // _CPPLIB_VER is something defined by Dinkumware STL.
+ it = c.erase(it); // Standard behavior.
+ #else
+ // This pathway may execute at a slightly different speed than the
+ // standard behaviour, but that's fine for the benchmark because the
+ // benchmark is measuring the speed of erasing while iterating, and
+ // however it needs to get done by the given STL is how it is measured.
+ const typename Container::iterator itErase(it++);
+ c.erase(itErase);
+ #endif
+
+ ++it;
+ ++it;
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p %p", &c, &it);
+ }
+
+
+ template <typename Container>
+ void TestEraseRange(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it1 = c.begin();
+ typename Container::iterator it2 = c.begin();
+
+ for(j = 0, jEnd = c.size() / 3; j < jEnd; ++j)
+ ++it2;
+
+ stopwatch.Restart();
+ c.erase(it1, it2);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%p %p %p", &c, &it1, &it2);
+ }
+
+
+ template <typename Container>
+ void TestClear(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ c.clear();
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ }
+
+
+} // namespace
+
+
+
+void BenchmarkMap()
+{
+ EASTLTest_Printf("Map\n");
+
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ {
+ eastl::vector< std::pair<TestObject, uint32_t> > stdVector(10000);
+ eastl::vector< eastl::pair<TestObject, uint32_t> > eaVector(10000);
+
+ for(eastl_size_t i = 0, iEnd = stdVector.size(); i < iEnd; i++)
+ {
+ const uint32_t n1 = rng.RandLimit(((uint32_t)iEnd / 2));
+ const uint32_t n2 = rng.RandValue();
+
+ stdVector[i] = std::pair<TestObject, uint32_t>(TestObject(n1), n2);
+ eaVector[i] = eastl::pair<TestObject, uint32_t>(TestObject(n1), n2);
+ }
+
+ for(int i = 0; i < 2; i++)
+ {
+ StdMapTOUint32 stdMapTOUint32;
+ EaMapTOUint32 eaMapTOUint32;
+
+
+ ///////////////////////////////
+ // Test insert(const value_type&)
+ ///////////////////////////////
+ const std::pair<TestObject, uint32_t> stdHighValue(TestObject(0x7fffffff), 0x7fffffff);
+ const eastl::pair<TestObject, uint32_t> eaHighValue(TestObject(0x7fffffff), 0x7fffffff);
+
+ TestInsert(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size(), stdHighValue);
+ TestInsert(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size(), eaHighValue);
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test iteration
+ ///////////////////////////////
+
+ TestIteration(stopwatch1, stdMapTOUint32, StdMapTOUint32::value_type(TestObject(9999999), 9999999));
+ TestIteration(stopwatch2, eaMapTOUint32, EaMapTOUint32::value_type(TestObject(9999999), 9999999));
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test operator[]
+ ///////////////////////////////
+
+ TestBracket(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size());
+ TestBracket(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find
+ ///////////////////////////////
+
+ TestFind(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size());
+ TestFind(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test count
+ ///////////////////////////////
+
+ TestCount(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size());
+ TestCount(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test lower_bound
+ ///////////////////////////////
+
+ TestLowerBound(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size());
+ TestLowerBound(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/lower_bound", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test upper_bound
+ ///////////////////////////////
+
+ TestUpperBound(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size());
+ TestUpperBound(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/upper_bound", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test equal_range
+ ///////////////////////////////
+
+ TestEqualRange(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size());
+ TestEqualRange(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/equal_range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase(const key_type& key)
+ ///////////////////////////////
+
+ TestEraseValue(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + (stdVector.size() / 2));
+ TestEraseValue(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + (eaVector.size() / 2));
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/erase/key", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase(iterator position)
+ ///////////////////////////////
+
+ TestErasePosition(stopwatch1, stdMapTOUint32);
+ TestErasePosition(stopwatch2, eaMapTOUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/erase/pos", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(),
+ GetStdSTLType() == kSTLMS ? "MS uses a code bloating implementation of erase." : NULL);
+
+
+ ///////////////////////////////
+ // Test erase(iterator first, iterator last)
+ ///////////////////////////////
+
+ TestEraseRange(stopwatch1, stdMapTOUint32);
+ TestEraseRange(stopwatch2, eaMapTOUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/erase/range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test clear()
+ ///////////////////////////////
+
+ TestClear(stopwatch1, stdMapTOUint32);
+ TestClear(stopwatch2, eaMapTOUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("map<TestObject, uint32_t>/clear", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkSet.cpp b/EASTL/benchmark/source/BenchmarkSet.cpp
new file mode 100644
index 0000000..4a58b1a
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkSet.cpp
@@ -0,0 +1,353 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/set.h>
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <set>
+#include <algorithm>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+using namespace EA;
+
+
+typedef std::set<uint32_t> StdSetUint32;
+typedef eastl::set<uint32_t> EaSetUint32;
+
+
+namespace
+{
+ template <typename Container>
+ void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd)
+ {
+ stopwatch.Restart();
+ c.insert(pArrayBegin, pArrayEnd);
+ stopwatch.Stop();
+
+ // Intentionally push back a high uint32_t value. We do this so that
+ // later upper_bound, lower_bound and equal_range never return end().
+ c.insert(0xffffffff);
+ }
+
+
+ template <typename Container>
+ void TestIteration(EA::StdC::Stopwatch& stopwatch, const Container& c)
+ {
+ stopwatch.Restart();
+ typename Container::const_iterator it = eastl::find(c.begin(), c.end(), uint32_t(9999999));
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)*it);
+ }
+
+
+ template <typename Container>
+ void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd)
+ {
+ uint32_t temp = 0;
+ typename Container::iterator it;
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ it = c.find(*pArrayBegin++);
+ temp += *it;
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp);
+ }
+
+
+ template <typename Container>
+ void TestCount(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd)
+ {
+ typename Container::size_type temp = 0;
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ temp += c.count(*pArrayBegin++);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp);
+ }
+
+
+ template <typename Container>
+ void TestLowerBound(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd)
+ {
+ uint32_t temp = 0;
+ typename Container::iterator it;
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ it = c.lower_bound(*pArrayBegin++);
+ temp += *it; // We know that it != end because earlier we inserted 0xffffffff.
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp);
+ }
+
+
+ template <typename Container>
+ void TestUpperBound(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd)
+ {
+ uint32_t temp = 0;
+ typename Container::iterator it;
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ it = c.upper_bound(*pArrayBegin++);
+ temp += *it; // We know that it != end because earlier we inserted 0xffffffff.
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp);
+ }
+
+
+ template <typename Container>
+ void TestEqualRange(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd)
+ {
+ uint32_t temp = 0;
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ {
+ temp += *(c.equal_range(*pArrayBegin++).first); // We know that it != end because earlier we inserted 0xffffffff.
+ }
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp);
+ }
+
+
+ template <typename Container>
+ void TestEraseValue(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd)
+ {
+ stopwatch.Restart();
+ while(pArrayBegin != pArrayEnd)
+ c.erase(*pArrayBegin++);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ }
+
+
+ template <typename Container>
+ void TestErasePosition(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = c.size() / 3, it = c.begin(); j < jEnd; ++j)
+ {
+ // The erase fucntion is supposed to return an iterator, but the C++ standard was
+ // not initially clear about it and some STL implementations don't do it correctly.
+ #if (((defined(_MSC_VER) || defined(_CPPLIB_VER)) && !defined(_HAS_STRICT_CONFORMANCE))) // _CPPLIB_VER is something defined by Dinkumware STL.
+ it = c.erase(it);
+ #else
+ // This pathway may execute at a slightly different speed than the
+ // standard behaviour, but that's fine for the benchmark because the
+ // benchmark is measuring the speed of erasing while iterating, and
+ // however it needs to get done by the given STL is how it is measured.
+ const typename Container::iterator itErase(it++);
+ c.erase(itErase);
+ #endif
+
+ ++it;
+ ++it;
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestEraseRange(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it1 = c.begin();
+ typename Container::iterator it2 = c.begin();
+
+ for(j = 0, jEnd = c.size() / 3; j < jEnd; ++j)
+ ++it2;
+
+ stopwatch.Restart();
+ c.erase(it1, it2);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestClear(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ c.clear();
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size());
+ }
+
+
+} // namespace
+
+
+
+void BenchmarkSet()
+{
+ EASTLTest_Printf("Set\n");
+
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ {
+ eastl::vector<uint32_t> intVector(10000);
+ for(eastl_size_t i = 0, iEnd = intVector.size(); i < iEnd; i++)
+ intVector[i] = (uint32_t)rng.RandLimit(((uint32_t)iEnd / 2)); // This will result in duplicates and even a few triplicates.
+
+ for(int i = 0; i < 2; i++)
+ {
+ StdSetUint32 stdSetUint32;
+ EaSetUint32 eaSetUint32;
+
+
+ ///////////////////////////////
+ // Test insert(const value_type&)
+ ///////////////////////////////
+
+ TestInsert(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size());
+ TestInsert(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test iteration
+ ///////////////////////////////
+
+ TestIteration(stopwatch1, stdSetUint32);
+ TestIteration(stopwatch2, eaSetUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find
+ ///////////////////////////////
+
+ TestFind(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size());
+ TestFind(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test count
+ ///////////////////////////////
+
+ TestCount(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size());
+ TestCount(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test lower_bound
+ ///////////////////////////////
+
+ TestLowerBound(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size());
+ TestLowerBound(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/lower_bound", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test upper_bound
+ ///////////////////////////////
+
+ TestUpperBound(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size());
+ TestUpperBound(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/upper_bound", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test equal_range
+ ///////////////////////////////
+
+ TestEqualRange(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size());
+ TestEqualRange(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size());
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/equal_range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase(const key_type& key)
+ ///////////////////////////////
+
+ TestEraseValue(stopwatch1, stdSetUint32, &intVector[0], &intVector[intVector.size() / 2]);
+ TestEraseValue(stopwatch2, eaSetUint32, &intVector[0], &intVector[intVector.size() / 2]);
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/erase/val", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase(iterator position)
+ ///////////////////////////////
+
+ TestErasePosition(stopwatch1, stdSetUint32);
+ TestErasePosition(stopwatch2, eaSetUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/erase/pos", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(),
+ GetStdSTLType() == kSTLMS ? "MS uses a code bloating implementation of erase." : NULL);
+
+
+ ///////////////////////////////
+ // Test erase(iterator first, iterator last)
+ ///////////////////////////////
+
+ TestEraseRange(stopwatch1, stdSetUint32);
+ TestEraseRange(stopwatch2, eaSetUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/erase range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test clear()
+ ///////////////////////////////
+
+ TestClear(stopwatch1, stdSetUint32);
+ TestClear(stopwatch2, eaSetUint32);
+
+ if(i == 1)
+ Benchmark::AddResult("set<uint32_t>/clear", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkSort.cpp b/EASTL/benchmark/source/BenchmarkSort.cpp
new file mode 100644
index 0000000..ccd2f43
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkSort.cpp
@@ -0,0 +1,1399 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/bonus/sort_extra.h>
+#include <EASTL/sort.h>
+#include <EASTL/vector.h>
+#include <EAStdC/EAStopwatch.h>
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stdlib.h>
+#include <algorithm>
+#include <functional>
+#include <vector>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+using namespace EA;
+
+
+namespace
+{
+ struct ValuePair
+ {
+ uint32_t key;
+ uint32_t v;
+ };
+
+ struct VPCompare
+ {
+ bool operator()(const ValuePair& vp1, const ValuePair& vp2) const
+ {
+ // return *(const uint64_t*)&vp1 < *(const uint64_t*)&vp2;
+ return (vp1.key == vp2.key) ? (vp1.v < vp2.v) : (vp1.key < vp2.key);
+ }
+ };
+
+ bool operator<(const ValuePair& vp1, const ValuePair& vp2)
+ {
+ // return *(const uint64_t*)&vp1 < *(const uint64_t*)&vp2;
+ return (vp1.key == vp2.key) ? (vp1.v < vp2.v) : (vp1.key < vp2.key);
+ }
+
+ bool operator==(const ValuePair& vp1, const ValuePair& vp2)
+ {
+ // return *(const uint64_t*)&vp1 == *(const uint64_t*)&vp2;
+ return (vp1.key == vp2.key) && (vp1.v == vp2.v);
+ }
+}
+
+// VPCompareC
+// Useful for testing the the C qsort function.
+int VPCompareC(const void* elem1, const void* elem2)
+{
+ return (int)(*(const uint64_t*)elem1 - *(const uint64_t*)elem2);
+}
+
+
+typedef std::vector<ValuePair> StdVectorVP;
+typedef eastl::vector<ValuePair> EaVectorVP;
+
+typedef std::vector<uint32_t> StdVectorInt;
+typedef eastl::vector<uint32_t> EaVectorInt;
+
+typedef std::vector<TestObject> StdVectorTO;
+typedef eastl::vector<TestObject> EaVectorTO;
+
+
+namespace
+{
+ #ifndef EA_PREFIX_NO_INLINE
+ #ifdef _MSC_VER
+ #define EA_PREFIX_NO_INLINE EA_NO_INLINE
+ #define EA_POSTFIX_NO_INLINE
+ #else
+ #define EA_PREFIX_NO_INLINE
+ #define EA_POSTFIX_NO_INLINE EA_NO_INLINE
+ #endif
+ #endif
+
+ EA_PREFIX_NO_INLINE void TestQuickSortStdVP (EA::StdC::Stopwatch& stopwatch, StdVectorVP& stdVectorVP) EA_POSTFIX_NO_INLINE;
+ EA_PREFIX_NO_INLINE void TestQuickSortEaVP (EA::StdC::Stopwatch& stopwatch, EaVectorVP& eaVectorVP) EA_POSTFIX_NO_INLINE;
+ EA_PREFIX_NO_INLINE void TestQuickSortStdInt(EA::StdC::Stopwatch& stopwatch, StdVectorInt& stdVectorInt) EA_POSTFIX_NO_INLINE;
+ EA_PREFIX_NO_INLINE void TestQuickSortEaInt (EA::StdC::Stopwatch& stopwatch, EaVectorInt& eaVectorInt) EA_POSTFIX_NO_INLINE;
+ EA_PREFIX_NO_INLINE void TestQuickSortStdTO (EA::StdC::Stopwatch& stopwatch, StdVectorTO& stdVectorTO) EA_POSTFIX_NO_INLINE;
+ EA_PREFIX_NO_INLINE void TestQuickSortEaTO (EA::StdC::Stopwatch& stopwatch, EaVectorTO& eaVectorTO) EA_POSTFIX_NO_INLINE;
+
+
+
+ void TestQuickSortStdVP(EA::StdC::Stopwatch& stopwatch, StdVectorVP& stdVectorVP)
+ {
+ stopwatch.Restart();
+ std::sort(stdVectorVP.begin(), stdVectorVP.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)stdVectorVP[0].key);
+ }
+
+
+ void TestQuickSortEaVP(EA::StdC::Stopwatch& stopwatch, EaVectorVP& eaVectorVP)
+ {
+ stopwatch.Restart();
+ eastl::quick_sort(eaVectorVP.begin(), eaVectorVP.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eaVectorVP[0].key);
+ }
+
+
+ void TestQuickSortStdInt(EA::StdC::Stopwatch& stopwatch, StdVectorInt& stdVectorInt)
+ {
+ stopwatch.Restart();
+ std::sort(stdVectorInt.begin(), stdVectorInt.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)stdVectorInt[0]);
+ }
+
+
+ void TestQuickSortEaInt(EA::StdC::Stopwatch& stopwatch, EaVectorInt& eaVectorInt)
+ {
+ stopwatch.Restart();
+ eastl::quick_sort(eaVectorInt.begin(), eaVectorInt.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eaVectorInt[0]);
+ }
+
+
+ void TestQuickSortStdTO(EA::StdC::Stopwatch& stopwatch, StdVectorTO& stdVectorTO)
+ {
+ stopwatch.Restart();
+ std::sort(stdVectorTO.begin(), stdVectorTO.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)stdVectorTO[0].mX);
+ }
+
+
+ void TestQuickSortEaTO(EA::StdC::Stopwatch& stopwatch, EaVectorTO& eaVectorTO)
+ {
+ stopwatch.Restart();
+ eastl::quick_sort(eaVectorTO.begin(), eaVectorTO.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eaVectorTO[0].mX);
+ }
+
+} // namespace
+
+
+namespace
+{
+ enum SortFunctionType
+ {
+ sf_qsort, // C qsort
+ sf_shell_sort, // eastl::shell_sort.
+ sf_heap_sort, // eastl::heap_sort
+ sf_merge_sort, // eastl::merge_sort
+ sf_merge_sort_buffer, // eastl::merge_sort_buffer
+ sf_comb_sort, // eastl::comb_sort
+ sf_bubble_sort, // eastl::bubble_sort
+ sf_selection_sort, // eastl::selection_sort
+ sf_shaker_sort, // eastl::shaker_sort
+ sf_quick_sort, // eastl::quick_sort
+ sf_tim_sort, // eastl::tim_sort
+ sf_insertion_sort, // eastl::insertion_sort
+ sf_std_sort, // std::sort
+ sf_std_stable_sort, // std::stable_sort
+ sf_radix_sort, // eastl::radix_sort (unconventional sort)
+ sf_count //
+ };
+
+ const char* GetSortFunctionName(int sortFunctionType)
+ {
+ switch (sortFunctionType)
+ {
+ case sf_quick_sort:
+ return "eastl::sort";
+
+ case sf_tim_sort:
+ return "eastl::tim_sort";
+
+ case sf_insertion_sort:
+ return "eastl::insertion_sort";
+
+ case sf_shell_sort:
+ return "eastl::shell_sort";
+
+ case sf_heap_sort:
+ return "eastl::heap_sort";
+
+ case sf_merge_sort:
+ return "eastl::merge_sort";
+
+ case sf_merge_sort_buffer:
+ return "eastl::merge_sort_buffer";
+
+ case sf_comb_sort:
+ return "eastl::comb_sort";
+
+ case sf_bubble_sort:
+ return "eastl::bubble_sort";
+
+ case sf_selection_sort:
+ return "eastl::selection_sort";
+
+ case sf_shaker_sort:
+ return "eastl::shaker_sort";
+
+ case sf_radix_sort:
+ return "eastl::radix_sort";
+
+ case sf_qsort:
+ return "qsort";
+
+ case sf_std_sort:
+ return "std::sort";
+
+ case sf_std_stable_sort:
+ return "std::stable_sort";
+
+ default:
+ return "unknown";
+ }
+ }
+
+
+ enum RandomizationType
+ {
+ kRandom, // Completely random data.
+ kRandomSorted, // Random values already sorted.
+ kOrdered, // Already sorted.
+ kMostlyOrdered, // Partly sorted already.
+ kRandomizationTypeCount
+ };
+
+ const char* GetRandomizationTypeName(int randomizationType)
+ {
+ switch (randomizationType)
+ {
+ case kRandom:
+ return "random";
+
+ case kRandomSorted:
+ return "random sorted";
+
+ case kOrdered:
+ return "ordered";
+
+ case kMostlyOrdered:
+ return "mostly ordered";
+
+ default:
+ return "unknown";
+ }
+ }
+
+ template <typename ElementType, typename RandomType>
+ void Randomize(eastl::vector<ElementType>& v, EA::UnitTest::RandGenT<RandomType>& rng, RandomizationType type)
+ {
+ typedef RandomType value_type;
+
+ switch (type)
+ {
+ default:
+ case kRandomizationTypeCount: // We specify this only to avoid a compiler warning about not testing for it.
+ case kRandom:
+ {
+ eastl::generate(v.begin(), v.end(), rng);
+ break;
+ }
+
+ case kRandomSorted:
+ {
+ // This randomization type differs from kOrdered because the set of values is random (but sorted), in the kOrdered
+ // case the set of values is contiguous (i.e. 0, 1, ..., n) which can have different performance characteristics.
+ // For example, radix_sort performs poorly for kOrdered.
+ eastl::generate(v.begin(), v.end(), rng);
+ eastl::sort(v.begin(), v.end());
+ break;
+ }
+
+ case kOrdered:
+ {
+ for(eastl_size_t i = 0; i < v.size(); ++i)
+ v[i] = value_type((value_type)i); // Note that value_type may be a struct and not an integer. Thus the casting and construction here.
+ break;
+ }
+
+ case kMostlyOrdered:
+ {
+ for(eastl_size_t i = 0; i < v.size(); ++i)
+ v[i] = value_type((value_type)i); // Note that value_type may be a struct and not an integer. Thus the casting and construction here.
+
+ // We order random segments.
+ // The algorithm below in practice will make slightly more than kPercentOrdered be ordered.
+ const eastl_size_t kPercentOrdered = 80; // In actuality, due to statistics, the actual ordered percent will be about 82-85%.
+
+ for(eastl_size_t n = 0, s = v.size(), nEnd = ((s < (100 - kPercentOrdered)) ? 1 : (s / (100 - kPercentOrdered))); n < nEnd; n++)
+ {
+ eastl_size_t i = rng.mRand.RandLimit((uint32_t)s);
+ eastl_size_t j = rng.mRand.RandLimit((uint32_t)s);
+
+ eastl::swap(v[i], v[j]);
+ }
+
+ break;
+ }
+ }
+ }
+
+
+ char gSlowAssignBuffer1[256] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* ... */};
+ char gSlowAssignBuffer2[256] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* ... */};
+
+
+ // SlowAssign
+ // Implements an object which has slow assign performance.
+ template <typename T>
+ struct SlowAssign
+ {
+ typedef T key_type;
+ T x;
+
+ static int nAssignCount;
+
+ SlowAssign()
+ { x = 0; memcpy(gSlowAssignBuffer1, gSlowAssignBuffer2, sizeof(gSlowAssignBuffer1)); }
+
+ SlowAssign(const SlowAssign& sa)
+ { ++nAssignCount; x = sa.x; memcpy(gSlowAssignBuffer1, gSlowAssignBuffer2, sizeof(gSlowAssignBuffer1)); }
+
+ SlowAssign& operator=(const SlowAssign& sa)
+ { ++nAssignCount; x = sa.x; memcpy(gSlowAssignBuffer1, gSlowAssignBuffer2, sizeof(gSlowAssignBuffer1)); return *this; }
+
+ SlowAssign& operator=(int a)
+ { x = (T)a; return *this; }
+
+ static void Reset()
+ { nAssignCount = 0; }
+ };
+
+ template<> int SlowAssign<uint32_t>::nAssignCount = 0;
+
+ template <typename T>
+ bool operator <(const SlowAssign<T>& a, const SlowAssign<T>& b)
+ { return a.x < b.x; }
+
+
+ // SlowCompare
+ // Implements a compare which is N time slower than a simple integer compare.
+ template <typename T>
+ struct SlowCompare
+ {
+ static int nCompareCount;
+
+ bool operator()(T a, T b)
+ {
+ ++nCompareCount;
+
+ return (a < b) && // It happens that gSlowAssignBuffer1 is always zeroed.
+ (gSlowAssignBuffer1[0] == 0) && (gSlowAssignBuffer1[1] == 0) && (gSlowAssignBuffer1[1] == 0) &&
+ (gSlowAssignBuffer1[2] == 0) && (gSlowAssignBuffer1[4] == 0) && (gSlowAssignBuffer1[5] == 0);
+ }
+
+ static void Reset() { nCompareCount = 0; }
+ };
+
+ template <>
+ int SlowCompare<int32_t>::nCompareCount = 0;
+
+
+ // qsort callback functions
+ // qsort compare function returns negative if b > a and positive if a > b.
+ template <typename T>
+ int CompareInteger(const void* a, const void* b)
+ {
+ // Even though you see the following in Internet example code, it doesn't work!
+ // The reason is that it works only if a and b are both >= 0, otherwise large
+ // values can cause integer register wraparound. A similar kind of problem happens
+ // if you try to do the same thing with floating point value compares.
+ // See http://www.akalin.cx/2006/06/23/on-the-qsort-comparison-function/
+ // Internet exmaple code:
+ // return *(const int32_t*)a - *(const int32_t*)b;
+
+ // This double comparison might seem like it's crippling qsort against the
+ // STL-based sorts which do a single compare. But consider that the returning
+ // of -1, 0, +1 gives qsort more information, and its logic takes advantage
+ // of that.
+ if (*(const T*)a < *(const T*)b)
+ return -1;
+ if (*(const T*)a > *(const T*)b)
+ return +1;
+ return 0;
+ }
+
+
+ int SlowCompareInt32(const void* a, const void* b)
+ {
+ ++SlowCompare<int32_t>::nCompareCount;
+
+ // This code is similar in performance to the C++ SlowCompare template functor above.
+ if((gSlowAssignBuffer1[0] == 0) && (gSlowAssignBuffer1[1] == 0) &&
+ (gSlowAssignBuffer1[1] == 0) && (gSlowAssignBuffer1[2] == 0) &&
+ (gSlowAssignBuffer1[4] == 0) && (gSlowAssignBuffer1[5] == 0))
+ {
+ if (*(const int32_t*)a < *(const int32_t*)b)
+ return -1;
+ if (*(const int32_t*)a > *(const int32_t*)b)
+ return +1;
+ }
+
+ return 0;
+ }
+
+ template <typename slow_assign_type>
+ struct slow_assign_extract_radix_key
+ {
+ typedef typename slow_assign_type::key_type radix_type;
+
+ const radix_type operator()(const slow_assign_type& obj) const
+ {
+ return obj.x;
+ }
+ };
+
+ template <typename integer_type>
+ struct identity_extract_radix_key
+ {
+ typedef integer_type radix_type;
+
+ const radix_type operator()(const integer_type& x) const
+ {
+ return x;
+ }
+ };
+} // namespace
+
+
+struct BenchmarkResult
+{
+ uint64_t mTime;
+ uint64_t mCompareCount;
+ uint64_t mAssignCount;
+
+ BenchmarkResult() : mTime(0), mCompareCount(0), mAssignCount(0) {}
+};
+
+
+int CompareSortPerformance()
+{
+ // Sizes of arrays to be sorted.
+ const eastl_size_t kSizes[] = { 10, 100, 1000, 10000 };
+ const eastl_size_t kSizesCount = EAArrayCount(kSizes);
+ static BenchmarkResult sResults[kRandomizationTypeCount][kSizesCount][sf_count];
+ int nErrorCount = 0;
+
+ EA::UnitTest::ReportVerbosity(2, "Sort comparison\n");
+ EA::UnitTest::ReportVerbosity(2, "Random seed = %u\n", (unsigned)EA::UnitTest::GetRandSeed());
+
+ EA::UnitTest::RandGenT<int32_t> rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatchGlobal(EA::StdC::Stopwatch::kUnitsSeconds);
+ const eastl_size_t kArraySizeMax = *eastl::max_element(eastl::begin(kSizes), eastl::end(kSizes));
+ const int kRunCount = 4;
+
+ #if !defined(EA_DEBUG)
+ EA::UnitTest::SetHighThreadPriority();
+ #endif
+
+ eastl::vector<SortFunctionType> allSortFunctions;
+ for (int i = 0; i < sf_count; i++)
+ {
+ allSortFunctions.push_back(SortFunctionType(i));
+ }
+
+ {
+ auto& sortFunctions = allSortFunctions;
+
+ // Regular speed test.
+ // In this case we test the sorting of integral values.
+ // This is probably the most common type of comparison.
+ EA::UnitTest::ReportVerbosity(2, "Sort comparison: Regular speed test\n");
+
+ typedef uint32_t ElementType;
+ typedef eastl::less<ElementType> CompareFunction;
+
+ eastl::string sOutput;
+ sOutput.set_capacity(100000);
+ ElementType* pBuffer = new ElementType[kArraySizeMax];
+
+ memset(sResults, 0, sizeof(sResults));
+
+ stopwatchGlobal.Restart();
+
+ for (int c = 0; c < kRunCount; c++)
+ {
+ for (int i = 0; i < kRandomizationTypeCount; i++)
+ {
+ for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++)
+ {
+ const eastl_size_t size = kSizes[sizeType];
+
+ for (SortFunctionType sortFunction : sortFunctions)
+ {
+ eastl::vector<ElementType> v(size);
+
+ rng.SetSeed(EA::UnitTest::GetRandSeed());
+ Randomize(v, rng, (RandomizationType)i);
+
+ switch (sortFunction)
+ {
+ case sf_quick_sort:
+ stopwatch.Restart();
+ eastl::quick_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_tim_sort:
+ stopwatch.Restart();
+ eastl::tim_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_insertion_sort:
+ stopwatch.Restart();
+ eastl::insertion_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_shell_sort:
+ stopwatch.Restart();
+ eastl::shell_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_heap_sort:
+ stopwatch.Restart();
+ eastl::heap_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_merge_sort:
+ stopwatch.Restart();
+ eastl::merge_sort(v.begin(), v.end(), *get_default_allocator((EASTLAllocatorType*)NULL), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_merge_sort_buffer:
+ stopwatch.Restart();
+ eastl::merge_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_comb_sort:
+ stopwatch.Restart();
+ eastl::comb_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_bubble_sort:
+ stopwatch.Restart();
+ eastl::bubble_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_selection_sort:
+ stopwatch.Restart();
+ eastl::selection_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_shaker_sort:
+ stopwatch.Restart();
+ eastl::shaker_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_radix_sort:
+ stopwatch.Restart();
+ eastl::radix_sort<ElementType*, identity_extract_radix_key<ElementType>>(v.begin(), v.end(), pBuffer);
+ stopwatch.Stop();
+ break;
+
+ case sf_qsort:
+ stopwatch.Restart();
+ qsort(&v[0], (size_t)v.size(), sizeof(ElementType), CompareInteger<ElementType>);
+ stopwatch.Stop();
+ break;
+
+ case sf_std_sort:
+ stopwatch.Restart();
+ std::sort(v.data(), v.data() + v.size(), std::less<ElementType>());
+ stopwatch.Stop();
+ break;
+
+ case sf_std_stable_sort:
+ stopwatch.Restart();
+ std::stable_sort(v.data(), v.data() + v.size(), std::less<ElementType>());
+ stopwatch.Stop();
+ break;
+
+ case sf_count:
+ default:
+ // unsupported
+ break;
+ }
+
+ const uint64_t elapsedTime = (uint64_t)stopwatch.GetElapsedTime();
+
+ // If this result was faster than a previously fastest result, record this one instead.
+ if ((c == 0) || (elapsedTime < sResults[i][sizeType][sortFunction].mTime))
+ sResults[i][sizeType][sortFunction].mTime = elapsedTime;
+
+ VERIFY(eastl::is_sorted(v.begin(), v.end()));
+
+ } // for each sort function...
+
+ } // for each size type...
+
+ } // for each randomization type...
+
+ } // for each run
+
+ EA::UnitTest::ReportVerbosity(2, "Total time: %.2f s\n", stopwatchGlobal.GetElapsedTimeFloat());
+
+ delete[] pBuffer;
+
+ // Now print the results.
+ for (int i = 0; i < kRandomizationTypeCount; i++)
+ {
+ for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++)
+ {
+ const eastl_size_t size = kSizes[sizeType];
+
+ for (SortFunctionType sortFunction : sortFunctions)
+ {
+ sOutput.append_sprintf("%25s, %14s, Size: %8u, Time: %14" PRIu64 " ticks %0.2f ticks/elem\n",
+ GetSortFunctionName(sortFunction), GetRandomizationTypeName(i),
+ (unsigned)size, sResults[i][sizeType][sortFunction].mTime,
+ float(sResults[i][sizeType][sortFunction].mTime)/float(size));
+ }
+ sOutput.append("\n");
+ }
+ }
+
+ EA::UnitTest::ReportVerbosity(2, "%s\n\n", sOutput.c_str());
+ }
+
+ {
+ // Do a speed test for the case of slow compares.
+ // By this we mean to compare sorting speeds when the comparison of elements is slow.
+ // Sort functions use element comparison to tell where elements go and use element
+ // movement to get them there. But some sorting functions accomplish sorting performance by
+ // minimizing the amount of movement, some minimize the amount of comparisons, and the
+ // best do a good job of minimizing both.
+ auto sortFunctions = allSortFunctions;
+ // We can't test this radix_sort because what we need isn't exposed.
+ sortFunctions.erase(eastl::remove(sortFunctions.begin(), sortFunctions.end(), sf_radix_sort), sortFunctions.end());
+ EA::UnitTest::ReportVerbosity(2, "Sort comparison: Slow compare speed test\n");
+
+ typedef int32_t ElementType;
+ typedef SlowCompare<ElementType> CompareFunction;
+
+ eastl::string sOutput;
+ sOutput.set_capacity(100000);
+ ElementType* pBuffer = new ElementType[kArraySizeMax];
+
+ memset(sResults, 0, sizeof(sResults));
+
+ stopwatchGlobal.Restart();
+
+ for (int c = 0; c < kRunCount; c++)
+ {
+ for (int i = 0; i < kRandomizationTypeCount; i++)
+ {
+ for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++)
+ {
+ const eastl_size_t size = kSizes[sizeType];
+
+ for (SortFunctionType sortFunction : sortFunctions)
+ {
+ eastl::vector<ElementType> v(size);
+
+ rng.SetSeed(EA::UnitTest::GetRandSeed());
+ Randomize(v, rng, (RandomizationType)i);
+ CompareFunction::Reset();
+
+ switch (sortFunction)
+ {
+ case sf_quick_sort:
+ stopwatch.Restart();
+ eastl::quick_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_tim_sort:
+ stopwatch.Restart();
+ eastl::tim_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_insertion_sort:
+ stopwatch.Restart();
+ eastl::insertion_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_shell_sort:
+ stopwatch.Restart();
+ eastl::shell_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_heap_sort:
+ stopwatch.Restart();
+ eastl::heap_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_merge_sort:
+ stopwatch.Restart();
+ eastl::merge_sort(v.begin(), v.end(), *get_default_allocator((EASTLAllocatorType*)NULL), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_merge_sort_buffer:
+ stopwatch.Restart();
+ eastl::merge_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_comb_sort:
+ stopwatch.Restart();
+ eastl::comb_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_bubble_sort:
+ stopwatch.Restart();
+ eastl::bubble_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_selection_sort:
+ stopwatch.Restart();
+ eastl::selection_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_shaker_sort:
+ stopwatch.Restart();
+ eastl::shaker_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_qsort:
+ stopwatch.Restart();
+ qsort(&v[0], (size_t)v.size(), sizeof(ElementType), SlowCompareInt32);
+ stopwatch.Stop();
+ break;
+
+ case sf_std_sort:
+ stopwatch.Restart();
+ std::sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_std_stable_sort:
+ stopwatch.Restart();
+ std::stable_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_radix_sort:
+ case sf_count:
+ default:
+ // unsupported
+ break;
+ }
+
+ const uint64_t elapsedTime = (uint64_t)stopwatch.GetElapsedTime();
+
+ // If this result was faster than a previously fastest result, record this one instead.
+ if ((c == 0) || (elapsedTime < sResults[i][sizeType][sortFunction].mTime))
+ {
+ sResults[i][sizeType][sortFunction].mTime = elapsedTime;
+ sResults[i][sizeType][sortFunction].mCompareCount = (uint64_t)CompareFunction::nCompareCount;
+ }
+
+ VERIFY(eastl::is_sorted(v.begin(), v.end()));
+ } // for each sort function...
+
+ } // for each size type...
+
+ } // for each randomization type...
+
+ } // for each run
+
+ EA::UnitTest::ReportVerbosity(2, "Total time: %.2f s\n", stopwatchGlobal.GetElapsedTimeFloat());
+
+ delete[] pBuffer;
+
+ // Now print the results.
+ for (int i = 0; i < kRandomizationTypeCount; i++)
+ {
+ for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++)
+ {
+ const eastl_size_t size = kSizes[sizeType];
+
+ for (SortFunctionType sortFunction : sortFunctions)
+ {
+ sOutput.append_sprintf("%25s, %14s, Size: %6u, Time: %11" PRIu64 " ticks, Compares: %11" PRIu64 "\n",
+ GetSortFunctionName(sortFunction), GetRandomizationTypeName(i),
+ (unsigned)size, sResults[i][sizeType][sortFunction].mTime,
+ sResults[i][sizeType][sortFunction].mCompareCount);
+ }
+
+ sOutput.append("\n");
+ }
+ }
+
+ EA::UnitTest::ReportVerbosity(2, "%s\n\n", sOutput.c_str());
+ }
+
+ {
+ // Do a speed test for the case of slow assignment.
+ // By this we mean to compare sorting speeds when the movement of elements is slow.
+ // Sort functions use element comparison to tell where elements go and use element
+ // movement to get them there. But some sorting functions accomplish sorting performance by
+ // minimizing the amount of movement, some minimize the amount of comparisons, and the
+ // best do a good job of minimizing both.
+ auto sortFunctions = allSortFunctions;
+ // Can't implement this for qsort because the C standard library doesn't expose it.
+ // We could implement it by copying and modifying the source code.
+ sortFunctions.erase(eastl::remove(sortFunctions.begin(), sortFunctions.end(), sf_qsort), sortFunctions.end());
+
+ EA::UnitTest::ReportVerbosity(2, "Sort comparison: Slow assignment speed test\n");
+
+ typedef SlowAssign<uint32_t> ElementType;
+ typedef eastl::less<ElementType> CompareFunction;
+
+ eastl::string sOutput;
+ sOutput.set_capacity(100000);
+ ElementType* pBuffer = new ElementType[kArraySizeMax];
+
+ memset(sResults, 0, sizeof(sResults));
+
+ stopwatchGlobal.Restart();
+
+ for (int c = 0; c < kRunCount; c++)
+ {
+ for (int i = 0; i < kRandomizationTypeCount; i++)
+ {
+ for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++)
+ {
+ const eastl_size_t size = kSizes[sizeType];
+
+ for (SortFunctionType sortFunction : sortFunctions)
+ {
+ eastl::vector<ElementType> v(size);
+
+ Randomize<ElementType>(v, rng, (RandomizationType)i);
+ ElementType::Reset();
+
+ switch (sortFunction)
+ {
+ case sf_quick_sort:
+ stopwatch.Restart();
+ eastl::quick_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_tim_sort:
+ stopwatch.Restart();
+ eastl::tim_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_insertion_sort:
+ stopwatch.Restart();
+ eastl::insertion_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_shell_sort:
+ stopwatch.Restart();
+ eastl::shell_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_heap_sort:
+ stopwatch.Restart();
+ eastl::heap_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_merge_sort:
+ stopwatch.Restart();
+ eastl::merge_sort(v.begin(), v.end(), *get_default_allocator((EASTLAllocatorType*)NULL), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_merge_sort_buffer:
+ stopwatch.Restart();
+ eastl::merge_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_comb_sort:
+ stopwatch.Restart();
+ eastl::comb_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_bubble_sort:
+ stopwatch.Restart();
+ eastl::bubble_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_selection_sort:
+ stopwatch.Restart();
+ eastl::selection_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_shaker_sort:
+ stopwatch.Restart();
+ eastl::shaker_sort(v.begin(), v.end(), CompareFunction());
+ stopwatch.Stop();
+ break;
+
+ case sf_radix_sort:
+ stopwatch.Restart();
+ eastl::radix_sort<ElementType*, slow_assign_extract_radix_key<ElementType>>(v.begin(), v.end(), pBuffer);
+ stopwatch.Stop();
+ break;
+
+ case sf_std_sort:
+ stopwatch.Restart();
+ std::sort(v.begin(), v.end(), std::less<ElementType>());
+ stopwatch.Stop();
+ break;
+
+ case sf_std_stable_sort:
+ stopwatch.Restart();
+ std::stable_sort(v.begin(), v.end(), std::less<ElementType>());
+ stopwatch.Stop();
+ break;
+
+ case sf_qsort:
+ case sf_count:
+ default:
+ // unsupported
+ break;
+ }
+
+ const uint64_t elapsedTime = (uint64_t)stopwatch.GetElapsedTime();
+
+ // If this result was faster than a previously fastest result, record this one instead.
+ if ((c == 0) || (elapsedTime < sResults[i][sizeType][sortFunction].mTime))
+ {
+ sResults[i][sizeType][sortFunction].mTime = elapsedTime;
+ sResults[i][sizeType][sortFunction].mAssignCount = (uint64_t)ElementType::nAssignCount;
+ }
+
+ VERIFY(eastl::is_sorted(v.begin(), v.end()));
+
+ } // for each sort function...
+
+ } // for each size type...
+
+ } // for each randomization type...
+
+ } // for each run
+
+ EA::UnitTest::ReportVerbosity(2, "Total time: %.2f s\n", stopwatchGlobal.GetElapsedTimeFloat());
+
+ delete[] pBuffer;
+
+ // Now print the results.
+ for (int i = 0; i < kRandomizationTypeCount; i++)
+ {
+ for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++)
+ {
+ const eastl_size_t size = kSizes[sizeType];
+
+ for (SortFunctionType sortFunction : sortFunctions)
+ {
+ sOutput.append_sprintf("%25s, %14s, Size: %6u, Time: %11" PRIu64 " ticks, Assignments: %11" PRIu64 "\n",
+ GetSortFunctionName(sortFunction), GetRandomizationTypeName(i),
+ (unsigned)size, sResults[i][sizeType][sortFunction].mTime,
+ sResults[i][sizeType][sortFunction].mAssignCount);
+ }
+
+ sOutput.append("\n");
+ }
+ }
+ EA::UnitTest::ReportVerbosity(2, "%s\n", sOutput.c_str());
+ }
+
+ #if !defined(EA_DEBUG)
+ EA::UnitTest::SetNormalThreadPriority();
+ #endif
+
+ return nErrorCount;
+}
+
+typedef eastl::function<void(eastl::string &output, const char* sortFunction, const char* randomizationType, size_t size, size_t numSubArrays, const BenchmarkResult &result)> OutputResultCallback;
+typedef eastl::function<void(BenchmarkResult &result)> PostExecuteCallback;
+typedef eastl::function<void()> PreExecuteCallback;
+
+
+template<class ElementType, class CompareFunction>
+static int CompareSmallInputSortPerformanceHelper(eastl::vector<eastl_size_t> &arraySizes, eastl::vector<SortFunctionType> &sortFunctions, const PreExecuteCallback &preExecuteCallback, const PostExecuteCallback &postExecuteCallback, const OutputResultCallback &outputResultCallback)
+{
+ int nErrorCount = 0;
+
+ EA::UnitTest::RandGenT<int32_t> rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatchGlobal(EA::StdC::Stopwatch::kUnitsSeconds);
+ const eastl_size_t kArraySizeMax = *eastl::max_element(eastl::begin(arraySizes), eastl::end(arraySizes));
+ const int kRunCount = 4;
+ const int numSubArrays = 128;
+
+ eastl::string sOutput;
+ sOutput.set_capacity(100000);
+ ElementType* pBuffer = new ElementType[kArraySizeMax];
+
+ stopwatchGlobal.Restart();
+
+ for (int i = 0; i < kRandomizationTypeCount; i++)
+ {
+ for (size_t size : arraySizes)
+ {
+ for (SortFunctionType sortFunction : sortFunctions)
+ {
+ BenchmarkResult bestResult{};
+
+ for (int c = 0; c < kRunCount; c++)
+ {
+ eastl::vector<ElementType> v(size * numSubArrays);
+
+ rng.SetSeed(EA::UnitTest::GetRandSeed());
+ Randomize(v, rng, (RandomizationType)i);
+ preExecuteCallback();
+
+ switch (sortFunction)
+ {
+ case sf_quick_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::quick_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_tim_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::tim_sort_buffer(begin, begin + size, pBuffer, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_insertion_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::insertion_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_shell_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::shell_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_heap_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::heap_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_merge_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::merge_sort(begin, begin + size, *get_default_allocator((EASTLAllocatorType*)NULL), CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_merge_sort_buffer:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::merge_sort_buffer(begin, begin + size, pBuffer, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_comb_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::comb_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_bubble_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::bubble_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_selection_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::selection_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_shaker_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ eastl::shaker_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_std_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ std::sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_std_stable_sort:
+ stopwatch.Restart();
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ std::stable_sort(begin, begin + size, CompareFunction());
+ }
+ stopwatch.Stop();
+ break;
+
+ case sf_qsort:
+ case sf_radix_sort:
+ case sf_count:
+ default:
+ EATEST_VERIFY_F(false, "Missing case statement for sort function %s.", GetSortFunctionName(sortFunction));
+ break;
+ }
+
+ BenchmarkResult result {};
+ result.mTime = (uint64_t)stopwatch.GetElapsedTime();
+ postExecuteCallback(result);
+
+ // If this result was faster than a previously fastest result, record this one instead.
+ if ((c == 0) || (result.mTime < bestResult.mTime))
+ bestResult = result;
+
+ for (auto begin = v.begin(); begin != v.end(); begin += size)
+ {
+ VERIFY(eastl::is_sorted(begin, begin + size));
+ }
+ } // for each run
+
+ outputResultCallback(sOutput, GetSortFunctionName(sortFunction), GetRandomizationTypeName(i), size, numSubArrays, bestResult);
+
+ } // for each sort function...
+ sOutput.append("\n");
+
+ } // for each size type...
+
+ } // for each randomization type...
+
+ EA::UnitTest::ReportVerbosity(2, "Total time: %.2f s\n", stopwatchGlobal.GetElapsedTimeFloat());
+ EA::UnitTest::ReportVerbosity(2, "%s\n", sOutput.c_str());
+
+ delete[] pBuffer;
+ return nErrorCount;
+}
+
+static int CompareSmallInputSortPerformance()
+{
+ int nErrorCount = 0;
+ eastl::vector<eastl_size_t> arraySizes{1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 64, 128, 256};
+ // Test quick sort and merge sort to provide a "base line" for performance. The other sort algorithms are mostly
+ // O(n^2) and they are benchmarked to determine what sorts are ideal for sorting small arrays or sub-arrays. (i.e.
+ // this is useful to determine good algorithms to choose as a base case for some of the recursive sorts).
+ eastl::vector<SortFunctionType> sortFunctions{sf_quick_sort, sf_merge_sort_buffer, sf_bubble_sort, sf_comb_sort,
+ sf_insertion_sort, sf_selection_sort, sf_shell_sort, sf_shaker_sort};
+
+ EA::UnitTest::ReportVerbosity(2, "Small Sub-array Sort comparison: Regular speed test\n");
+ nErrorCount += CompareSmallInputSortPerformanceHelper<uint32_t, eastl::less<uint32_t>>(
+ arraySizes, sortFunctions, PreExecuteCallback([]() {}), PostExecuteCallback([](BenchmarkResult&) {}),
+ OutputResultCallback([](eastl::string& output, const char* sortFunction, const char* randomizationType,
+ size_t size, size_t numSubArrays, const BenchmarkResult& result) {
+ output.append_sprintf("%25s, %14s, Size: %8u, Time: %0.1f ticks %0.2f ticks/elem\n", sortFunction,
+ randomizationType, (unsigned)size, float(result.mTime) / float(numSubArrays),
+ float(result.mTime) / float(size * numSubArrays));
+ }));
+
+ EA::UnitTest::ReportVerbosity(2, "Small Sub-array Sort comparison: Slow compare speed test\n");
+ nErrorCount += CompareSmallInputSortPerformanceHelper<int32_t, SlowCompare<int32_t>>(
+ arraySizes, sortFunctions, PreExecuteCallback([]() { SlowCompare<int32_t>::Reset(); }),
+ PostExecuteCallback(
+ [](BenchmarkResult& result) { result.mCompareCount = (uint64_t)SlowCompare<int32_t>::nCompareCount; }),
+ OutputResultCallback([](eastl::string& output, const char* sortFunction, const char* randomizationType,
+ size_t size, size_t numSubArrays, const BenchmarkResult& result) {
+ output.append_sprintf("%25s, %14s, Size: %6u, Time: %0.2f ticks, Compares: %0.2f\n", sortFunction,
+ randomizationType, (unsigned)size, float(result.mTime) / float(numSubArrays),
+ float(result.mCompareCount) / float(numSubArrays));
+ }));
+
+ EA::UnitTest::ReportVerbosity(2, "Small Sub-array Sort comparison: Slow assignment speed test\n");
+ nErrorCount += CompareSmallInputSortPerformanceHelper<SlowAssign<uint32_t>, eastl::less<SlowAssign<uint32_t>>>(
+ arraySizes, sortFunctions, PreExecuteCallback([]() { SlowAssign<uint32_t>::Reset(); }),
+ PostExecuteCallback([](BenchmarkResult& result) {
+ result.mCompareCount = (uint64_t)SlowCompare<int32_t>::nCompareCount;
+ result.mAssignCount = (uint64_t)SlowAssign<uint32_t>::nAssignCount;
+ }),
+ OutputResultCallback([](eastl::string& output, const char* sortFunction, const char* randomizationType,
+ size_t size, size_t numSubArrays, const BenchmarkResult& result) {
+ output.append_sprintf("%25s, %14s, Size: %6u, Time: %0.2f ticks, Assignments: %0.2f\n", sortFunction,
+ randomizationType, (unsigned)size, float(result.mTime) / float(numSubArrays),
+ float(result.mAssignCount) / float(numSubArrays));
+ }));
+
+ return nErrorCount;
+}
+
+
+void BenchmarkSort()
+{
+ EASTLTest_Printf("Sort\n");
+
+ EA::UnitTest::RandGenT<uint32_t> rng(12345678); // For debugging sort code we should use 12345678, for normal testing use EA::UnitTest::GetRandSeed().
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ if (EA::UnitTest::GetVerbosity() >= 3)
+ {
+ CompareSortPerformance();
+ CompareSmallInputSortPerformance();
+ }
+
+ { // Exercise some declarations
+ int nErrorCount = 0;
+
+ ValuePair vp1 = {0, 0}, vp2 = {0, 0};
+ VPCompare c1, c2;
+
+ VERIFY(c1.operator()(vp1, vp2) == c2.operator()(vp1, vp2));
+ VERIFY((vp1 < vp2) || (vp1 == vp2) || !(vp1 == vp2));
+ }
+
+ {
+ eastl::vector<uint32_t> intVector(10000);
+ eastl::generate(intVector.begin(), intVector.end(), rng);
+
+ for (int i = 0; i < 2; i++)
+ {
+ ///////////////////////////////
+ // Test quick_sort/vector/ValuePair
+ ///////////////////////////////
+
+ StdVectorVP stdVectorVP(intVector.size());
+ EaVectorVP eaVectorVP(intVector.size());
+
+ for (eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ {
+ const ValuePair vp = {intVector[j], intVector[j]};
+ stdVectorVP[j] = vp;
+ eaVectorVP[j] = vp;
+ }
+
+ TestQuickSortStdVP(stopwatch1, stdVectorVP);
+ TestQuickSortEaVP (stopwatch2, eaVectorVP);
+
+ if(i == 1)
+ Benchmark::AddResult("sort/q_sort/vector<ValuePair>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ // Benchmark the sorting of something that is already sorted.
+ TestQuickSortStdVP(stopwatch1, stdVectorVP);
+ TestQuickSortEaVP (stopwatch2, eaVectorVP);
+
+ if(i == 1)
+ Benchmark::AddResult("sort/q_sort/vector<ValuePair>/sorted", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test quick_sort/vector/Int
+ ///////////////////////////////
+
+ StdVectorInt stdVectorInt(intVector.size());
+ EaVectorInt eaVectorInt (intVector.size());
+
+ for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ {
+ stdVectorInt[j] = intVector[j];
+ eaVectorInt[j] = intVector[j];
+ }
+
+ TestQuickSortStdInt(stopwatch1, stdVectorInt);
+ TestQuickSortEaInt (stopwatch2, eaVectorInt);
+
+ if(i == 1)
+ Benchmark::AddResult("sort/q_sort/vector<uint32>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ // Benchmark the sorting of something that is already sorted.
+ TestQuickSortStdInt(stopwatch1, stdVectorInt);
+ TestQuickSortEaInt (stopwatch2, eaVectorInt);
+
+ if(i == 1)
+ Benchmark::AddResult("sort/q_sort/vector<uint32>/sorted", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test quick_sort/vector/TestObject
+ ///////////////////////////////
+
+ StdVectorTO stdVectorTO(intVector.size());
+ EaVectorTO eaVectorTO(intVector.size());
+
+ for (eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ {
+ stdVectorTO[j] = TestObject(intVector[j]);
+ eaVectorTO[j] = TestObject(intVector[j]);
+ }
+
+ TestQuickSortStdTO(stopwatch1, stdVectorTO);
+ TestQuickSortEaTO(stopwatch2, eaVectorTO);
+
+ if (i == 1)
+ Benchmark::AddResult("sort/q_sort/vector<TestObject>", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ // Benchmark the sorting of something that is already sorted.
+ TestQuickSortStdTO(stopwatch1, stdVectorTO);
+ TestQuickSortEaTO(stopwatch2, eaVectorTO);
+
+ if (i == 1)
+ Benchmark::AddResult("sort/q_sort/vector<TestObject>/sorted", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test quick_sort/TestObject[]
+ ///////////////////////////////
+
+ // Reset the values back to the unsorted state.
+ for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ {
+ stdVectorTO[j] = TestObject(intVector[j]);
+ eaVectorTO[j] = TestObject(intVector[j]);
+ }
+
+ TestQuickSortStdTO(stopwatch1, stdVectorTO);
+ TestQuickSortEaTO (stopwatch2, eaVectorTO);
+
+ if(i == 1)
+ Benchmark::AddResult("sort/q_sort/TestObject[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ // Benchmark the sorting of something that is already sorted.
+ TestQuickSortStdTO(stopwatch1, stdVectorTO);
+ TestQuickSortEaTO (stopwatch2, eaVectorTO);
+
+ if(i == 1)
+ Benchmark::AddResult("sort/q_sort/TestObject[]/sorted", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+}
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkString.cpp b/EASTL/benchmark/source/BenchmarkString.cpp
new file mode 100644
index 0000000..5dfefbc
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkString.cpp
@@ -0,0 +1,531 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/string.h>
+#include <EASTL/sort.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <algorithm>
+#include <string>
+#include <stdio.h>
+#include <stdlib.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+using namespace EA;
+
+
+namespace
+{
+ template <typename Container>
+ void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 100000; i++)
+ c.push_back((typename Container::value_type)(i & ((typename Container::value_type)~0)));
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename T>
+ void TestInsert1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p)
+ {
+ const typename Container::size_type s = c.size();
+ stopwatch.Restart();
+ for(int i = 0; i < 100; i++)
+ c.insert(s - (typename Container::size_type)(i * 317), p);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestErase1(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ const typename Container::size_type s = c.size();
+ stopwatch.Restart();
+ for(int i = 0; i < 100; i++)
+ c.erase(s - (typename Container::size_type)(i * 339), 7);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename T>
+ void TestReplace1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int n)
+ {
+ const typename Container::size_type s = c.size();
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ c.replace(s - (typename Container::size_type)(i * 5), ((n - 2) + (i & 3)), p, n); // The second argument rotates through n-2, n-1, n, n+1, n-2, etc.
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestReserve(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ const typename Container::size_type s = c.capacity();
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ c.reserve((s - 2) + (i & 3)); // The second argument rotates through n-2, n-1, n, n+1, n-2, etc.
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestSize(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ Benchmark::DoNothing(&c, c.size());
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ int32_t temp = 0;
+ stopwatch.Restart();
+ for(typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++)
+ temp += c[j];
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp);
+ }
+
+
+ template <typename Container>
+ void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ Benchmark::DoNothing(&c, *eastl::find(c.begin(), c.end(), (typename Container::value_type)~0));
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename T>
+ void TestFind1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ Benchmark::DoNothing(&c, c.find(p, (typename Container::size_type)pos, (typename Container::size_type)n));
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container, typename T>
+ void TestRfind1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ Benchmark::DoNothing(&c, c.rfind(p, (typename Container::size_type)pos, (typename Container::size_type)n));
+ stopwatch.Stop();
+ }
+
+ template <typename Container, typename T>
+ void TestFirstOf1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ Benchmark::DoNothing(&c, c.find_first_of(p, (typename Container::size_type)pos, (typename Container::size_type)n));
+ stopwatch.Stop();
+ }
+
+ template <typename Container, typename T>
+ void TestLastOf1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ Benchmark::DoNothing(&c, c.find_last_of(p, (typename Container::size_type)pos, (typename Container::size_type)n));
+ stopwatch.Stop();
+ }
+
+ template <typename Container, typename T>
+ void TestFirstNotOf1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ Benchmark::DoNothing(&c, c.find_first_not_of(p, (typename Container::size_type)pos, (typename Container::size_type)n));
+ stopwatch.Stop();
+ }
+
+ template <typename Container, typename T>
+ void TestLastNotOf1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n)
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 1000; i++)
+ Benchmark::DoNothing(&c, c.find_last_not_of(p, (typename Container::size_type)pos, (typename Container::size_type)n));
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestCompare(EA::StdC::Stopwatch& stopwatch, Container& c1, Container& c2) // size()
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 500; i++)
+ Benchmark::DoNothing(&c1, c1.compare(c2));
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestSwap(EA::StdC::Stopwatch& stopwatch, Container& c1, Container& c2) // size()
+ {
+ stopwatch.Restart();
+ for(int i = 0; i < 10000; i++) // Make sure this is an even count so that when done things haven't changed.
+ {
+ c1.swap(c2);
+ Benchmark::DoNothing(&c1);
+ }
+ stopwatch.Stop();
+ }
+
+} // namespace
+
+
+
+
+void BenchmarkString()
+{
+ EASTLTest_Printf("String\n");
+
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ {
+ for(int i = 0; i < 2; i++)
+ {
+ std::basic_string<char8_t> ss8(16, 0); // We initialize to size of 16 because different implementations may make
+ eastl::basic_string<char8_t> es8(16, 0); // different tradeoffs related to startup size. Initial operations are faster
+ // when strings start with a higher reserve, but they use more memory.
+ std::basic_string<char16_t> ss16(16, 0); // We try to nullify this tradeoff for the tests below by starting all at
+ eastl::basic_string<char16_t> es16(16, 0); // the same baseline allocation.
+
+
+ ///////////////////////////////
+ // Test push_back
+ ///////////////////////////////
+
+ TestPushBack(stopwatch1, ss8);
+ TestPushBack(stopwatch2, es8);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestPushBack(stopwatch1, ss16);
+ TestPushBack(stopwatch2, es16);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test insert(size_type position, const value_type* p)
+ ///////////////////////////////
+
+ const char8_t pInsert1_8[] = { 'a', 0 };
+ TestInsert1(stopwatch1, ss8, pInsert1_8);
+ TestInsert1(stopwatch2, es8, pInsert1_8);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/insert/pos,p", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ const char16_t pInsert1_16[] = { 'a', 0 };
+ TestInsert1(stopwatch1, ss16, pInsert1_16);
+ TestInsert1(stopwatch2, es16, pInsert1_16);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/insert/pos,p", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase(size_type position, size_type n)
+ ///////////////////////////////
+
+ TestErase1(stopwatch1, ss8);
+ TestErase1(stopwatch2, es8);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/erase/pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestErase1(stopwatch1, ss16);
+ TestErase1(stopwatch2, es16);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/erase/pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test replace(size_type position, size_type n1, const value_type* p, size_type n2)
+ ///////////////////////////////
+
+ const int kReplace1Size = 8;
+ const char8_t pReplace1_8[kReplace1Size] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };
+
+ TestReplace1(stopwatch1, ss8, pReplace1_8, kReplace1Size);
+ TestReplace1(stopwatch2, es8, pReplace1_8, kReplace1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/replace/pos,n,p,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ const char16_t pReplace1_16[kReplace1Size] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };
+
+ TestReplace1(stopwatch1, ss16, pReplace1_16, kReplace1Size);
+ TestReplace1(stopwatch2, es16, pReplace1_16, kReplace1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/replace/pos,n,p,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test reserve(size_type)
+ ///////////////////////////////
+
+ TestReserve(stopwatch1, ss8);
+ TestReserve(stopwatch2, es8);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/reserve", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestReserve(stopwatch1, ss16);
+ TestReserve(stopwatch2, es16);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/reserve", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test size()
+ ///////////////////////////////
+
+ TestSize(stopwatch1, ss8);
+ TestSize(stopwatch2, es8);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/size", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSize(stopwatch1, ss16);
+ TestSize(stopwatch2, es16);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/size", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test operator[].
+ ///////////////////////////////
+
+ TestBracket(stopwatch1, ss8);
+ TestBracket(stopwatch2, es8);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestBracket(stopwatch1, ss16);
+ TestBracket(stopwatch2, es16);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test iteration via find().
+ ///////////////////////////////
+
+ TestFind(stopwatch1, ss8);
+ TestFind(stopwatch2, es8);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFind(stopwatch1, ss16);
+ TestFind(stopwatch2, es16);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find(const value_type* p, size_type position, size_type n)
+ ///////////////////////////////
+
+ const int kFind1Size = 7;
+ const char8_t pFind1_8[kFind1Size] = { 'p', 'a', 't', 't', 'e', 'r', 'n' };
+
+ ss8.insert(ss8.size() / 2, pFind1_8);
+ es8.insert(es8.size() / 2, pFind1_8);
+
+ TestFind1(stopwatch1, ss8, pFind1_8, 15, kFind1Size);
+ TestFind1(stopwatch2, es8, pFind1_8, 15, kFind1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/find/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ const char16_t pFind1_16[kFind1Size] = { 'p', 'a', 't', 't', 'e', 'r', 'n' };
+
+ #if !defined(EA_PLATFORM_IPHONE) && (!defined(EA_COMPILER_CLANG) && defined(EA_PLATFORM_MINGW)) // Crashes on iPhone.
+ ss16.insert(ss8.size() / 2, pFind1_16);
+ #endif
+ es16.insert(es8.size() / 2, pFind1_16);
+
+ TestFind1(stopwatch1, ss16, pFind1_16, 15, kFind1Size);
+ TestFind1(stopwatch2, es16, pFind1_16, 15, kFind1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/find/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test rfind(const value_type* p, size_type position, size_type n)
+ ///////////////////////////////
+
+ TestRfind1(stopwatch1, ss8, pFind1_8, 15, kFind1Size);
+ TestRfind1(stopwatch2, es8, pFind1_8, 15, kFind1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/rfind/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestRfind1(stopwatch1, ss16, pFind1_16, 15, kFind1Size);
+ TestRfind1(stopwatch2, es16, pFind1_16, 15, kFind1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/rfind/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ //NOTICE (RASHIN):
+ //FindFirstOf variants are incredibly slow on palm pixi debug builds.
+ //Disabling for now...
+ #if !defined(EA_DEBUG)
+ ///////////////////////////////
+ // Test find_first_of(const value_type* p, size_type position, size_type n
+ ///////////////////////////////
+
+ const int kFindOf1Size = 7;
+ const char8_t pFindOf1_8[kFindOf1Size] = { '~', '~', '~', '~', '~', '~', '~' };
+
+ TestFirstOf1(stopwatch1, ss8, pFindOf1_8, 15, kFindOf1Size);
+ TestFirstOf1(stopwatch2, es8, pFindOf1_8, 15, kFindOf1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/find_first_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ const char16_t pFindOf1_16[kFindOf1Size] = { '~', '~', '~', '~', '~', '~', '~' };
+
+ TestFirstOf1(stopwatch1, ss16, pFindOf1_16, 15, kFindOf1Size);
+ TestFirstOf1(stopwatch2, es16, pFindOf1_16, 15, kFindOf1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/find_first_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find_last_of(const value_type* p, size_type position, size_type n
+ ///////////////////////////////
+
+ TestLastOf1(stopwatch1, ss8, pFindOf1_8, 15, kFindOf1Size);
+ TestLastOf1(stopwatch2, es8, pFindOf1_8, 15, kFindOf1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/find_last_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestLastOf1(stopwatch1, ss16, pFindOf1_16, 15, kFindOf1Size);
+ TestLastOf1(stopwatch2, es16, pFindOf1_16, 15, kFindOf1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/find_last_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find_first_not_of(const value_type* p, size_type position, size_type n
+ ///////////////////////////////
+
+ TestFirstNotOf1(stopwatch1, ss8, pFind1_8, 15, kFind1Size);
+ TestFirstNotOf1(stopwatch2, es8, pFind1_8, 15, kFind1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/find_first_not_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestFirstNotOf1(stopwatch1, ss16, pFind1_16, 15, kFind1Size);
+ TestFirstNotOf1(stopwatch2, es16, pFind1_16, 15, kFind1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/find_first_not_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test find_last_of(const value_type* p, size_type position, size_type n
+ ///////////////////////////////
+
+ TestLastNotOf1(stopwatch1, ss8, pFind1_8, 15, kFind1Size);
+ TestLastNotOf1(stopwatch2, es8, pFind1_8, 15, kFind1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/find_last_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestLastNotOf1(stopwatch1, ss16, pFind1_16, 15, kFind1Size);
+ TestLastNotOf1(stopwatch2, es16, pFind1_16, 15, kFind1Size);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/find_last_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ #endif
+
+ ///////////////////////////////
+ // Test compare()
+ ///////////////////////////////
+
+ std::basic_string<char8_t> ss8X(ss8);
+ eastl::basic_string<char8_t> es8X(es8);
+ std::basic_string<char16_t> ss16X(ss16);
+ eastl::basic_string<char16_t> es16X(es16);
+
+ TestCompare(stopwatch1, ss8, ss8X);
+ TestCompare(stopwatch2, es8, es8X);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/compare", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestCompare(stopwatch1, ss16, ss16X);
+ TestCompare(stopwatch2, es16, es16X);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/compare", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+
+ ///////////////////////////////
+ // Test swap()
+ ///////////////////////////////
+
+ TestSwap(stopwatch1, ss8, ss8X);
+ TestSwap(stopwatch2, es8, es8X);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char8_t>/swap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ TestSwap(stopwatch1, ss16, ss16X);
+ TestSwap(stopwatch2, es16, es16X);
+
+ if(i == 1)
+ Benchmark::AddResult("string<char16_t>/swap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+ }
+ }
+
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkTupleVector.cpp b/EASTL/benchmark/source/BenchmarkTupleVector.cpp
new file mode 100644
index 0000000..3a8e79d
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkTupleVector.cpp
@@ -0,0 +1,667 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/bonus/tuple_vector.h>
+#include <EASTL/sort.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #pragma warning(disable: 4350)
+#endif
+#include <algorithm>
+#include <vector>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+using namespace EA;
+
+
+typedef std::vector<uint64_t> StdVectorUint64;
+typedef eastl::tuple_vector<uint64_t> EaTupleVectorUint64;
+
+ struct PaddingStruct
+{
+ char padding[56] = { 0 };
+};
+static const PaddingStruct DefaultPadding;
+typedef eastl::tuple<uint64_t, PaddingStruct> PaddedTuple;
+typedef std::vector<PaddedTuple> StdVectorUint64Padded;
+typedef eastl::tuple_vector<uint64_t, PaddingStruct> EaTupleVectorUint64Padded;
+
+namespace
+{
+
+
+ //////////////////////////////////////////////////////////////////////////////
+ // MovableType
+ //
+ struct MovableType
+ {
+ int8_t* mpData;
+ enum { kDataSize = 128 };
+
+ MovableType() : mpData(new int8_t[kDataSize])
+ { memset(mpData, 0, kDataSize); }
+
+ MovableType(const MovableType& x) : mpData(new int8_t[kDataSize])
+ { memcpy(mpData, x.mpData, kDataSize); }
+
+ MovableType& operator=(const MovableType& x)
+ {
+ if(!mpData)
+ mpData = new int8_t[kDataSize];
+ memcpy(mpData, x.mpData, kDataSize);
+ return *this;
+ }
+
+ #if EASTL_MOVE_SEMANTICS_ENABLED
+ MovableType(MovableType&& x) EA_NOEXCEPT : mpData(x.mpData)
+ { x.mpData = NULL; }
+
+ MovableType& operator=(MovableType&& x)
+ {
+ eastl::swap(mpData, x.mpData); // In practice it may not be right to do a swap, depending on the case.
+ return *this;
+ }
+ #endif
+
+ ~MovableType()
+ { delete[] mpData; }
+ };
+
+
+ //////////////////////////////////////////////////////////////////////////////
+ // AutoRefCount
+ //
+ // Basic ref-counted object.
+ //
+ template <typename T>
+ class AutoRefCount
+ {
+ public:
+ T* mpObject;
+
+ public:
+ AutoRefCount() EA_NOEXCEPT : mpObject(NULL)
+ {}
+
+ AutoRefCount(T* pObject) EA_NOEXCEPT : mpObject(pObject)
+ {
+ if(mpObject)
+ mpObject->AddRef();
+ }
+
+ AutoRefCount(T* pObject, int) EA_NOEXCEPT : mpObject(pObject)
+ {
+ // Inherit the existing refcount.
+ }
+
+ AutoRefCount(const AutoRefCount& x) EA_NOEXCEPT : mpObject(x.mpObject)
+ {
+ if(mpObject)
+ mpObject->AddRef();
+ }
+
+ AutoRefCount& operator=(const AutoRefCount& x)
+ {
+ return operator=(x.mpObject);
+ }
+
+ AutoRefCount& operator=(T* pObject)
+ {
+ if(pObject != mpObject)
+ {
+ T* const pTemp = mpObject; // Create temporary to prevent possible problems with re-entrancy.
+ if(pObject)
+ pObject->AddRef();
+ mpObject = pObject;
+ if(pTemp)
+ pTemp->Release();
+ }
+ return *this;
+ }
+
+ #if EASTL_MOVE_SEMANTICS_ENABLED
+ AutoRefCount(AutoRefCount&& x) EA_NOEXCEPT : mpObject(x.mpObject)
+ {
+ x.mpObject = NULL;
+ }
+
+ AutoRefCount& operator=(AutoRefCount&& x)
+ {
+ if(mpObject)
+ mpObject->Release();
+ mpObject = x.mpObject;
+ x.mpObject = NULL;
+ return *this;
+ }
+ #endif
+
+ ~AutoRefCount()
+ {
+ if(mpObject)
+ mpObject->Release();
+ }
+
+ T& operator *() const EA_NOEXCEPT
+ { return *mpObject; }
+
+ T* operator ->() const EA_NOEXCEPT
+ { return mpObject; }
+
+ operator T*() const EA_NOEXCEPT
+ { return mpObject; }
+
+ }; // class AutoRefCount
+
+
+ struct RefCounted
+ {
+ int mRefCount;
+ static int msAddRefCount;
+ static int msReleaseCount;
+
+ RefCounted() : mRefCount(1) {}
+
+ int AddRef()
+ { ++msAddRefCount; return ++mRefCount; }
+
+ int Release()
+ {
+ ++msReleaseCount;
+ if(mRefCount > 1)
+ return --mRefCount;
+ delete this;
+ return 0;
+ }
+ };
+
+ int RefCounted::msAddRefCount = 0;
+ int RefCounted::msReleaseCount = 0;
+
+} // namespace
+
+
+namespace
+{
+ template <typename Container>
+ void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector<uint32_t>& intVector)
+ {
+ stopwatch.Restart();
+ for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ c.push_back((uint64_t)intVector[j]);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ uint64_t temp = 0;
+ stopwatch.Restart();
+ for(typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++)
+ temp += c[j];
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(temp & 0xffffffff));
+ }
+
+ void TestBracket(EA::StdC::Stopwatch& stopwatch, EaTupleVectorUint64& c)
+ {
+ uint64_t temp = 0;
+ stopwatch.Restart();
+ for (typename EaTupleVectorUint64::size_type j = 0, jEnd = c.size(); j < jEnd; j++)
+ temp += eastl::get<0>(c[j]);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(temp & 0xffffffff));
+ }
+
+ template <typename Container>
+ void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ typedef typename Container::iterator iterator_t; // This typedef is required to get this code to compile on RVCT
+ iterator_t it = eastl::find(c.begin(), c.end(), UINT64_C(0xffffffffffff));
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)*it);
+ }
+
+ void TestFind(EA::StdC::Stopwatch& stopwatch, EaTupleVectorUint64& c)
+ {
+ eastl::tuple<uint64_t> val(0xffffffffffff);
+ stopwatch.Restart();
+ EaTupleVectorUint64::iterator it = eastl::find(c.begin(), c.end(), val);
+ stopwatch.Stop();
+ if (it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eastl::get<0>(*it));
+ }
+
+ template <typename Container>
+ void TestSort(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ // Intentionally use eastl sort in order to measure just
+ // vector access speed and not be polluted by sort speed.
+ stopwatch.Restart();
+ eastl::quick_sort(c.begin(), c.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(c[0] & 0xffffffff));
+ }
+
+ void TestSort(EA::StdC::Stopwatch& stopwatch, EaTupleVectorUint64& c)
+ {
+ // Intentionally use eastl sort in order to measure just
+ // vector access speed and not be polluted by sort speed.
+ stopwatch.Restart();
+ eastl::quick_sort(c.begin(), c.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(eastl::get<0>(c[0]) & 0xffffffff));
+ }
+
+
+ template <typename Container>
+ void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j)
+ {
+ it = c.insert(it, UINT64_C(0xffffffffffff));
+
+ if(it == c.end()) // Try to safely increment the iterator three times.
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestErase(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j)
+ {
+ it = c.erase(it);
+
+ if(it == c.end()) // Try to safely increment the iterator three times.
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestMoveReallocate(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ while(c.size() < 8192)
+ c.resize(c.capacity() + 1);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestMoveErase(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ while(!c.empty())
+ c.erase(c.begin());
+ stopwatch.Stop();
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // Variations of test functions for the Padded structures
+ template <typename Container>
+ void TestTuplePushBack(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector<uint32_t>& intVector)
+ {
+ stopwatch.Restart();
+ for (eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ {
+ PaddedTuple tup((uint64_t)intVector[j], DefaultPadding);
+ c.push_back(tup);
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestTupleBracket(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ uint64_t temp = 0;
+ stopwatch.Restart();
+ for (typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++)
+ temp += eastl::get<0>(c[j]);
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(temp & 0xffffffff));
+ }
+
+
+ template <typename Container>
+ void TestTupleFind(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ typedef typename Container::iterator iterator_t; // This typedef is required to get this code to compile on RVCT
+ iterator_t it = eastl::find_if(c.begin(), c.end(), [](auto tup) { return eastl::get<0>(tup) == 0xFFFFFFFF; });
+ stopwatch.Stop();
+ if (it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eastl::get<0>(*it));
+ }
+
+ template <typename Container>
+ void TestTupleSort(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ // Intentionally use eastl sort in order to measure just
+ // vector access speed and not be polluted by sort speed.
+ stopwatch.Restart();
+ eastl::quick_sort(c.begin(), c.end(), [](auto a, auto b) { return eastl::get<0>(a) < eastl::get<0>(b); });
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(eastl::get<0>(c[0]) & 0xffffffff));
+ }
+
+ template <typename Container>
+ void TestTupleInsert(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+ PaddedTuple tup(0xFFFFFFFF, DefaultPadding);
+
+ stopwatch.Restart();
+ for (j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j)
+ {
+ it = c.insert(it, tup);
+
+ if (it == c.end()) // Try to safely increment the iterator three times.
+ it = c.begin();
+ if (++it == c.end())
+ it = c.begin();
+ if (++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ }
+
+ template <typename Container>
+ void TestTupleErase(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for (j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j)
+ {
+ it = c.erase(it);
+
+ if (it == c.end()) // Try to safely increment the iterator three times.
+ it = c.begin();
+ if (++it == c.end())
+ it = c.begin();
+ if (++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ }
+
+} // namespace
+
+
+
+
+
+void BenchmarkTupleVector()
+{
+ EASTLTest_Printf("TupleVector\n");
+
+ EA::UnitTest::RandGenT<uint32_t> rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ {
+ eastl::vector<uint32_t> intVector(100000);
+ eastl::generate(intVector.begin(), intVector.end(), rng);
+
+ for(int i = 0; i < 2; i++)
+ {
+ StdVectorUint64 stdVectorUint64;
+ EaTupleVectorUint64 eaTupleVectorUint64;
+
+
+ ///////////////////////////////
+ // Test push_back
+ ///////////////////////////////
+
+ TestPushBack(stopwatch1, stdVectorUint64, intVector);
+ TestPushBack(stopwatch2, eaTupleVectorUint64, intVector);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64>/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test operator[].
+ ///////////////////////////////
+
+ TestBracket(stopwatch1, stdVectorUint64);
+ TestBracket(stopwatch2, eaTupleVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64>/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test iteration via find().
+ ///////////////////////////////
+
+ TestFind(stopwatch1, stdVectorUint64);
+ TestFind(stopwatch2, eaTupleVectorUint64);
+ TestFind(stopwatch1, stdVectorUint64);
+ TestFind(stopwatch2, eaTupleVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test sort
+ ///////////////////////////////
+
+ // Currently VC++ complains about our sort function decrementing std::iterator that is already at begin(). In the strictest sense,
+ // that's a valid complaint, but we aren't testing std STL here. We will want to revise our sort function eventually.
+ #if !defined(_MSC_VER) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2)
+ TestSort(stopwatch1, stdVectorUint64);
+ TestSort(stopwatch2, eaTupleVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64>/sort", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ #endif
+
+ ///////////////////////////////
+ // Test insert
+ ///////////////////////////////
+
+ TestInsert(stopwatch1, stdVectorUint64);
+ TestInsert(stopwatch2, eaTupleVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase
+ ///////////////////////////////
+
+ TestErase(stopwatch1, stdVectorUint64);
+ TestErase(stopwatch2, eaTupleVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////////////////
+ // Test move of MovableType
+ // Should be much faster with C++11 move.
+ ///////////////////////////////////////////
+
+ std::vector<MovableType> stdVectorMovableType;
+ eastl::tuple_vector<MovableType> eaTupleVectorMovableType;
+
+ TestMoveReallocate(stopwatch1, stdVectorMovableType);
+ TestMoveReallocate(stopwatch2, eaTupleVectorMovableType);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<MovableType>/reallocate", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ TestMoveErase(stopwatch1, stdVectorMovableType);
+ TestMoveErase(stopwatch2, eaTupleVectorMovableType);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<MovableType>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////////////////
+ // Test move of AutoRefCount
+ // Should be much faster with C++11 move.
+ ///////////////////////////////////////////
+
+ std::vector<AutoRefCount<RefCounted> > stdVectorAutoRefCount;
+ eastl::tuple_vector<AutoRefCount<RefCounted> > eaTupleVectorAutoRefCount;
+
+ for(size_t a = 0; a < 2048; a++)
+ {
+ stdVectorAutoRefCount.push_back(AutoRefCount<RefCounted>(new RefCounted));
+ eaTupleVectorAutoRefCount.push_back(AutoRefCount<RefCounted>(new RefCounted));
+ }
+
+ RefCounted::msAddRefCount = 0;
+ RefCounted::msReleaseCount = 0;
+ TestMoveErase(stopwatch1, stdVectorAutoRefCount);
+ //EASTLTest_Printf("tuple_vector<AutoRefCount>/erase std counts: %d %d\n", RefCounted::msAddRefCount, RefCounted::msReleaseCount);
+
+ RefCounted::msAddRefCount = 0;
+ RefCounted::msReleaseCount = 0;
+ TestMoveErase(stopwatch2, eaTupleVectorAutoRefCount);
+ //EASTLTest_Printf("tuple_vector<AutoRefCount>/erase EA counts: %d %d\n", RefCounted::msAddRefCount, RefCounted::msReleaseCount);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<AutoRefCount>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ //////////////////////////////////////////////////////////////////////////
+ // Test various operations with "padded" data, to demonstrate access/modification of sparse data
+
+ StdVectorUint64Padded stdVectorUint64Padded;
+ EaTupleVectorUint64Padded eaTupleVectorUint64Padded;
+
+ ///////////////////////////////
+ // Test push_back
+ ///////////////////////////////
+
+ TestTuplePushBack(stopwatch1, stdVectorUint64Padded, intVector);
+ TestTuplePushBack(stopwatch2, eaTupleVectorUint64Padded, intVector);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64,Padding>/push_back", stopwatch1.GetUnits(),
+ stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test operator[].
+ ///////////////////////////////
+
+ TestTupleBracket(stopwatch1, stdVectorUint64Padded);
+ TestTupleBracket(stopwatch2, eaTupleVectorUint64Padded);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64,Padding>/operator[]", stopwatch1.GetUnits(),
+ stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test iteration via find().
+ ///////////////////////////////
+
+ TestTupleFind(stopwatch1, stdVectorUint64Padded);
+ TestTupleFind(stopwatch2, eaTupleVectorUint64Padded);
+ TestTupleFind(stopwatch1, stdVectorUint64Padded);
+ TestTupleFind(stopwatch2, eaTupleVectorUint64Padded);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64,Padding>/iteration", stopwatch1.GetUnits(),
+ stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test sort
+ ///////////////////////////////
+
+ // Currently VC++ complains about our sort function decrementing std::iterator that is already at
+ // begin(). In the strictest sense, that's a valid complaint, but we aren't testing std STL here. We
+ // will want to revise our sort function eventually.
+ #if !defined(_MSC_VER) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2)
+ TestTupleSort(stopwatch1, stdVectorUint64Padded);
+ TestTupleSort(stopwatch2, eaTupleVectorUint64Padded);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64,Padding>/sort", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(),
+ stopwatch2.GetElapsedTime());
+ #endif
+
+ ///////////////////////////////
+ // Test insert
+ ///////////////////////////////
+
+ TestTupleInsert(stopwatch1, stdVectorUint64Padded);
+ TestTupleInsert(stopwatch2, eaTupleVectorUint64Padded);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64,Padding>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(),
+ stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase
+ ///////////////////////////////
+
+ TestTupleErase(stopwatch1, stdVectorUint64Padded);
+ TestTupleErase(stopwatch2, eaTupleVectorUint64Padded);
+
+ if(i == 1)
+ Benchmark::AddResult("tuple_vector<uint64,Padding>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(),
+ stopwatch2.GetElapsedTime());
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/BenchmarkVector.cpp b/EASTL/benchmark/source/BenchmarkVector.cpp
new file mode 100644
index 0000000..9331530
--- /dev/null
+++ b/EASTL/benchmark/source/BenchmarkVector.cpp
@@ -0,0 +1,452 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/vector.h>
+#include <EASTL/sort.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #pragma warning(disable: 4350)
+#endif
+#include <algorithm>
+#include <vector>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+using namespace EA;
+
+
+typedef std::vector<uint64_t> StdVectorUint64;
+typedef eastl::vector<uint64_t> EaVectorUint64;
+
+
+namespace
+{
+
+
+ //////////////////////////////////////////////////////////////////////////////
+ // MovableType
+ //
+ struct MovableType
+ {
+ int8_t* mpData;
+ enum { kDataSize = 128 };
+
+ MovableType() : mpData(new int8_t[kDataSize])
+ { memset(mpData, 0, kDataSize); }
+
+ MovableType(const MovableType& x) : mpData(new int8_t[kDataSize])
+ { memcpy(mpData, x.mpData, kDataSize); }
+
+ MovableType& operator=(const MovableType& x)
+ {
+ if(!mpData)
+ mpData = new int8_t[kDataSize];
+ memcpy(mpData, x.mpData, kDataSize);
+ return *this;
+ }
+
+ MovableType(MovableType&& x) EA_NOEXCEPT : mpData(x.mpData)
+ { x.mpData = NULL; }
+
+ MovableType& operator=(MovableType&& x)
+ {
+ eastl::swap(mpData, x.mpData); // In practice it may not be right to do a swap, depending on the case.
+ return *this;
+ }
+
+ ~MovableType()
+ { delete[] mpData; }
+ };
+
+
+ //////////////////////////////////////////////////////////////////////////////
+ // AutoRefCount
+ //
+ // Basic ref-counted object.
+ //
+ template <typename T>
+ class AutoRefCount
+ {
+ public:
+ T* mpObject;
+
+ public:
+ AutoRefCount() EA_NOEXCEPT : mpObject(NULL)
+ {}
+
+ AutoRefCount(T* pObject) EA_NOEXCEPT : mpObject(pObject)
+ {
+ if(mpObject)
+ mpObject->AddRef();
+ }
+
+ AutoRefCount(T* pObject, int) EA_NOEXCEPT : mpObject(pObject)
+ {
+ // Inherit the existing refcount.
+ }
+
+ AutoRefCount(const AutoRefCount& x) EA_NOEXCEPT : mpObject(x.mpObject)
+ {
+ if(mpObject)
+ mpObject->AddRef();
+ }
+
+ AutoRefCount& operator=(const AutoRefCount& x)
+ {
+ return operator=(x.mpObject);
+ }
+
+ AutoRefCount& operator=(T* pObject)
+ {
+ if(pObject != mpObject)
+ {
+ T* const pTemp = mpObject; // Create temporary to prevent possible problems with re-entrancy.
+ if(pObject)
+ pObject->AddRef();
+ mpObject = pObject;
+ if(pTemp)
+ pTemp->Release();
+ }
+ return *this;
+ }
+
+ AutoRefCount(AutoRefCount&& x) EA_NOEXCEPT : mpObject(x.mpObject)
+ {
+ x.mpObject = NULL;
+ }
+
+ AutoRefCount& operator=(AutoRefCount&& x)
+ {
+ if(mpObject)
+ mpObject->Release();
+ mpObject = x.mpObject;
+ x.mpObject = NULL;
+ return *this;
+ }
+
+ ~AutoRefCount()
+ {
+ if(mpObject)
+ mpObject->Release();
+ }
+
+ T& operator *() const EA_NOEXCEPT
+ { return *mpObject; }
+
+ T* operator ->() const EA_NOEXCEPT
+ { return mpObject; }
+
+ operator T*() const EA_NOEXCEPT
+ { return mpObject; }
+
+ }; // class AutoRefCount
+
+
+ struct RefCounted
+ {
+ int mRefCount;
+ static int msAddRefCount;
+ static int msReleaseCount;
+
+ RefCounted() : mRefCount(1) {}
+
+ int AddRef()
+ { ++msAddRefCount; return ++mRefCount; }
+
+ int Release()
+ {
+ ++msReleaseCount;
+ if(mRefCount > 1)
+ return --mRefCount;
+ delete this;
+ return 0;
+ }
+ };
+
+ int RefCounted::msAddRefCount = 0;
+ int RefCounted::msReleaseCount = 0;
+
+} // namespace
+
+
+namespace
+{
+ template <typename Container>
+ void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector<uint32_t>& intVector)
+ {
+ stopwatch.Restart();
+ for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++)
+ c.push_back((uint64_t)intVector[j]);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ uint64_t temp = 0;
+ stopwatch.Restart();
+ for(typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++)
+ temp += c[j];
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(temp & 0xffffffff));
+ }
+
+
+ template <typename Container>
+ void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ typedef typename Container::iterator iterator_t; // This typedef is required to get this code to compile on RVCT
+ iterator_t it = eastl::find(c.begin(), c.end(), UINT64_C(0xffffffffffff));
+ stopwatch.Stop();
+ if(it != c.end())
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)*it);
+ }
+
+
+ template <typename Container>
+ void TestSort(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ // Intentionally use eastl sort in order to measure just
+ // vector access speed and not be polluted by sort speed.
+ stopwatch.Restart();
+ eastl::quick_sort(c.begin(), c.end());
+ stopwatch.Stop();
+ sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(c[0] & 0xffffffff));
+ }
+
+
+ template <typename Container>
+ void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j)
+ {
+ it = c.insert(it, UINT64_C(0xffffffffffff));
+
+ if(it == c.end()) // Try to safely increment the iterator three times.
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestErase(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ typename Container::size_type j, jEnd;
+ typename Container::iterator it;
+
+ stopwatch.Restart();
+ for(j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j)
+ {
+ it = c.erase(it);
+
+ if(it == c.end()) // Try to safely increment the iterator three times.
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ if(++it == c.end())
+ it = c.begin();
+ }
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestMoveReallocate(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ while(c.size() < 8192)
+ c.resize(c.capacity() + 1);
+ stopwatch.Stop();
+ }
+
+
+ template <typename Container>
+ void TestMoveErase(EA::StdC::Stopwatch& stopwatch, Container& c)
+ {
+ stopwatch.Restart();
+ while(!c.empty())
+ c.erase(c.begin());
+ stopwatch.Stop();
+ }
+
+
+} // namespace
+
+
+
+
+
+void BenchmarkVector()
+{
+ EASTLTest_Printf("Vector\n");
+
+ EA::UnitTest::RandGenT<uint32_t> rng(EA::UnitTest::GetRandSeed());
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles);
+ EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles);
+
+ {
+ eastl::vector<uint32_t> intVector(100000);
+ eastl::generate(intVector.begin(), intVector.end(), rng);
+
+ for(int i = 0; i < 2; i++)
+ {
+ StdVectorUint64 stdVectorUint64;
+ EaVectorUint64 eaVectorUint64;
+
+
+ ///////////////////////////////
+ // Test push_back
+ ///////////////////////////////
+
+ TestPushBack(stopwatch1, stdVectorUint64, intVector);
+ TestPushBack(stopwatch2, eaVectorUint64, intVector);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<uint64>/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test operator[].
+ ///////////////////////////////
+
+ TestBracket(stopwatch1, stdVectorUint64);
+ TestBracket(stopwatch2, eaVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<uint64>/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test iteration via find().
+ ///////////////////////////////
+
+ TestFind(stopwatch1, stdVectorUint64);
+ TestFind(stopwatch2, eaVectorUint64);
+ TestFind(stopwatch1, stdVectorUint64);
+ TestFind(stopwatch2, eaVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<uint64>/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test sort
+ ///////////////////////////////
+
+ // Currently VC++ complains about our sort function decrementing std::iterator that is already at begin(). In the strictest sense,
+ // that's a valid complaint, but we aren't testing std STL here. We will want to revise our sort function eventually.
+ #if !defined(_MSC_VER) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2)
+ TestSort(stopwatch1, stdVectorUint64);
+ TestSort(stopwatch2, eaVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<uint64>/sort", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ #endif
+
+ ///////////////////////////////
+ // Test insert
+ ///////////////////////////////
+
+ TestInsert(stopwatch1, stdVectorUint64);
+ TestInsert(stopwatch2, eaVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<uint64>/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////
+ // Test erase
+ ///////////////////////////////
+
+ TestErase(stopwatch1, stdVectorUint64);
+ TestErase(stopwatch2, eaVectorUint64);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<uint64>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////////////////
+ // Test move of MovableType
+ // Should be much faster with C++11 move.
+ ///////////////////////////////////////////
+
+ std::vector<MovableType> stdVectorMovableType;
+ eastl::vector<MovableType> eaVectorMovableType;
+
+ TestMoveReallocate(stopwatch1, stdVectorMovableType);
+ TestMoveReallocate(stopwatch2, eaVectorMovableType);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<MovableType>/reallocate", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ TestMoveErase(stopwatch1, stdVectorMovableType);
+ TestMoveErase(stopwatch2, eaVectorMovableType);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<MovableType>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+
+
+ ///////////////////////////////////////////
+ // Test move of AutoRefCount
+ // Should be much faster with C++11 move.
+ ///////////////////////////////////////////
+
+ std::vector<AutoRefCount<RefCounted> > stdVectorAutoRefCount;
+ eastl::vector<AutoRefCount<RefCounted> > eaVectorAutoRefCount;
+
+ for(size_t a = 0; a < 2048; a++)
+ {
+ stdVectorAutoRefCount.push_back(AutoRefCount<RefCounted>(new RefCounted));
+ eaVectorAutoRefCount.push_back(AutoRefCount<RefCounted>(new RefCounted));
+ }
+
+ RefCounted::msAddRefCount = 0;
+ RefCounted::msReleaseCount = 0;
+ TestMoveErase(stopwatch1, stdVectorAutoRefCount);
+ EASTLTest_Printf("vector<AutoRefCount>/erase std counts: %d %d\n", RefCounted::msAddRefCount, RefCounted::msReleaseCount);
+
+ RefCounted::msAddRefCount = 0;
+ RefCounted::msReleaseCount = 0;
+ TestMoveErase(stopwatch2, eaVectorAutoRefCount);
+ EASTLTest_Printf("vector<AutoRefCount>/erase EA counts: %d %d\n", RefCounted::msAddRefCount, RefCounted::msReleaseCount);
+
+ if(i == 1)
+ Benchmark::AddResult("vector<AutoRefCount>/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime());
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/EASTLBenchmark.cpp b/EASTL/benchmark/source/EASTLBenchmark.cpp
new file mode 100644
index 0000000..8e4d3ae
--- /dev/null
+++ b/EASTL/benchmark/source/EASTLBenchmark.cpp
@@ -0,0 +1,291 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#include <EASTL/string.h>
+#include <EAMain/EAMain.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+#include <stdio.h>
+#include <math.h>
+#include <float.h>
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+
+namespace Benchmark
+{
+ static int64_t ConvertStopwatchUnits(EA::StdC::Stopwatch::Units unitsSource, int64_t valueSource, EA::StdC::Stopwatch::Units unitsDest)
+ {
+ using namespace EA::StdC;
+
+ int64_t valueDest = valueSource;
+
+ if(unitsSource != unitsDest)
+ {
+ double sourceMultiplier;
+
+ switch (unitsSource)
+ {
+ case Stopwatch::kUnitsCPUCycles:
+ sourceMultiplier = Stopwatch::GetUnitsPerCPUCycle(unitsDest); // This will typically be a number less than 1.
+ valueDest = (int64_t)(valueSource * sourceMultiplier);
+ break;
+
+ case Stopwatch::kUnitsCycles:
+ sourceMultiplier = Stopwatch::GetUnitsPerStopwatchCycle(unitsDest); // This will typically be a number less than 1.
+ valueDest = (int64_t)(valueSource * sourceMultiplier);
+ break;
+
+ case Stopwatch::kUnitsNanoseconds:
+ case Stopwatch::kUnitsMicroseconds:
+ case Stopwatch::kUnitsMilliseconds:
+ case Stopwatch::kUnitsSeconds:
+ case Stopwatch::kUnitsMinutes:
+ case Stopwatch::kUnitsUserDefined:
+ // To do. Also, handle the case of unitsDest being Cycles or CPUCycles and unitsSource being a time.
+ break;
+ }
+ }
+
+ return valueDest;
+ }
+
+ void WriteTime(int64_t timeNS, eastl::string& sTime)
+ {
+ if(timeNS > 1000000000)
+ sTime.sprintf(" %6.2f s", (double)timeNS / 1000000000);
+ else if(timeNS > 1000000)
+ sTime.sprintf("%6.1f ms", (double)timeNS / 1000000);
+ else if(timeNS > 1000)
+ sTime.sprintf("%6.1f us", (double)timeNS / 1000);
+ else
+ sTime.sprintf("%6.1f ns", (double)timeNS / 1);
+ }
+
+
+
+ Environment gEnvironment;
+
+ Environment& GetEnvironment()
+ {
+ return gEnvironment;
+ }
+
+
+
+ ResultSet gResultSet;
+
+ ResultSet& GetResultSet()
+ {
+ return gResultSet;
+ }
+
+
+
+ // Scratch sprintf buffer
+ char gScratchBuffer[1024];
+
+
+ void DoNothing(...)
+ {
+ // Intentionally nothing.
+ }
+
+
+ void AddResult(const char* pName, int units, int64_t nTime1, int64_t nTime2, const char* pNotes)
+ {
+ Result result;
+
+ result.msName = pName;
+ result.mUnits = units;
+ result.mTime1 = nTime1;
+ result.mTime1NS = ConvertStopwatchUnits((EA::StdC::Stopwatch::Units)units, nTime1, EA::StdC::Stopwatch::kUnitsNanoseconds);
+ result.mTime2 = nTime2;
+ result.mTime2NS = ConvertStopwatchUnits((EA::StdC::Stopwatch::Units)units, nTime2, EA::StdC::Stopwatch::kUnitsNanoseconds);
+
+ if(pNotes)
+ result.msNotes = pNotes;
+
+ gResultSet.insert(result);
+ }
+
+
+ void PrintResultLine(const Result& result)
+ {
+ const double fRatio = (double)result.mTime1 / (double)result.mTime2;
+ const double fRatioPrinted = (fRatio > 100) ? 100 : fRatio;
+ const double fPercentChange = fabs(((double)result.mTime1 - (double)result.mTime2) / (((double)result.mTime1 + (double)result.mTime2) / 2));
+ const bool bDifference = (result.mTime1 > 10) && (result.mTime2 > 10) && (fPercentChange > 0.25);
+ const char* pDifference = (bDifference ? (result.mTime1 < result.mTime2 ? "-" : "+") : "");
+
+ eastl::string sClockTime1, sClockTime2;
+
+ WriteTime(result.mTime1NS, sClockTime1); // This converts an integer in nanoseconds (e.g. 23400000) to a string (e.g. "23.4 ms")
+ WriteTime(result.mTime2NS, sClockTime2);
+
+ EA::UnitTest::Report("%-43s | %13" PRIu64 " %s | %13" PRIu64 " %s | %10.2f%10s", result.msName.c_str(), result.mTime1, sClockTime1.c_str(), result.mTime2, sClockTime2.c_str(), fRatioPrinted, pDifference);
+
+ if(result.msNotes.length()) // If there are any notes...
+ EA::UnitTest::Report(" %s", result.msNotes.c_str());
+ EA::UnitTest::Report("\n");
+ }
+
+
+ #if defined(EASTL_BENCHMARK_WRITE_FILE) && EASTL_BENCHMARK_WRITE_FILE
+
+ #if !defined(EASTL_BENCHMARK_WRITE_FILE_PATH)
+ #define EASTL_BENCHMARK_WRITE_FILE_PATH "BenchmarkResults.txt"
+ #endif
+
+ struct FileWriter
+ {
+ FILE* mpReportFile;
+ EA::EAMain::ReportFunction mpSavedReportFunction;
+ static FileWriter* gpFileWriter;
+
+ static void StaticPrintfReportFunction(const char8_t* pText)
+ {
+ if(gpFileWriter)
+ gpFileWriter->PrintfReportFunction(pText);
+ }
+
+ void PrintfReportFunction(const char8_t* pText)
+ {
+ fwrite(pText, strlen(pText), 1, mpReportFile);
+ EA::EAMain::ReportFunction gpReportFunction = EA::EAMain::GetDefaultReportFunction();
+ gpReportFunction(pText);
+ }
+
+ FileWriter() : mpReportFile(NULL), mpSavedReportFunction(NULL)
+ {
+ mpReportFile = fopen(EASTL_BENCHMARK_WRITE_FILE_PATH, "w+");
+
+ if(mpReportFile)
+ {
+ gpFileWriter = this;
+ mpSavedReportFunction = EA::EAMain::GetDefaultReportFunction();
+ EA::EAMain::SetReportFunction(StaticPrintfReportFunction);
+ }
+ }
+
+ ~FileWriter()
+ {
+ if(mpReportFile)
+ {
+ gpFileWriter = NULL;
+ EA::EAMain::SetReportFunction(mpSavedReportFunction);
+ fclose(mpReportFile);
+ }
+ }
+ };
+
+ FileWriter* FileWriter::gpFileWriter = NULL;
+ #endif
+
+
+ void PrintResults()
+ {
+ #if defined(EASTL_BENCHMARK_WRITE_FILE) && EASTL_BENCHMARK_WRITE_FILE
+ FileWriter fileWriter; // This will auto-execute.
+ #endif
+
+ // Print the results
+ EA::UnitTest::Report("\n");
+ EA::UnitTest::Report("****************************************************************************************\n");
+ EA::UnitTest::Report("EASTL Benchmark test results\n");
+ EA::UnitTest::Report("****************************************************************************************\n");
+ EA::UnitTest::Report("\n");
+ EA::UnitTest::Report("EASTL version: %s\n", EASTL_VERSION);
+ EA::UnitTest::Report("Platform: %s\n", gEnvironment.msPlatform.c_str());
+ EA::UnitTest::Report("Compiler: %s\n", EA_COMPILER_STRING);
+ #if defined(EA_DEBUG) || defined(_DEBUG)
+ EA::UnitTest::Report("Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled.\n");
+ EA::UnitTest::Report("Build: Debug. Inlining disabled. STL debug features disabled.\n");
+ #else
+ EA::UnitTest::Report("Allocator: PPMalloc::GeneralAllocator. Thread safety enabled.\n");
+ EA::UnitTest::Report("Build: Full optimization. Inlining enabled.\n");
+ #endif
+ EA::UnitTest::Report("\n");
+ EA::UnitTest::Report("Values are ticks and time to complete tests; smaller values are better.\n");
+ EA::UnitTest::Report("\n");
+ EA::UnitTest::Report("%-43s%26s%26s%13s%13s\n", "Test", gEnvironment.msSTLName1.c_str(), gEnvironment.msSTLName2.c_str(), "Ratio", "Difference?");
+ EA::UnitTest::Report("---------------------------------------------------------------------------------------------------------------------\n");
+
+ eastl::string sTestTypeLast;
+ eastl::string sTestTypeTemp;
+
+ for(ResultSet::iterator it = gResultSet.begin(); it != gResultSet.end(); ++it)
+ {
+ const Result& result = *it;
+
+ eastl_size_t n = result.msName.find('/');
+ if(n == eastl::string::npos)
+ n = result.msName.length();
+ sTestTypeTemp.assign(result.msName, 0, n);
+
+ if(sTestTypeTemp != sTestTypeLast) // If it looks like we are changing to a new test type... add an empty line to help readability.
+ {
+ if(it != gResultSet.begin())
+ EA::UnitTest::Report("\n");
+ sTestTypeLast = sTestTypeTemp;
+ }
+
+ PrintResultLine(result);
+ }
+
+ // We will print out a final line that has the sum of the rows printed above.
+ Result resultSum;
+ resultSum.msName = "sum";
+
+ for(ResultSet::iterator its = gResultSet.begin(); its != gResultSet.end(); ++its)
+ {
+ const Result& resultTemp = *its;
+
+ EASTL_ASSERT(resultTemp.mUnits == EA::StdC::Stopwatch::kUnitsCPUCycles); // Our ConvertStopwatchUnits call below assumes that every measured time is CPUCycles.
+ resultSum.mTime1 += resultTemp.mTime1;
+ resultSum.mTime2 += resultTemp.mTime2;
+ }
+
+ // We do this convert as a final step instead of the loop in order to avoid loss of precision.
+ resultSum.mTime1NS = ConvertStopwatchUnits(EA::StdC::Stopwatch::kUnitsCPUCycles, resultSum.mTime1, EA::StdC::Stopwatch::kUnitsNanoseconds);
+ resultSum.mTime2NS = ConvertStopwatchUnits(EA::StdC::Stopwatch::kUnitsCPUCycles, resultSum.mTime2, EA::StdC::Stopwatch::kUnitsNanoseconds);
+ EA::UnitTest::Report("\n");
+ PrintResultLine(resultSum);
+
+ EA::UnitTest::Report("\n");
+ EA::UnitTest::Report("****************************************************************************************\n");
+ EA::UnitTest::Report("\n");
+
+ // Clear the results
+ gResultSet.clear();
+ gEnvironment.clear();
+ }
+
+} // namespace Benchmark
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/EASTLBenchmark.h b/EASTL/benchmark/source/EASTLBenchmark.h
new file mode 100644
index 0000000..a0833e6
--- /dev/null
+++ b/EASTL/benchmark/source/EASTLBenchmark.h
@@ -0,0 +1,228 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTLBENCHMARK_H
+#define EASTLBENCHMARK_H
+
+
+// Intrinsic control
+//
+// Our benchmark results are being skewed by inconsistent decisions by the
+// VC++ compiler to use intrinsic functions. Additionally, many of our
+// benchmarks work on large blocks of elements, whereas intrinsics often
+// are an improvement only over small blocks of elements. As a result,
+// enabling of intrinsics is often resulting in poor benchmark results for
+// code that gets an intrinsic enabled for it, even though it will often
+// happen in real code to be the opposite case. The disabling of intrinsics
+// here often results in EASTL performance being lower than it would be in
+// real-world situations.
+//
+#include <string.h>
+#ifdef _MSC_VER
+ #pragma function(strlen, strcmp, strcpy, strcat, memcpy, memcmp, memset)
+#endif
+
+
+#include <EASTL/set.h>
+#include <EASTL/string.h>
+#include <EAStdC/EAStopwatch.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+void BenchmarkSort();
+void BenchmarkList();
+void BenchmarkString();
+void BenchmarkVector();
+void BenchmarkDeque();
+void BenchmarkSet();
+void BenchmarkMap();
+void BenchmarkHash();
+void BenchmarkAlgorithm();
+void BenchmarkHeap();
+void BenchmarkBitset();
+void BenchmarkTupleVector();
+
+
+namespace Benchmark
+{
+
+ // Environment
+ //
+ // The environment for this benchmark test.
+ //
+ struct Environment
+ {
+ eastl::string8 msPlatform; // Name of test platform (e.g. "Windows")
+ eastl::string8 msSTLName1; // Name of competitor #1 (e.g. "EASTL").
+ eastl::string8 msSTLName2; // Name of competitor #2 (e.g. "MS STL").
+
+ void clear() { msPlatform.set_capacity(0); msSTLName1.set_capacity(0); msSTLName2.set_capacity(0); }
+ };
+
+ Environment& GetEnvironment();
+
+
+ // Result
+ //
+ // An individual benchmark result.
+ //
+ struct Result
+ {
+ eastl::string8 msName; // Test name (e.g. "vector/insert").
+ int mUnits; // Timing units (e.g. EA::StdC::Stopwatch::kUnitsSeconds).
+ int64_t mTime1; // Time of competitor #1.
+ uint64_t mTime1NS; // Nanoseconds.
+ int64_t mTime2; // Time of competitor #2.
+ int64_t mTime2NS; // Nanoseconds.
+ eastl::string8 msNotes; // Any comments to attach to this result.
+
+ Result() : msName(), mUnits(EA::StdC::Stopwatch::kUnitsCPUCycles),
+ mTime1(0), mTime1NS(0), mTime2(0), mTime2NS(0), msNotes() { }
+ };
+
+ inline bool operator<(const Result& r1, const Result& r2)
+ { return r1.msName < r2.msName; }
+
+ typedef eastl::set<Result> ResultSet;
+
+ ResultSet& GetResultSet();
+
+
+ // Scratch sprintf buffer
+ extern char gScratchBuffer[1024];
+
+
+
+ // Utility functions
+ //
+ void DoNothing(...);
+ void AddResult(const char* pName, int units, int64_t nTime1, int64_t nTime2, const char* pNotes = NULL);
+ void PrintResults();
+ void WriteTime(int64_t timeNS, eastl::string& sTime);
+
+
+} // namespace Benchmark
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// LargePOD
+///
+/// Implements a structure which is essentially a largish POD. Useful for testing
+/// containers and algorithms for their ability to efficiently work with PODs.
+/// This class isn't strictly a POD by the definition of the C++ standard,
+/// but it suffices for our interests.
+///
+struct LargeObject
+{
+ int32_t mData[2048];
+};
+
+struct LargePOD
+{
+ LargeObject mLargeObject1;
+ LargeObject mLargeObject2;
+ const char* mpName1;
+ const char* mpName2;
+
+ explicit LargePOD(int32_t x = 0) // A true POD doesn't have a non-trivial constructor.
+ {
+ memset(mLargeObject1.mData, 0, sizeof(mLargeObject1.mData));
+ memset(mLargeObject2.mData, 0, sizeof(mLargeObject2.mData));
+ mLargeObject1.mData[0] = x;
+
+ mpName1 = "LargePOD1";
+ mpName2 = "LargePOD2";
+ }
+
+ LargePOD(const LargePOD& largePOD) // A true POD doesn't have a non-trivial copy-constructor.
+ : mLargeObject1(largePOD.mLargeObject1),
+ mLargeObject2(largePOD.mLargeObject2),
+ mpName1(largePOD.mpName1),
+ mpName2(largePOD.mpName2)
+ {
+ }
+
+ virtual ~LargePOD() { }
+
+ LargePOD& operator=(const LargePOD& largePOD) // A true POD doesn't have a non-trivial assignment operator.
+ {
+ if(&largePOD != this)
+ {
+ mLargeObject1 = largePOD.mLargeObject1;
+ mLargeObject2 = largePOD.mLargeObject2;
+ mpName1 = largePOD.mpName1;
+ mpName2 = largePOD.mpName2;
+ }
+ return *this;
+ }
+
+ virtual void DoSomething() // Note that by declaring this virtual, this class is not truly a POD.
+ { // But it acts like a POD for the purposes of EASTL algorithms.
+ mLargeObject1.mData[1]++;
+ }
+
+ operator int()
+ {
+ return (int)mLargeObject1.mData[0];
+ }
+};
+
+//EASTL_DECLARE_POD(LargePOD);
+//EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(LargePOD);
+//EASTL_DECLARE_TRIVIAL_COPY(LargePOD);
+//EASTL_DECLARE_TRIVIAL_ASSIGN(LargePOD);
+//EASTL_DECLARE_TRIVIAL_DESTRUCTOR(LargePOD);
+//EASTL_DECLARE_TRIVIAL_RELOCATE(LargePOD);
+
+// Operators
+// We specifically define only == and <, in order to verify that
+// our containers and algorithms are not mistakenly expecting other
+// operators for the contained and manipulated classes.
+inline bool operator==(const LargePOD& t1, const LargePOD& t2)
+{
+ return (memcmp(&t1.mLargeObject1, &t2.mLargeObject1, sizeof(t1.mLargeObject1)) == 0) &&
+ (memcmp(&t1.mLargeObject2, &t2.mLargeObject2, sizeof(t1.mLargeObject2)) == 0) &&
+ (strcmp(t1.mpName1, t2.mpName1) == 0) &&
+ (strcmp(t1.mpName2, t2.mpName2) == 0);
+}
+
+inline bool operator<(const LargePOD& t1, const LargePOD& t2)
+{
+ return (memcmp(&t1.mLargeObject1, &t2.mLargeObject1, sizeof(t1.mLargeObject1)) < 0) &&
+ (memcmp(&t1.mLargeObject2, &t2.mLargeObject2, sizeof(t1.mLargeObject2)) < 0) &&
+ (strcmp(t1.mpName1, t2.mpName1) < 0) &&
+ (strcmp(t1.mpName2, t2.mpName2) < 0);
+}
+
+
+
+
+
+#endif // Header sentry
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/benchmark/source/main.cpp b/EASTL/benchmark/source/main.cpp
new file mode 100644
index 0000000..59ff5a9
--- /dev/null
+++ b/EASTL/benchmark/source/main.cpp
@@ -0,0 +1,194 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLBenchmark.h"
+#include "EASTLTest.h"
+#if !EASTL_OPENSOURCE
+ #include <PPMalloc/EAGeneralAllocatorDebug.h>
+#endif
+#include <EAStdC/EASprintf.h>
+#include <EAStdC/EAStopwatch.h>
+#include <EAStdC/EAString.h>
+#include <EASTL/internal/config.h>
+#include <string.h>
+#include <stdio.h>
+EA_DISABLE_VC_WARNING(4946)
+#include "EAMain/EAEntryPointMain.inl"
+#include "EASTLTestAllocator.h"
+
+
+///////////////////////////////////////////////////////////////////////////////
+// gpEAGeneralAllocator / gpEAGeneralAllocatorDebug
+//
+#if !EASTL_OPENSOURCE
+namespace EA
+{
+ namespace Allocator
+ {
+ #ifdef EA_DEBUG
+ extern GeneralAllocatorDebug gGeneralAllocator;
+ extern PPM_API GeneralAllocatorDebug* gpEAGeneralAllocatorDebug;
+ #else
+ extern GeneralAllocator gGeneralAllocator;
+ extern PPM_API GeneralAllocator* gpEAGeneralAllocator;
+ #endif
+ }
+}
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Required by EASTL.
+//
+#if !defined(EASTL_EASTDC_VSNPRINTF) || !EASTL_EASTDC_VSNPRINTF
+ int Vsnprintf8(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments)
+ {
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+ }
+
+ int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments)
+ {
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+ }
+
+ #if (EASTDC_VERSION_N >= 10600)
+ int Vsnprintf32(char32_t* pDestination, size_t n, const char32_t* pFormat, va_list arguments)
+ {
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+ }
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// main
+//
+int EAMain(int argc, char* argv[])
+{
+ bool bWaitAtEnd = false;
+ bool bPrintHelp = false;
+ int nOptionCount = 0;
+ int nErrorCount = 0;
+
+ EA::EAMain::PlatformStartup();
+ EA::EAMain::SetVerbosity(2); // Default value.
+
+ // Set up debug parameters.
+ #ifdef EA_DEBUG
+ // Only enable this temporarily to help find any problems you might find.
+ // EA::Allocator::gpEAGeneralAllocatorDebug->SetAutoHeapValidation(EA::Allocator::GeneralAllocator::kHeapValidationLevelBasic, 16);
+ #endif
+
+ // Parse command line arguments
+ for(int i = 1; i < argc; i++)
+ {
+ if(strstr(argv[i], "-w") == argv[i])
+ {
+ bWaitAtEnd = true;
+ nOptionCount++;
+ }
+ else if(strstr(argv[i], "-v") == argv[i])
+ {
+ uint32_t verbosity = EA::StdC::AtoU32(argv[i] + 3);
+ EA::EAMain::SetVerbosity(verbosity);
+ nOptionCount++;
+ }
+ else if(strstr(argv[i], "-l:") == argv[i])
+ {
+ gEASTL_TestLevel = atoi(argv[i] + 3);
+ if(gEASTL_TestLevel < kEASTL_TestLevelLow)
+ gEASTL_TestLevel = kEASTL_TestLevelLow;
+ else if(gEASTL_TestLevel > kEASTL_TestLevelHigh)
+ gEASTL_TestLevel = kEASTL_TestLevelHigh;
+ nOptionCount++;
+ }
+ else if(strstr(argv[i], "-s:") == argv[i])
+ {
+ uint32_t seed = (eastl_size_t)atoi(argv[i] + 3);
+ EA::UnitTest::SetRandSeed(seed);
+ nOptionCount++;
+ }
+ else if((strstr(argv[i], "-?") == argv[i]) || (strstr(argv[i], "-h") == argv[i]))
+ {
+ bPrintHelp = true;
+ nOptionCount++;
+ }
+ }
+
+ // Print user help.
+ if(!bPrintHelp)
+ bPrintHelp = (nOptionCount == 0);
+
+ if(bPrintHelp)
+ {
+ EASTLTest_Printf("Options\n");
+ EASTLTest_Printf(" -w Wait at end.\n");
+ EASTLTest_Printf(" -l:N Test level in range of [1, 10]. 10 means maximum testing.\n");
+ EASTLTest_Printf(" -s:N Specify a randomization seed. 0 is default and means use clock.\n");
+ EASTLTest_Printf(" -? Show help.\n");
+ }
+
+
+ // Set up test information
+ Benchmark::Environment& environment = Benchmark::GetEnvironment();
+ environment.msPlatform = EA_PLATFORM_DESCRIPTION;
+ environment.msSTLName1 = GetStdSTLName();
+ environment.msSTLName2 = "EASTL";
+
+
+ // Run tests
+ #ifndef EA_DEBUG
+ EA::UnitTest::SetHighThreadPriority();
+ #endif
+
+ EA::StdC::Stopwatch stopwatch(EA::StdC::Stopwatch::kUnitsSeconds, true); // Measure seconds, start the counting immediately.
+
+ BenchmarkAlgorithm();
+ BenchmarkList();
+ BenchmarkString();
+ BenchmarkVector();
+ BenchmarkDeque();
+ BenchmarkSet();
+ BenchmarkMap();
+ BenchmarkHash();
+ BenchmarkHeap();
+ BenchmarkBitset();
+ BenchmarkSort();
+ BenchmarkTupleVector();
+
+ stopwatch.Stop();
+
+ #ifndef EA_DEBUG
+ EA::UnitTest::SetNormalThreadPriority();
+ #endif
+
+ Benchmark::PrintResults();
+
+ eastl::string sClockTime;
+ Benchmark::WriteTime(stopwatch.GetElapsedTime(), sClockTime);
+
+ EASTLTest_Printf("Time to complete all tests: %s.\n", sClockTime.c_str());
+
+ // Done
+ if(bWaitAtEnd)
+ {
+ EASTLTest_Printf("\nPress any key to exit.\n");
+ getchar(); // Wait for the user and shutdown
+ }
+
+ EA::EAMain::PlatformShutdown(nErrorCount);
+
+ return 0;
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/doc/Benchmarks.md b/EASTL/doc/Benchmarks.md
new file mode 100644
index 0000000..c41cdb6
--- /dev/null
+++ b/EASTL/doc/Benchmarks.md
@@ -0,0 +1,851 @@
+# EASTL Benchmarks
+
+## Introduction
+
+This document provides a number of benchmark results of EASTL. Where possible, these benchmarks are implemented as comparisons with equivalent functionality found in other libraries such as compiler STL libraries or other well-known libraries. These comparison benchmarks concentrate on highlighting the differences between implementations rather than the similarities. In many mundane cases -- such as accessing a vector element via operator [] -- virtually all vector/array implementations you are likely to run into will have identical performance.
+
+It's also important to note that the platform you run on can make a significant difference in the results. On a modern 3+GHz Windows PC many operations are fast due to large memory caches, intelligent branch prediction, and parallel instruction execution. However, on embedded or console systems none of these may be the case.
+
+While EASTL generally outperforms std STL, there are some benchmarks here in which EASTL is slower than std STL. There are three primary explanations of this:
+
+1. EASTL is making some kind of speed, memory, or design tradeoff that results in the given speed difference. In may such cases, EASTL goes slower on one benchmark in order to go faster on another benchmark deemed more important. This explanation constitutes about 60% of the cases.
+2. Compiler optimizations and resulting code generation is coincidencally favoring one kind of implementation over another, often when they are visually virtually identical. This explantation constitutes about 30% of the cases.
+3. EASTL is simply not yet as optimized as it could be. This explanation constitutes about 10% of the cases (as of this writing there are about three such functions throughout EASTL).
+
+## Benchmarks
+
+Below is a table of links to detailed benchmark results derived from the Benchmark test present in the EASTL package. The detailed results are present below the table. Additional platforms will be added as results become available for those platforms. Debug benchmarks are present because (lack of) debug performance can be significant for highly templated libraries. EASTL has specific optimizations to enhance debug performance relative to other standard libraries; in some cases it is 10x or more faster than alternatives (though there are exceptions where EASTL is slower). Feel free to submit results for additional compilers/platforms.
+
+| Platform | Compiler | STL type | Build | Results |
+|------|------|------|------|------|
+| Win32 | VC++ 7.1 | Microsoft (Dinkumware) | Debug | [Detail]() |
+| Win32 | VC++ 7.1 | Microsoft (Dinkumware) | Release | [Detail]() |
+| Win32 | VC++ 7.1 | STLPort | Debug | [Detail]() |
+| Win32 | VC++ 7.1 | STLPort | Release | [Detail]() |
+
+### Win32.VC71.MS.Debug
+
+```
+EASTL version: 0.96.00
+Platform: Windows on X86
+Compiler: Microsoft Visual C++ compiler, version 1310
+Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled.
+Build: Debug. Inlining disabled. STL debug features disabled.
+
+Values are times to complete tests; smaller values are better.
+Alarm indicates a greater than 10% difference.
+
+Test VC++ EASTL Ratio Alarm
+----------------------------------------------------------------------------------------
+algorithm/adj_find/vector<TestObject> 33061345 6497757 5.09 *
+algorithm/copy/vector<LargePOD> 5844906 4876076 1.20 *
+algorithm/copy/vector<uint32_t> 1634346 166065 9.84 *
+algorithm/copy_backward/vector<LargePOD> 4515974 4638892 0.97
+algorithm/copy_backward/vector<uint32_t> 1821168 121746 14.96 *
+algorithm/count/vector<uint64_t> 17048884 2720766 6.27 *
+algorithm/equal_range/vector<uint64_t> 1111147812 448756888 2.48 *
+algorithm/fill/bool[] 1728722 91936 18.80 *
+algorithm/fill/char[]/'d' 1299200 33745 38.50 *
+algorithm/fill/vector<char>/'d' 10205092 33796 100.00 *
+algorithm/fill/vector<char>/0 10200748 33805 100.00 *
+algorithm/fill/vector<uint64_t> 10416538 1399687 7.44 *
+algorithm/fill/vector<void*> 10221837 1307700 7.82 *
+algorithm/fill_n/bool[] 1399033 34196 40.91 *
+algorithm/fill_n/char[] 1299225 33754 38.49 *
+algorithm/fill_n/vector<uint64_t> 5961637 1371900 4.35 *
+algorithm/find_end/string/end 16569373 2657372 6.24 *
+algorithm/find_end/string/middle 16558638 20242410 0.82 *
+algorithm/find_end/string/none 16811207 40480468 0.42 *
+algorithm/lex_cmp/schar[] 1749674 194429 9.00 *
+algorithm/lex_cmp/vector<TestObject> 32824195 5253587 6.25 *
+algorithm/lex_cmp/vector<uchar> 29852034 202658 100.00 *
+algorithm/lower_bound/vector<TestObject> 798624462 350027935 2.28 *
+algorithm/min_element/vector<TestObject> 21675298 5314676 4.08 *
+algorithm/rand_shuffle/vector<uint64_t> 84236190 43677506 1.93 *
+algorithm/reverse/list<TestObject> 3007292 2105799 1.43 *
+algorithm/reverse/vector<TestObject> 2974618 2124796 1.40 *
+algorithm/search/string<char> 16228158 3594268 4.52 *
+algorithm/search_n/string<char> 16926985 1522096 11.12 *
+algorithm/unique/vector<TestObject> 54206243 9988002 5.43 *
+algorithm/unique/vector<uint32_t> 26940079 1741991 15.47 *
+algorithm/unique/vector<uint64_t> 47621344 5213127 9.13 *
+algorithm/upper_bound/vector<uint32_t> 372381295 137901552 2.70 *
+
+bitset<1500>/>>=/1 90196544 92539832 0.97
+bitset<1500>/count 50753832 53742117 0.94
+bitset<1500>/flip 86935875 85121117 1.02
+bitset<1500>/reset 78153837 79922611 0.98
+bitset<1500>/set() 79214968 79360658 1.00
+bitset<1500>/set(i) 11300589 12199651 0.93
+bitset<1500>/test 11282679 13186450 0.86 *
+
+bitset<15>/>>=/1 10500577 6000559 1.75 *
+bitset<15>/count 4000356 6399753 0.63 *
+bitset<15>/flip 7268877 5647944 1.29 *
+bitset<15>/reset 8564235 5800163 1.48 *
+bitset<15>/set() 9935523 5914012 1.68 *
+bitset<15>/set(i) 11199703 12503637 0.90 *
+bitset<15>/test 10600623 12899592 0.82 *
+
+bitset<35>/>>=/1 13076052 6599834 1.98 *
+bitset<35>/count 4800384 11500330 0.42 *
+bitset<35>/flip 7915439 5816313 1.36 *
+bitset<35>/reset 9400049 5803180 1.62 *
+bitset<35>/set() 10701152 5840316 1.83 *
+bitset<35>/set(i) 11342936 12271128 0.92
+bitset<35>/test 10670799 13099682 0.81 *
+
+bitset<75>/>>=/1 14198834 17151088 0.83 *
+bitset<75>/count 5795530 8576373 0.68 *
+bitset<75>/flip 8516703 8922995 0.95
+bitset<75>/reset 9999970 8526095 1.17 *
+bitset<75>/set() 11124877 9009686 1.23 *
+bitset<75>/set(i) 11300563 12531618 0.90 *
+bitset<75>/test 11031913 13100523 0.84 *
+
+deque<ValuePair>/erase 743801706 335646802 2.22 *
+deque<ValuePair>/insert 742331809 341912866 2.17 *
+deque<ValuePair>/iteration 29097030 16315827 1.78 *
+deque<ValuePair>/operator[] 49859598 24026313 2.08 *
+deque<ValuePair>/push_back 424807033 34497608 12.31 *
+deque<ValuePair>/push_front 402313373 38006322 10.59 *
+deque<ValuePair>/sort 725101017 581796551 1.25 *
+
+hash_map<string, uint32_t>/clear 559462 961019 0.58 *
+hash_map<string, uint32_t>/count 53377807 8091448 6.60 *
+hash_map<string, uint32_t>/erase pos 613573 858084 0.72 *
+hash_map<string, uint32_t>/erase range 5488748 461134 11.90 *
+hash_map<string, uint32_t>/erase val 35760096 16379858 2.18 *
+hash_map<string, uint32_t>/find 43490335 10324823 4.21 *
+hash_map<string, uint32_t>/find_as/char* 49343818 8617139 5.73 *
+hash_map<string, uint32_t>/insert 107420281 168690439 0.64 *
+hash_map<string, uint32_t>/iteration 2456356 1255153 1.96 *
+hash_map<string, uint32_t>/operator[] 47209502 12581624 3.75 *
+
+hash_map<uint32_t, TestObject>/clear 533172 546449 0.98
+hash_map<uint32_t, TestObject>/count 28667432 2899997 9.89 *
+hash_map<uint32_t, TestObject>/erase pos 683239 538289 1.27 *
+hash_map<uint32_t, TestObject>/erase range 9632676 253037 38.07 *
+hash_map<uint32_t, TestObject>/erase val 25466026 7752188 3.29 *
+hash_map<uint32_t, TestObject>/find 20048253 4678502 4.29 *
+hash_map<uint32_t, TestObject>/insert 71085798 37686187 1.89 *
+hash_map<uint32_t, TestObject>/iteration 1460318 1338317 1.09
+hash_map<uint32_t, TestObject>/operator[] 23226692 7888748 2.94 *
+
+heap (uint32_t[])/make_heap 5399966 6961305 0.78 *
+heap (uint32_t[])/pop_heap 108060534 103511318 1.04
+heap (uint32_t[])/push_heap 22595661 16640688 1.36 *
+heap (uint32_t[])/sort_heap 93559424 83076731 1.13 *
+
+heap (vector<TestObject>)/make_heap 91770743 21724870 4.22 *
+heap (vector<TestObject>)/pop_heap 1175599317 284007398 4.14 *
+heap (vector<TestObject>)/push_heap 207804541 45918046 4.53 *
+heap (vector<TestObject>)/sort_heap 970394145 208321477 4.66 *
+
+list<TestObject>/ctor(it) 805539509 760938607 1.06
+list<TestObject>/ctor(n) 80959236 75106995 1.08
+list<TestObject>/erase 1052543704 1044976137 1.01
+list<TestObject>/find 97785267 75970884 1.29 *
+list<TestObject>/insert 873895175 807051107 1.08
+list<TestObject>/push_back 812797710 780742425 1.04
+list<TestObject>/remove 1850600714 1436980599 1.29 *
+list<TestObject>/reverse 180270465 80466636 2.24 *
+list<TestObject>/size/1 440148 599642 0.73 *
+list<TestObject>/size/10 439433 1329817 0.33 * EASTL intentionally implements list::size as O(n).
+list<TestObject>/size/100 439595 11030060 0.04 * EASTL intentionally implements list::size as O(n).
+list<TestObject>/splice 177106094 69383027 2.55 *
+
+map<TestObject, uint32_t>/clear 508283 470807 1.08
+map<TestObject, uint32_t>/count 43145354 14280357 3.02 *
+map<TestObject, uint32_t>/equal_range 38594004 16520447 2.34 *
+map<TestObject, uint32_t>/erase/key 33948082 16123175 2.11 *
+map<TestObject, uint32_t>/erase/pos 578332 455201 1.27 * MS uses a code bloating implementation of erase.
+map<TestObject, uint32_t>/erase/range 387345 284538 1.36 *
+map<TestObject, uint32_t>/find 22897224 12766100 1.79 *
+map<TestObject, uint32_t>/insert 61665800 47286928 1.30 *
+map<TestObject, uint32_t>/iteration 1977202 745391 2.65 *
+map<TestObject, uint32_t>/lower_bound 19892941 12260928 1.62 *
+map<TestObject, uint32_t>/operator[] 24199084 15429634 1.57 *
+map<TestObject, uint32_t>/upper_bound 19842409 12064441 1.64 *
+
+set<uint32_t>/clear 1027625 1000901 1.03
+set<uint32_t>/count 39730182 13329565 2.98 *
+set<uint32_t>/equal_range 34681649 14768827 2.35 *
+set<uint32_t>/erase range 841458 602030 1.40 *
+set<uint32_t>/erase/pos 1380485 1084303 1.27 * MS uses a code bloating implementation of erase.
+set<uint32_t>/erase/val 31617425 13344023 2.37 *
+set<uint32_t>/find 19582428 10788864 1.82 *
+set<uint32_t>/insert 61434014 48232086 1.27 *
+set<uint32_t>/iteration 1512057 667820 2.26 *
+set<uint32_t>/lower_bound 18394885 10402785 1.77 *
+set<uint32_t>/upper_bound 17189083 10554425 1.63 *
+
+sort/q_sort/TestObject[] 87088799 15037988 5.79 *
+sort/q_sort/TestObject[]/sorted 21502892 3284299 6.55 *
+sort/q_sort/vector<TestObject> 87962047 15004677 5.86 *
+sort/q_sort/vector<TestObject>/sorted 21396523 3341163 6.40 *
+sort/q_sort/vector<ValuePair> 80334589 10429161 7.70 *
+sort/q_sort/vector<ValuePair>/sorted 22133295 3230553 6.85 *
+sort/q_sort/vector<uint32> 72195388 5940302 12.15 *
+sort/q_sort/vector<uint32>/sorted 19635171 995495 19.72 *
+
+string<char16_t>/compare 523013373 534722089 0.98
+string<char16_t>/erase/pos,n 3446597 3439492 1.00
+string<char16_t>/find/p,pos,n 383873158 441902786 0.87 *
+string<char16_t>/find_first_not_of/p,pos,n 174157 134131 1.30 *
+string<char16_t>/find_first_of/p,pos,n 11715423 8520944 1.37 *
+string<char16_t>/find_last_of/p,pos,n 1871556 1226457 1.53 *
+string<char16_t>/insert/pos,p 3624877 3357058 1.08
+string<char16_t>/iteration 6766787933 581916665 11.63 *
+string<char16_t>/operator[] 4820827 2335579 2.06 *
+string<char16_t>/push_back 59812962 6757466 8.85 *
+string<char16_t>/replace/pos,n,p,n 4371279 4459713 0.98
+string<char16_t>/reserve 2307530 1919386 1.20 *
+string<char16_t>/rfind/p,pos,n 734826 372615 1.97 *
+string<char16_t>/size 41608 28866 1.44 *
+string<char16_t>/swap 1033932 1490994 0.69 *
+
+string<char8_t>/compare 63086797 64194771 0.98
+string<char8_t>/erase/pos,n 2045687 1960270 1.04
+string<char8_t>/find/p,pos,n 123872549 471364764 0.26 *
+string<char8_t>/find_first_not_of/p,pos,n 140013 130271 1.07
+string<char8_t>/find_first_of/p,pos,n 8051906 8749994 0.92
+string<char8_t>/find_last_of/p,pos,n 1318835 1230715 1.07
+string<char8_t>/insert/pos,p 1770610 1724234 1.03
+string<char8_t>/iteration 28112136 2544475 11.05 *
+string<char8_t>/operator[] 4810525 2255841 2.13 *
+string<char8_t>/push_back 54869634 6127447 8.95 *
+string<char8_t>/replace/pos,n,p,n 2737578 2847900 0.96
+string<char8_t>/reserve 1123395 394902 2.84 *
+string<char8_t>/rfind/p,pos,n 737299 368518 2.00 *
+string<char8_t>/size 42245 26801 1.58 *
+string<char8_t>/swap 1036142 1491028 0.69 *
+
+vector<uint64>/erase 56417135 55770251 1.01
+vector<uint64>/insert 56617761 56100468 1.01
+vector<uint64>/iteration 10413895 1291269 8.06 *
+vector<uint64>/operator[] 23507193 3479390 6.76 *
+vector<uint64>/push_back 34687939 13806627 2.51 *
+vector<uint64>/sort 256886550 84669657 3.03 *
+```
+
+### Win32.VC71.MS.Release
+
+```
+EASTL version: 0.96.00
+Platform: Windows on X86
+Compiler: Microsoft Visual C++ compiler, version 1310
+Allocator: PPMalloc::GeneralAllocator. Thread safety enabled.
+Build: Full optimization. Inlining enabled.
+
+Values are times to complete tests; smaller values are better.
+Alarm indicates a greater than 10% difference.
+
+Test VC++ EASTL Ratio Alarm
+----------------------------------------------------------------------------------------
+algorithm/adj_find/vector<TestObject> 2783546 2750660 1.01
+algorithm/copy/vector<LargePOD> 6474025 4972738 1.30 *
+algorithm/copy/vector<uint32_t> 157267 173162 0.91
+algorithm/copy_backward/vector<LargePOD> 4836406 4374780 1.11 *
+algorithm/copy_backward/vector<uint32_t> 104780 120912 0.87 *
+algorithm/count/vector<uint64_t> 1368440 1368696 1.00
+algorithm/equal_range/vector<uint64_t> 114199387 102783938 1.11 *
+algorithm/fill/bool[] 253215 27353 9.26 *
+algorithm/fill/char[]/'d' 253164 27404 9.24 *
+algorithm/fill/vector<char>/'d' 253105 27362 9.25 *
+algorithm/fill/vector<char>/0 253275 27353 9.26 *
+algorithm/fill/vector<uint64_t> 397001 394323 1.01
+algorithm/fill/vector<void*> 547196 642362 0.85 *
+algorithm/fill_n/bool[] 229177 27361 8.38 *
+algorithm/fill_n/char[] 228845 27404 8.35 *
+algorithm/fill_n/vector<uint64_t> 565233 1376822 0.41 *
+algorithm/find_end/string/end 2107116 82356 25.59 *
+algorithm/find_end/string/middle 2111672 664283 3.18 *
+algorithm/find_end/string/none 2110423 1519596 1.39 *
+algorithm/lex_cmp/schar[] 741021 176162 4.21 *
+algorithm/lex_cmp/vector<TestObject> 2610494 2642183 0.99
+algorithm/lex_cmp/vector<uchar> 697595 167866 4.16 *
+algorithm/lower_bound/vector<TestObject> 62462233 58146664 1.07
+algorithm/min_element/vector<TestObject> 4350385 2671227 1.63 *
+algorithm/rand_shuffle/vector<uint64_t> 10868261 11300818 0.96
+algorithm/reverse/list<TestObject> 483718 470024 1.03
+algorithm/reverse/vector<TestObject> 476739 484322 0.98
+algorithm/search/string<char> 2560387 1259496 2.03 *
+algorithm/search_n/string<char> 2770991 458524 6.04 *
+algorithm/unique/vector<TestObject> 4194520 4658910 0.90 *
+algorithm/unique/vector<uint32_t> 538730 787924 0.68 *
+algorithm/unique/vector<uint64_t> 3169829 2575636 1.23 *
+algorithm/upper_bound/vector<uint32_t> 27495562 25321593 1.09
+
+bitset<1500>/>>=/1 33464228 33469719 1.00
+bitset<1500>/count 18736116 18814903 1.00
+bitset<1500>/flip 19299309 18605438 1.04
+bitset<1500>/reset 22200487 15262847 1.45 *
+bitset<1500>/set() 14418193 17557319 0.82 *
+bitset<1500>/set(i) 1599250 1599199 1.00
+bitset<1500>/test 1599241 1599233 1.00
+
+bitset<15>/>>=/1 2199222 2264442 0.97
+bitset<15>/count 1399406 1399193 1.00
+bitset<15>/flip 1266712 1199197 1.06
+bitset<15>/reset 1399364 1399109 1.00
+bitset<15>/set() 1199197 999201 1.20 *
+bitset<15>/set(i) 1599258 1462952 1.09
+bitset<15>/test 1599275 1599224 1.00
+
+bitset<35>/>>=/1 2599266 1933376 1.34 *
+bitset<35>/count 2599240 2592559 1.00
+bitset<35>/flip 1693124 1199188 1.41 *
+bitset<35>/reset 1399406 999201 1.40 *
+bitset<35>/set() 1599403 1199205 1.33 *
+bitset<35>/set(i) 1599241 1599190 1.00
+bitset<35>/test 1599250 1599232 1.00
+
+bitset<75>/>>=/1 4199332 4199213 1.00
+bitset<75>/count 2999497 2199341 1.36 *
+bitset<75>/flip 2399499 1830178 1.31 *
+bitset<75>/reset 2199468 1199197 1.83 *
+bitset<75>/set() 1999387 1199851 1.67 *
+bitset<75>/set(i) 1599266 1599198 1.00
+bitset<75>/test 1599241 1662651 0.96
+
+deque<ValuePair>/erase 90444165 37113253 2.44 *
+deque<ValuePair>/insert 93299349 36175167 2.58 *
+deque<ValuePair>/iteration 2756414 2122076 1.30 *
+deque<ValuePair>/operator[] 5117969 4632075 1.10
+deque<ValuePair>/push_back 30300757 3060357 9.90 *
+deque<ValuePair>/push_front 25498529 2808392 9.08 *
+deque<ValuePair>/sort 142283047 111292464 1.28 *
+
+hash_map<string, uint32_t>/clear 146769 389699 0.38 *
+hash_map<string, uint32_t>/count 13059434 3460324 3.77 *
+hash_map<string, uint32_t>/erase pos 184246 331925 0.56 *
+hash_map<string, uint32_t>/erase range 382432 167237 2.29 *
+hash_map<string, uint32_t>/erase val 6187898 3302114 1.87 *
+hash_map<string, uint32_t>/find 11289369 3459024 3.26 *
+hash_map<string, uint32_t>/find_as/char* 13559192 3662387 3.70 *
+hash_map<string, uint32_t>/insert 17514012 14095176 1.24 *
+hash_map<string, uint32_t>/iteration 801014 218450 3.67 *
+hash_map<string, uint32_t>/operator[] 11457065 3690385 3.10 *
+
+hash_map<uint32_t, TestObject>/clear 141865 265379 0.53 *
+hash_map<uint32_t, TestObject>/count 1766045 703613 2.51 *
+hash_map<uint32_t, TestObject>/erase pos 172337 218458 0.79 *
+hash_map<uint32_t, TestObject>/erase range 537846 102340 5.26 *
+hash_map<uint32_t, TestObject>/erase val 2220132 1441787 1.54 *
+hash_map<uint32_t, TestObject>/find 1612994 1043953 1.55 *
+hash_map<uint32_t, TestObject>/insert 7141547 4348056 1.64 *
+hash_map<uint32_t, TestObject>/iteration 199512 169328 1.18 *
+hash_map<uint32_t, TestObject>/operator[] 1831733 1519707 1.21 *
+
+heap (uint32_t[])/make_heap 3366247 1949093 1.73 *
+heap (uint32_t[])/pop_heap 57280514 53779440 1.07
+heap (uint32_t[])/push_heap 9700217 7582935 1.28 *
+heap (uint32_t[])/sort_heap 47227751 46131948 1.02
+
+heap (vector<TestObject>)/make_heap 11458442 11510819 1.00
+heap (vector<TestObject>)/pop_heap 122897267 119061132 1.03
+heap (vector<TestObject>)/push_heap 21688481 21176220 1.02
+heap (vector<TestObject>)/sort_heap 90867380 88869523 1.02
+
+list<TestObject>/ctor(it) 74591104 69845817 1.07
+list<TestObject>/ctor(n) 6243998 5838582 1.07
+list<TestObject>/erase 299509298 206013676 1.45 *
+list<TestObject>/find 40927185 14514243 2.82 *
+list<TestObject>/insert 71277251 47234534 1.51 *
+list<TestObject>/push_back 73780527 44116725 1.67 *
+list<TestObject>/remove 786197776 326434612 2.41 *
+list<TestObject>/reverse 49283128 25029678 1.97 *
+list<TestObject>/size/1 159741 139400 1.15 *
+list<TestObject>/size/10 159324 346579 0.46 * EASTL intentionally implements list::size as O(n).
+list<TestObject>/size/100 159188 97235419 0.00 * EASTL intentionally implements list::size as O(n).
+list<TestObject>/splice 63548584 19322931 3.29 *
+
+map<TestObject, uint32_t>/clear 167408 170501 0.98
+map<TestObject, uint32_t>/count 10213685 4748346 2.15 *
+map<TestObject, uint32_t>/equal_range 9515053 5677558 1.68 *
+map<TestObject, uint32_t>/erase/key 6646260 4302300 1.54 *
+map<TestObject, uint32_t>/erase/pos 297135 327938 0.91 MS uses a code bloating implementation of erase.
+map<TestObject, uint32_t>/erase/range 148614 163702 0.91
+map<TestObject, uint32_t>/find 5637531 4767055 1.18 *
+map<TestObject, uint32_t>/insert 9591128 9030349 1.06
+map<TestObject, uint32_t>/iteration 323595 325261 0.99
+map<TestObject, uint32_t>/lower_bound 5398239 4784089 1.13 *
+map<TestObject, uint32_t>/operator[] 5631250 5141166 1.10
+map<TestObject, uint32_t>/upper_bound 5436336 4762431 1.14 *
+
+set<uint32_t>/clear 155983 156026 1.00
+set<uint32_t>/count 9635965 4392146 2.19 *
+set<uint32_t>/equal_range 8504157 5247832 1.62 *
+set<uint32_t>/erase range 140488 119408 1.18 *
+set<uint32_t>/erase/pos 260678 286697 0.91 MS uses a code bloating implementation of erase.
+set<uint32_t>/erase/val 6008225 4012825 1.50 *
+set<uint32_t>/find 5145432 4381945 1.17 *
+set<uint32_t>/insert 8087129 8697251 0.93
+set<uint32_t>/iteration 271507 304538 0.89 *
+set<uint32_t>/lower_bound 4666228 4404250 1.06
+set<uint32_t>/upper_bound 4623600 4402974 1.05
+
+sort/q_sort/TestObject[] 9596169 5578652 1.72 *
+sort/q_sort/TestObject[]/sorted 602463 1016132 0.59 *
+sort/q_sort/vector<TestObject> 9674828 5430199 1.78 *
+sort/q_sort/vector<TestObject>/sorted 606908 1111647 0.55 *
+sort/q_sort/vector<ValuePair> 6284194 3423452 1.84 *
+sort/q_sort/vector<ValuePair>/sorted 711629 569364 1.25 *
+sort/q_sort/vector<uint32> 5453379 2916146 1.87 *
+sort/q_sort/vector<uint32>/sorted 537047 419144 1.28 *
+
+string<char16_t>/compare 435083295 251985824 1.73 *
+string<char16_t>/erase/pos,n 3454842 3451858 1.00
+string<char16_t>/find/p,pos,n 401954723 165298157 2.43 *
+string<char16_t>/find_first_not_of/p,pos,n 131452 65374 2.01 *
+string<char16_t>/find_first_of/p,pos,n 11657444 4144515 2.81 *
+string<char16_t>/find_last_of/p,pos,n 1604248 567571 2.83 *
+string<char16_t>/insert/pos,p 3398734 3355460 1.01
+string<char16_t>/iteration 218856504 218771844 1.00
+string<char16_t>/operator[] 714161 240023 2.98 *
+string<char16_t>/push_back 34968235 2444897 14.30 *
+string<char16_t>/replace/pos,n,p,n 4226693 4198498 1.01
+string<char16_t>/reserve 1901765 390805 4.87 *
+string<char16_t>/rfind/p,pos,n 195483 150985 1.29 *
+string<char16_t>/size 11169 11245 0.99
+string<char16_t>/swap 1459280 419807 3.48 *
+
+string<char8_t>/compare 63071275 77209580 0.82 *
+string<char8_t>/erase/pos,n 2008652 1944494 1.03
+string<char8_t>/find/p,pos,n 123201023 167536164 0.74 *
+string<char8_t>/find_first_not_of/p,pos,n 93372 67864 1.38 *
+string<char8_t>/find_first_of/p,pos,n 7542492 3375758 2.23 *
+string<char8_t>/find_last_of/p,pos,n 933972 583576 1.60 *
+string<char8_t>/insert/pos,p 1737213 1750847 0.99
+string<char8_t>/iteration 893834 899130 0.99
+string<char8_t>/operator[] 817879 313437 2.61 *
+string<char8_t>/push_back 20857734 2004410 10.41 *
+string<char8_t>/replace/pos,n,p,n 2578696 2607655 0.99
+string<char8_t>/reserve 915127 85289 10.73 *
+string<char8_t>/rfind/p,pos,n 196103 148894 1.32 *
+string<char8_t>/size 11619 11220 1.04
+string<char8_t>/swap 1461056 419874 3.48 *
+
+vector<uint64>/erase 55235116 55284587 1.00
+vector<uint64>/insert 55166046 55142755 1.00
+vector<uint64>/iteration 553954 509719 1.09
+vector<uint64>/operator[] 1284239 798516 1.61 *
+vector<uint64>/push_back 5399549 3867959 1.40 *
+vector<uint64>/sort 43636314 42619952 1.02
+```
+
+### Win32.VC71.STLPort.Debug
+
+```
+EASTL version: 0.96.00
+Platform: Windows on X86
+Compiler: Microsoft Visual C++ compiler, version 1310
+Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled.
+Build: Debug. Inlining disabled. STL debug features disabled.
+
+Values are times to complete tests; smaller values are better.
+Alarm indicates a greater than 10% difference.
+
+Test STLPort EASTL Ratio Alarm
+----------------------------------------------------------------------------------------
+algorithm/adj_find/vector<TestObject> 5661170 5689517 1.00
+algorithm/copy/vector<LargePOD> 5573815 5124428 1.09
+algorithm/copy/vector<uint32_t> 148273 125782 1.18 *
+algorithm/copy_backward/vector<LargePOD> 5429791 4834510 1.12 *
+algorithm/copy_backward/vector<uint32_t> 156765 163038 0.96
+algorithm/count/vector<uint64_t> 2730922 2730072 1.00
+algorithm/equal_range/vector<uint64_t> 639366489 452896251 1.41 *
+algorithm/fill/bool[] 1299326 27361 47.49 *
+algorithm/fill/char[]/'d' 27378 27361 1.00
+algorithm/fill/vector<char>/'d' 34459 27361 1.26 *
+algorithm/fill/vector<char>/0 1299224 27361 47.48 *
+algorithm/fill/vector<uint64_t> 1400647 1400145 1.00
+algorithm/fill/vector<void*> 1308779 1309085 1.00
+algorithm/fill_n/bool[] 1299156 27352 47.50 *
+algorithm/fill_n/char[] 1299258 27369 47.47 *
+algorithm/fill_n/vector<uint64_t> 1451162 1313632 1.10
+algorithm/find_end/string/end 13089999 2526412 5.18 *
+algorithm/find_end/string/middle 12627412 20190101 0.63 *
+algorithm/find_end/string/none 12704185 40728803 0.31 *
+algorithm/lex_cmp/schar[] 1749844 195806 8.94 *
+algorithm/lex_cmp/vector<TestObject> 5060968 4799882 1.05
+algorithm/lex_cmp/vector<uchar> 1668354 189490 8.80 *
+algorithm/lower_bound/vector<TestObject> 450240945 353437573 1.27 *
+algorithm/min_element/vector<TestObject> 5861744 5326371 1.10
+algorithm/rand_shuffle/vector<uint64_t> 40780449 45780090 0.89 *
+algorithm/reverse/list<TestObject> 2657678 2130627 1.25 *
+algorithm/reverse/vector<TestObject> 2666424 2124889 1.25 *
+algorithm/search/string<char> 3110379 3613460 0.86 *
+algorithm/search_n/string<char> 3061665 1521261 2.01 *
+algorithm/unique/vector<TestObject> 12423684 9485439 1.31 *
+algorithm/unique/vector<uint32_t> 3718699 1726596 2.15 *
+algorithm/unique/vector<uint64_t> 6205110 4591631 1.35 *
+algorithm/upper_bound/vector<uint32_t> 185391094 139336317 1.33 *
+
+bitset<1500>/>>=/1 120666960 92449816 1.31 * STLPort is broken, neglects wraparound check.
+bitset<1500>/count 201709793 52874726 3.81 *
+bitset<1500>/flip 87360297 81737071 1.07
+bitset<1500>/reset 23950178 77390323 0.31 *
+bitset<1500>/set() 84608107 76912011 1.10
+bitset<1500>/set(i) 18023620 12229604 1.47 *
+bitset<1500>/test 18006553 13276396 1.36 *
+
+bitset<15>/>>=/1 11935904 6012695 1.99 * STLPort is broken, neglects wraparound check.
+bitset<15>/count 9368581 6022742 1.56 *
+bitset<15>/flip 11600706 6533635 1.78 *
+bitset<15>/reset 5830957 5874690 0.99
+bitset<15>/set() 11695328 5701621 2.05 *
+bitset<15>/set(i) 16363205 12570216 1.30 *
+bitset<15>/test 16743172 13201452 1.27 *
+
+bitset<35>/>>=/1 22950918 6774457 3.39 * STLPort is broken, neglects wraparound check.
+bitset<35>/count 12655309 11736256 1.08
+bitset<35>/flip 13738575 5800042 2.37 *
+bitset<35>/reset 15561434 5800510 2.68 *
+bitset<35>/set() 13564283 5600709 2.42 *
+bitset<35>/set(i) 18519689 12199973 1.52 *
+bitset<35>/test 18000569 13103566 1.37 *
+
+bitset<75>/>>=/1 25579525 16669664 1.53 * STLPort is broken, neglects wraparound check.
+bitset<75>/count 18740698 8480492 2.21 *
+bitset<75>/flip 13555630 8300335 1.63 *
+bitset<75>/reset 15200133 8200000 1.85 *
+bitset<75>/set() 14408112 8001959 1.80 *
+bitset<75>/set(i) 18137741 12374257 1.47 *
+bitset<75>/test 18422135 13100038 1.41 *
+
+deque<ValuePair>/erase 651933790 326443043 2.00 *
+deque<ValuePair>/insert 659786183 333304660 1.98 *
+deque<ValuePair>/iteration 23734592 16173706 1.47 *
+deque<ValuePair>/operator[] 59126816 23911774 2.47 *
+deque<ValuePair>/push_back 58056988 31859266 1.82 *
+deque<ValuePair>/push_front 57780891 31743199 1.82 *
+deque<ValuePair>/sort 818414195 596568113 1.37 *
+
+hash_map<string, uint32_t>/clear 3422133 2204517 1.55 *
+hash_map<string, uint32_t>/count 9869545 8624924 1.14 *
+hash_map<string, uint32_t>/erase pos 3256350 2069299 1.57 *
+hash_map<string, uint32_t>/erase range 3230203 1151392 2.81 *
+hash_map<string, uint32_t>/erase val 16860362 15939778 1.06
+hash_map<string, uint32_t>/find 10286971 9920910 1.04
+hash_map<string, uint32_t>/find_as/char* 118136025 9458468 12.49 *
+hash_map<string, uint32_t>/insert 188948336 174490082 1.08
+hash_map<string, uint32_t>/iteration 4037049 2021036 2.00 *
+hash_map<string, uint32_t>/operator[] 11472127 12887699 0.89 *
+
+hash_map<uint32_t, TestObject>/clear 2522264 1331848 1.89 *
+hash_map<uint32_t, TestObject>/count 3210739 2897063 1.11 *
+hash_map<uint32_t, TestObject>/erase pos 1862281 1304783 1.43 *
+hash_map<uint32_t, TestObject>/erase range 698079 579606 1.20 *
+hash_map<uint32_t, TestObject>/erase val 8806722 7041298 1.25 *
+hash_map<uint32_t, TestObject>/find 3604875 4709645 0.77 *
+hash_map<uint32_t, TestObject>/insert 40785711 40376342 1.01
+hash_map<uint32_t, TestObject>/iteration 3064088 1508834 2.03 *
+hash_map<uint32_t, TestObject>/operator[] 6053742 8176906 0.74 *
+
+heap (uint32_t[])/make_heap 5799813 5738596 1.01
+heap (uint32_t[])/pop_heap 113775168 102076134 1.11 *
+heap (uint32_t[])/push_heap 21649151 16854845 1.28 *
+heap (uint32_t[])/sort_heap 97535213 83290735 1.17 *
+
+heap (vector<TestObject>)/make_heap 22215557 22277063 1.00
+heap (vector<TestObject>)/pop_heap 275392171 277340039 0.99
+heap (vector<TestObject>)/push_heap 51479442 47342577 1.09
+heap (vector<TestObject>)/sort_heap 214474736 218497540 0.98
+
+list<TestObject>/ctor(it) 767753795 753421427 1.02
+list<TestObject>/ctor(n) 74185322 73386245 1.01
+list<TestObject>/erase 1021003824 1033873589 0.99
+list<TestObject>/find 77666072 74917622 1.04
+list<TestObject>/insert 788071150 774188737 1.02
+list<TestObject>/push_back 760490154 737327348 1.03
+list<TestObject>/remove 1682511938 1434771006 1.17 *
+list<TestObject>/reverse 87237327 80394623 1.09
+list<TestObject>/size/1 3828111 599530 6.39 *
+list<TestObject>/size/10 9600605 1329535 7.22 * EASTL intentionally implements list::size as O(n).
+list<TestObject>/size/100 62952334 15022551 4.19 * EASTL intentionally implements list::size as O(n).
+list<TestObject>/splice 96536412 60804817 1.59 *
+
+map<TestObject, uint32_t>/clear 1142127 1099066 1.04
+map<TestObject, uint32_t>/count 19659726 14647548 1.34 *
+map<TestObject, uint32_t>/equal_range 36680687 18219086 2.01 *
+map<TestObject, uint32_t>/erase/key 28892154 16037774 1.80 *
+map<TestObject, uint32_t>/erase/pos 1209643 1185495 1.02
+map<TestObject, uint32_t>/erase/range 715402 670539 1.07
+map<TestObject, uint32_t>/find 21020992 13429575 1.57 *
+map<TestObject, uint32_t>/insert 59530871 51120640 1.16 *
+map<TestObject, uint32_t>/iteration 972825 1191946 0.82 *
+map<TestObject, uint32_t>/lower_bound 18852651 12495034 1.51 *
+map<TestObject, uint32_t>/operator[] 22889573 16676736 1.37 *
+map<TestObject, uint32_t>/upper_bound 18603584 12406922 1.50 *
+
+set<uint32_t>/clear 919555 882988 1.04
+set<uint32_t>/count 17561110 12461084 1.41 *
+set<uint32_t>/equal_range 31522488 15230282 2.07 *
+set<uint32_t>/erase range 687582 564765 1.22 *
+set<uint32_t>/erase/pos 1044352 1045355 1.00
+set<uint32_t>/erase/val 25525304 12940774 1.97 *
+set<uint32_t>/find 17140751 10704866 1.60 *
+set<uint32_t>/insert 56035051 45555664 1.23 *
+set<uint32_t>/iteration 682669 640831 1.07
+set<uint32_t>/lower_bound 16339932 10475740 1.56 *
+set<uint32_t>/upper_bound 17779424 10652599 1.67 *
+
+sort/q_sort/TestObject[] 17000866 14823515 1.15 *
+sort/q_sort/TestObject[]/sorted 6658559 3263328 2.04 *
+sort/q_sort/vector<TestObject> 17476629 14953285 1.17 *
+sort/q_sort/vector<TestObject>/sorted 6667034 3327435 2.00 *
+sort/q_sort/vector<ValuePair> 15391357 10820848 1.42 *
+sort/q_sort/vector<ValuePair>/sorted 6617122 3232949 2.05 *
+sort/q_sort/vector<uint32> 8343906 6014846 1.39 *
+sort/q_sort/vector<uint32>/sorted 3039430 1003127 3.03 *
+
+string<char16_t>/compare 1489709846 532664000 2.80 *
+string<char16_t>/erase/pos,n 3528690 3439864 1.03
+string<char16_t>/find/p,pos,n 2521448321 443752189 5.68 *
+string<char16_t>/find_first_not_of/p,pos,n 661206 137419 4.81 *
+string<char16_t>/find_first_of/p,pos,n 54746434 8521335 6.42 *
+string<char16_t>/find_last_of/p,pos,n 10607778 1212414 8.75 *
+string<char16_t>/insert/pos,p 3445016 3360126 1.03
+string<char16_t>/iteration 580955636 579452556 1.00
+string<char16_t>/operator[] 2206353 1987809 1.11 *
+string<char16_t>/push_back 22421368 6007808 3.73 *
+string<char16_t>/replace/pos,n,p,n 5138454 4464786 1.15 *
+string<char16_t>/reserve 4922413418 335622 100.00 *
+string<char16_t>/rfind/p,pos,n 1440308 380578 3.78 *
+string<char16_t>/size 25355 25398 1.00
+string<char16_t>/swap 2122704 1490823 1.42 *
+
+string<char8_t>/compare 77222134 77443134 1.00
+string<char8_t>/erase/pos,n 1965344 1956521 1.00
+string<char8_t>/find/p,pos,n 2468091951 474205522 5.20 *
+string<char8_t>/find_first_not_of/p,pos,n 660960 130211 5.08 *
+string<char8_t>/find_first_of/p,pos,n 55020899 9240171 5.95 *
+string<char8_t>/find_last_of/p,pos,n 10576210 1239053 8.54 *
+string<char8_t>/insert/pos,p 1822756 1750880 1.04
+string<char8_t>/iteration 2617889 2540148 1.03
+string<char8_t>/operator[] 2254794 2256443 1.00
+string<char8_t>/push_back 12463022 5210321 2.39 *
+string<char8_t>/replace/pos,n,p,n 3744862 2855260 1.31 *
+string<char8_t>/reserve 1372046888 218815 100.00 *
+string<char8_t>/rfind/p,pos,n 1446232 366902 3.94 *
+string<char8_t>/size 26859 25431 1.06
+string<char8_t>/swap 2123350 1490509 1.42 *
+
+vector<uint64>/erase 55164013 56417449 0.98
+vector<uint64>/insert 55872973 56432664 0.99
+vector<uint64>/iteration 1329102 1324623 1.00
+vector<uint64>/operator[] 5264738 3136746 1.68 *
+vector<uint64>/push_back 14903245 13171175 1.13 *
+vector<uint64>/sort 88429095 88542171 1.00
+```
+
+### Win32.VC71.STLPort.Release
+
+```
+EASTL version: 0.96.00
+Platform: Windows on X86
+Compiler: Microsoft Visual C++ compiler, version 1310
+Allocator: PPMalloc::GeneralAllocator. Thread safety enabled.
+Build: Full optimization. Inlining enabled.
+
+Values are times to complete tests; smaller values are better.
+Alarm indicates a greater than 10% difference.
+
+Test STLPort EASTL Ratio Alarm
+----------------------------------------------------------------------------------------
+algorithm/adj_find/vector<TestObject> 2741046 2731441 1.00
+algorithm/copy/vector<LargePOD> 6065923 5085142 1.19 *
+algorithm/copy/vector<uint32_t> 158304 165555 0.96
+algorithm/copy_backward/vector<LargePOD> 4710258 4896476 0.96
+algorithm/copy_backward/vector<uint32_t> 146030 142630 1.02
+algorithm/count/vector<uint64_t> 1395921 1406334 0.99
+algorithm/equal_range/vector<uint64_t> 211692764 118969493 1.78 *
+algorithm/fill/bool[] 366078 33737 10.85 *
+algorithm/fill/char[]/'d' 33736 33771 1.00
+algorithm/fill/vector<char>/'d' 28466 33720 0.84 *
+algorithm/fill/vector<char>/0 366086 33728 10.85 *
+algorithm/fill/vector<uint64_t> 466250 401591 1.16 *
+algorithm/fill/vector<void*> 521603 693481 0.75 *
+algorithm/fill_n/bool[] 599709 33762 17.76 *
+algorithm/fill_n/char[] 599573 33711 17.79 *
+algorithm/fill_n/vector<uint64_t> 434971 1374084 0.32 *
+algorithm/find_end/string/end 1494742 85349 17.51 *
+algorithm/find_end/string/middle 1480700 687208 2.15 *
+algorithm/find_end/string/none 1540540 1546431 1.00
+algorithm/lex_cmp/schar[] 921638 178797 5.15 *
+algorithm/lex_cmp/vector<TestObject> 2623559 2643551 0.99
+algorithm/lex_cmp/vector<uchar> 960899 183608 5.23 *
+algorithm/lower_bound/vector<TestObject> 60630534 56531528 1.07
+algorithm/min_element/vector<TestObject> 4209022 2768527 1.52 *
+algorithm/rand_shuffle/vector<uint64_t> 13762010 15969052 0.86 *
+algorithm/reverse/list<TestObject> 673387 731825 0.92
+algorithm/reverse/vector<TestObject> 634576 754511 0.84 *
+algorithm/search/string<char> 1262599 1387608 0.91
+algorithm/search_n/string<char> 1166242 458592 2.54 *
+algorithm/unique/vector<TestObject> 4912193 5336317 0.92
+algorithm/unique/vector<uint32_t> 809387 809081 1.00
+algorithm/unique/vector<uint64_t> 4371814 2414255 1.81 *
+algorithm/upper_bound/vector<uint32_t> 31899081 29555596 1.08
+
+bitset<1500>/>>=/1 63308136 40553560 1.56 * STLPort is broken, neglects wraparound check.
+bitset<1500>/count 62523178 22799473 2.74 *
+bitset<1500>/flip 20302845 19919232 1.02
+bitset<1500>/reset 18892015 15403148 1.23 *
+bitset<1500>/set() 15803302 17322192 0.91
+bitset<1500>/set(i) 2799271 2999310 0.93
+bitset<1500>/test 2999293 2799262 1.07
+
+bitset<15>/>>=/1 1199239 3199256 0.37 * STLPort is broken, neglects wraparound check.
+bitset<15>/count 3599461 2199231 1.64 *
+bitset<15>/flip 1199231 1199188 1.00
+bitset<15>/reset 1199188 1199180 1.00
+bitset<15>/set() 1199214 1199180 1.00
+bitset<15>/set(i) 2599257 1399262 1.86 *
+bitset<15>/test 2599274 2599283 1.00
+
+bitset<35>/>>=/1 6643974 4599239 1.44 * STLPort is broken, neglects wraparound check.
+bitset<35>/count 5151331 5399438 0.95
+bitset<35>/flip 1999404 1199273 1.67 *
+bitset<35>/reset 9805285 1399313 7.01 *
+bitset<35>/set() 2799279 1199248 2.33 *
+bitset<35>/set(i) 2799246 1599241 1.75 *
+bitset<35>/test 2999234 2999251 1.00
+
+bitset<75>/>>=/1 7002045 6999333 1.00 STLPort is broken, neglects wraparound check.
+bitset<75>/count 5999351 3002259 2.00 *
+bitset<75>/flip 3599334 3599163 1.00
+bitset<75>/reset 9799344 3399218 2.88 *
+bitset<75>/set() 3599232 3599062 1.00
+bitset<75>/set(i) 2799228 1599284 1.75 *
+bitset<75>/test 2999250 2799339 1.07
+
+deque<ValuePair>/erase 127108651 115258113 1.10
+deque<ValuePair>/insert 137727889 116552332 1.18 *
+deque<ValuePair>/iteration 7144182 6009899 1.19 *
+deque<ValuePair>/operator[] 34241222 20535039 1.67 *
+deque<ValuePair>/push_back 6585800 3932126 1.67 *
+deque<ValuePair>/push_front 6805865 3993513 1.70 *
+deque<ValuePair>/sort 395352323 348778188 1.13 *
+
+hash_map<string, uint32_t>/clear 426640 447015 0.95
+hash_map<string, uint32_t>/count 4359344 3883089 1.12 *
+hash_map<string, uint32_t>/erase pos 584392 458142 1.28 *
+hash_map<string, uint32_t>/erase range 221034 196078 1.13 *
+hash_map<string, uint32_t>/erase val 3539867 3790813 0.93
+hash_map<string, uint32_t>/find 3966831 3811910 1.04
+hash_map<string, uint32_t>/find_as/char* 11591612 4243710 2.73 *
+hash_map<string, uint32_t>/insert 16763887 16719194 1.00
+hash_map<string, uint32_t>/iteration 909968 478609 1.90 *
+hash_map<string, uint32_t>/operator[] 4360041 4108313 1.06
+
+hash_map<uint32_t, TestObject>/clear 302634 283722 1.07
+hash_map<uint32_t, TestObject>/count 916487 907426 1.01
+hash_map<uint32_t, TestObject>/erase pos 388042 321385 1.21 *
+hash_map<uint32_t, TestObject>/erase range 122680 116280 1.06
+hash_map<uint32_t, TestObject>/erase val 1710931 1729529 0.99
+hash_map<uint32_t, TestObject>/find 1089462 1346527 0.81 *
+hash_map<uint32_t, TestObject>/insert 4560310 5072350 0.90 *
+hash_map<uint32_t, TestObject>/iteration 960117 495354 1.94 *
+hash_map<uint32_t, TestObject>/operator[] 1872830 1890595 0.99
+
+heap (uint32_t[])/make_heap 3528418 3327257 1.06
+heap (uint32_t[])/pop_heap 63243859 61011853 1.04
+heap (uint32_t[])/push_heap 11602424 10045869 1.15 *
+heap (uint32_t[])/sort_heap 52965362 48744729 1.09
+
+heap (vector<TestObject>)/make_heap 13191456 13089711 1.01
+heap (vector<TestObject>)/pop_heap 148555656 144787742 1.03
+heap (vector<TestObject>)/push_heap 28696689 26618830 1.08
+heap (vector<TestObject>)/sort_heap 112473989 114018643 0.99
+
+list<TestObject>/ctor(it) 80186731 74006287 1.08
+list<TestObject>/ctor(n) 6232311 6128007 1.02
+list<TestObject>/erase 344556374 212877808 1.62 *
+list<TestObject>/find 39859075 14591347 2.73 *
+list<TestObject>/insert 86935153 56138233 1.55 *
+list<TestObject>/push_back 79569180 46700641 1.70 *
+list<TestObject>/remove 785786758 324201016 2.42 *
+list<TestObject>/reverse 45248186 24852759 1.82 *
+list<TestObject>/size/1 219844 219496 1.00
+list<TestObject>/size/10 519563 519579 1.00 EASTL intentionally implements list::size as O(n).
+list<TestObject>/size/100 4567194 101230266 0.05 * EASTL intentionally implements list::size as O(n).
+list<TestObject>/splice 68321087 23601687 2.89 *
+
+map<TestObject, uint32_t>/clear 168011 180540 0.93
+map<TestObject, uint32_t>/count 4830439 5139287 0.94
+map<TestObject, uint32_t>/equal_range 8700090 6158531 1.41 *
+map<TestObject, uint32_t>/erase/key 6696776 4617038 1.45 *
+map<TestObject, uint32_t>/erase/pos 309273 333183 0.93
+map<TestObject, uint32_t>/erase/range 137419 136068 1.01
+map<TestObject, uint32_t>/find 4773498 4931352 0.97
+map<TestObject, uint32_t>/insert 9651877 9311699 1.04
+map<TestObject, uint32_t>/iteration 372946 416364 0.90 *
+map<TestObject, uint32_t>/lower_bound 4784234 4915797 0.97
+map<TestObject, uint32_t>/operator[] 5040254 5183147 0.97
+map<TestObject, uint32_t>/upper_bound 4724292 4915984 0.96
+
+set<uint32_t>/clear 165300 173289 0.95
+set<uint32_t>/count 4958654 4885086 1.02
+set<uint32_t>/equal_range 8434134 5698681 1.48 *
+set<uint32_t>/erase range 145554 133960 1.09
+set<uint32_t>/erase/pos 299914 324760 0.92
+set<uint32_t>/erase/val 6506155 4335034 1.50 *
+set<uint32_t>/find 4866879 4556043 1.07
+set<uint32_t>/insert 8340523 8957257 0.93
+set<uint32_t>/iteration 294465 343442 0.86 *
+set<uint32_t>/lower_bound 4548095 4756498 0.96
+set<uint32_t>/upper_bound 4559196 4521498 1.01
+
+sort/q_sort/TestObject[] 7316766 7013894 1.04
+sort/q_sort/TestObject[]/sorted 1668439 1332885 1.25 *
+sort/q_sort/vector<TestObject> 7331530 7017260 1.04
+sort/q_sort/vector<TestObject>/sorted 1601629 1247120 1.28 *
+sort/q_sort/vector<ValuePair> 7071643 7067869 1.00
+sort/q_sort/vector<ValuePair>/sorted 2136390 1703799 1.25 *
+sort/q_sort/vector<uint32> 3292891 2943627 1.12 *
+sort/q_sort/vector<uint32>/sorted 653693 473612 1.38 *
+
+string<char16_t>/compare 356579259 432760228 0.82 *
+string<char16_t>/erase/pos,n 3430422 3428645 1.00
+string<char16_t>/find/p,pos,n 229263402 225830975 1.02
+string<char16_t>/find_first_not_of/p,pos,n 187391 81404 2.30 *
+string<char16_t>/find_first_of/p,pos,n 4411831 4413532 1.00
+string<char16_t>/find_last_of/p,pos,n 731655 726155 1.01
+string<char16_t>/insert/pos,p 3408628 3319726 1.03
+string<char16_t>/iteration 309993861 310333547 1.00
+string<char16_t>/operator[] 580839 579904 1.00
+string<char16_t>/push_back 3983338 2975553 1.34 *
+string<char16_t>/replace/pos,n,p,n 4361095 4211504 1.04
+string<char16_t>/reserve 935141729 247010 100.00 *
+string<char16_t>/rfind/p,pos,n 248956 223397 1.11 *
+string<char16_t>/size 13311 13107 1.02
+string<char16_t>/swap 519129 579445 0.90 *
+
+string<char8_t>/compare 76695559 76828015 1.00
+string<char8_t>/erase/pos,n 1951566 1947282 1.00
+string<char8_t>/find/p,pos,n 185878944 185605039 1.00
+string<char8_t>/find_first_not_of/p,pos,n 196877 81600 2.41 *
+string<char8_t>/find_first_of/p,pos,n 4147685 4145356 1.00
+string<char8_t>/find_last_of/p,pos,n 605897 598222 1.01
+string<char8_t>/insert/pos,p 1781592 1768264 1.01
+string<char8_t>/iteration 921502 921272 1.00
+string<char8_t>/operator[] 361250 359873 1.00
+string<char8_t>/push_back 3363288 2530493 1.33 *
+string<char8_t>/replace/pos,n,p,n 2682600 2633130 1.02
+string<char8_t>/reserve 672517501 78387 100.00 *
+string<char8_t>/rfind/p,pos,n 226202 200013 1.13 *
+string<char8_t>/size 11280 11109 1.02
+string<char8_t>/swap 519393 559759 0.93
+
+vector<uint64>/erase 55184856 55192217 1.00
+vector<uint64>/insert 56764267 55682726 1.02
+vector<uint64>/iteration 423122 424039 1.00
+vector<uint64>/operator[] 1189397 860991 1.38 *
+vector<uint64>/push_back 5626609 4027317 1.40 *
+vector<uint64>/sort 49227036 49231362 1.00
+```
+
+----------------------------------------------
+End of document
diff --git a/EASTL/doc/BestPractices.md b/EASTL/doc/BestPractices.md
new file mode 100644
index 0000000..cadb7fa
--- /dev/null
+++ b/EASTL/doc/BestPractices.md
@@ -0,0 +1,749 @@
+# EASTL Best Practices
+
+In this document we discuss best practices for using EASTL. The primary emphasis is on performance with a secondary emphasis on correctness and maintainability. Some best practices apply only to some situations, and these will be pointed out as we go along. In order to be easily digestible, we present these practices as a list of items in the tone of the Effective C++ series of books.
+
+## Summary
+
+The descriptions here are intentionally terse; this is to make them easier to visually scan.
+
+1. [Consider intrusive containers.](#consider-intrusive-containers)
+2. [Consider fixed-size containers.](#consider-fixed-size-containers)
+3. [Consider custom allocators.](#consider-custom-allocators)
+4. [Consider hash tables instead of maps.](#consider-hash-tables-instead-of-maps)
+5. [Consider a vector_map (a.k.a. sorted vector) for unchanging data.](#consider-a-vector_map-aka-sorted-vector-for-unchanging-data)
+6. [Consider slist instead of list.](#consider-slist-instead-of-list)
+7. [Avoid redundant end() and size() in loops.](#avoid-redundant-end-and-size-in-loops)
+8. [Iterate containers instead of using operator\[\].](#iterate-containers-instead-of-using-operator)
+9. [Learn to use the string class appropriately.](#learn-to-use-the-string-class-appropriately)
+10. [Cache list size if you want size() to be O(1).](#cache-list-size-if-you-want-listsize-to-be-o1)
+11. [Use empty() instead of size() when possible.](#use-empty-instead-of-size-when-possible)
+12. [Know your container efficiencies.](#know-your-container-efficiencies)
+13. [Use vector::reserve.](#use-vectorreserve)
+14. [Use vector::set_capacity to trim memory usage.](#use-vectorset_capacity-to-trim-memory-usage)
+15. [Use swap() instead of a manually implemented version.](#use-swap-instead-of-a-manually-implemented-version)
+16. [Consider storing pointers instead of objects.](#consider-storing-pointers-instead-of-objects)
+17. [Consider smart pointers instead of raw pointers.](#consider-smart-pointers-instead-of-raw-pointers)
+18. [Use iterator pre-increment instead of post-increment.](#use-iterator-pre-increment-instead-of-post-increment)
+19. [Make temporary references so the code can be traced/debugged.](#make-temporary-references-so-the-code-can-be-traceddebugged)
+20. [Consider bitvector or bitset instead of vector\<bool>.](#consider-bitvector-or-bitset-instead-of-vector)
+21. [Vectors can be treated as contiguous memory.](#vectors-can-be-treated-as-contiguous-memory)
+22. [Search hash_map\<string> via find_as() instead of find().](#search-hash_map-via-find_as-instead-of-find)
+23. [Take advantage of type_traits (e.g. EASTL_DECLARE_TRIVIAL_RELOCATE).](#take-advantage-of-type_traits-eg-eastl_declare_trivial_relocate)
+24. [Name containers to track memory usage.](#name-containers-to-track-memory-usage)
+25. [Learn the algorithms.](#learn-the-algorithms)
+26. [Pass and return containers by reference instead of value.](#pass-and-return-containers-by-reference-instead-of-value)
+27. [Consider using reset() for fast container teardown.](#consider-using-reset-for-fast-container-teardown)
+28. [Consider using fixed_substring instead of copying strings.](#consider-using-fixed_substring-instead-of-copying-strings)
+29. [Consider using vector::push_back(void).](#consider-using-vectorpush_backvoid)
+
+## Detail
+
+### Consider intrusive containers.
+
+Intrusive containers (such as intrusive_list) differ from regular containers (such as list) in that they use the stored objects to manage the linked list instead of using nodes allocated from a memory heap. The result is better usage of memory. Additionally intrusive_list objects can be removed from their list without knowing what list they belong to. To make an intrusive_list of Widgets, you have Widget inherit from intrusive_list_node or simply have mpPrev/mpNext member variables.
+
+To create an intrusive_list container, you can use the following code:
+
+```cpp
+class Widget : public intrusive_list_node
+
+{ };
+
+
+
+intrusive_list<Widget> widgetList;
+
+widgetList.push_back(someWidget);
+```
+
+### Consider fixed-size containers.
+
+Fixed-size containers (such as fixed_list) are variations of regular containers (such as list) in that they allocate from a fixed block of local memory instead of allocating from a generic heap. The result is better usage of memory due to reduced fragmentation, better cache behavior, and faster allocation/deallocation. The presence of fixed-size containers negate the most common complaint that people have about STL: that it fragments the heap or "allocates all over the place."
+
+EASTL fixed containers include:
+
+* fixed_list
+* fixed_slist
+* fixed_vector
+* fixed_string
+* fixed_map
+* fixed_multimap
+* fixed_set
+* fixed_multiset
+* fixed_hash_map
+* fixed_hash_multimap
+* fixed_hash_set
+* fixed_hash_multiset
+
+To create a fixed_set, you can use the following code:
+
+```cpp
+fixed_set<int, 25> intSet; // Create a set capable of holding 25 elements.
+
+intSet.push_back(37);
+```
+
+### Consider custom allocators.
+
+While EASTL provides fixed-size containers in order to control container memory usage, EASTL lets you assign a custom allocator to any container. This lets you define your own memory pool. EASTL has a more flexible and powerful mechanism of doing this that standard STL, as EASTL understands object alignment requirements, allows for debug naming, allows for sharing allocators across containers, and allows dynamic allocator assignment.
+
+To create a list container that uses your custom allocator and uses block naming, you can use the following code:
+
+```cpp
+list<int> intList(pSomeAllocator, "graphics/intList");
+
+intList.push_back(37);
+```
+
+### Consider hash tables instead of maps.
+
+Hash containers (such as hash_map) provide the same interface as associative containers (such as map) but have faster lookup and use less memory. The primary disadvantage relative to associative containers is that hash containers are not sorted.
+
+To make a hash_map (dictionary) of integers to strings, you can use the following code:
+```cpp
+hash_map<int, const char*> stringTable;
+
+stringTable[37] = "hello";
+```
+
+### Consider a vector_map (a.k.a. sorted vector) for unchanging data.
+
+You can improve speed, memory usage, and cache behavior by using a vector_map instead of a map (or vector_set instead of set, etc.). The primary disadvantage of vector_map is that insertions and removal of elements is O(n) instead of O(1). However, if your associative container is not going to be changing much or at all, you can benefit from using a vector_map. Consider calling reserve on the vector_map in order to set the desired capacity up front.
+
+To make a vector_set, you can use the following code:
+
+```cpp
+vector_set<int> intSet(16); // Create a vector_set with an initial capacity of 16.
+
+intSet.insert(37);
+```
+
+Note that you can use containers other than vector to implement vector_set. Here's how you do it with deque:
+
+```cpp
+vector_set<int, less<int>, EASTLAllocatorType, deque<int> > intSet;
+
+intSet.insert(37);
+```
+
+### Consider slist instead of list.
+
+An slist is a singly-linked list; it is much like a list except that it can only be traversed in a forward direction and not a backward direction. The benefit is that each node is 4 bytes instead of 8 bytes. This is a small improvement, but if you don't need reverse iteration then it can be an improvement. There's also intrusive_slist as an option.
+
+To make an slist, you can use the following code:
+
+```cpp
+slist<int> intSlist;
+
+intSlist.push_front(37);
+```
+
+### Avoid redundant end() and size() in loops.
+
+Instead of writing code like this:
+
+```cpp
+for(deque<int>::iterator it = d.begin(); it != d.end(); ++it)
+
+ ...
+```
+
+write code like this:
+
+```cpp
+for(deque<int>::iterator it = d.begin(), itEnd = d.end(); it != itEnd; ++it)
+
+ ...
+```
+
+The latter avoids a function call and return of an object (which in deque's case happens to be more than just a pointer). The above only works when the container is unchanged or for containers that have a constant end value. But "constant end value" we mean containers which can be modified but end always remains the same.
+
+| Constant begin | Non-constant begin | Constant end | Non-constant end |
+|------|------|------|------|
+| array<sup>1</sup> | string<br> vector<br> deque<br> intrusive_list<br> intrusive_slist<br> vector_map<br> vector_multimap<br> vector_set<br> vector_multiset<br> bit_vector<br> hash_map<br> hash_multimap<br> hash_set<br> hash_multiset<br> intrusive_hash_map<br> intrusive_hash_multimap<br> intrusive_hash_set<br> intrusive_hash_multiset | array<br> list<br> slist<br> intrusive_list<br> intrusive_slist<br> map<br> multimap<br> set<br> multiset<br> hash_map<sup>2</sup><br> hash_multimap<sup>2</sup><br> hash_set<sup>2</sup><br> hash_multiset<sup>2</sup><br> intrusive_hash_map<br> intrusive_hash_multimap<br> intrusive_hash_set<br> intrusive_hash_multiset | string<br> vector<br> deque<br> vector_map<br> vector_multimap<br> vector_set<br> vector_multiset<br> bit_vector |
+
+* <sup>1</sup> Arrays can be neither resized nor reallocated.
+* <sup>2</sup> Constant end if the hashtable can't/won't re-hash. Non-constant if it can re-hash.
+
+### Iterate containers instead of using operator[].
+
+It's faster to iterate random access containers via iterators than via operator[], though operator[] usage may look simpler.
+
+Instead of doing this:
+
+```cpp
+for(unsigned i = 0, iEnd = intVector.size(); i != iEnd; ++i)
+
+ intVector[i] = 37;
+```
+
+you can execute more efficiently by doing this:
+
+```cpp
+for(vector<int>::iterator it = intVector.begin(), itEnd = intVector.end(); it != itEnd; ++it)
+
+ *it = 37;
+```
+
+### Learn to use the string class appropriately.
+
+Oddly enough, the most mis-used STL container is easily the string class. The tales of string abuse could rival the 1001 Arabian Nights. Most of the abuses involve doing things in a harder way than need be. In examining the historical mis-uses of string, it is clear that many of the problems stem from the user thinking in terms of C-style string operations instead of object-oriented strings. This explains why statements such as strlen(s.c_str()) are so common, whereas the user could just use s.length() instead and be both clearer and more efficient.
+
+Here we provide a table of actual collected examples of things done and how they could have been done instead.
+
+| What was written | What could have been written |
+|------|------|
+| `s = s.Left(i) + '+' + s.Right(s.length() - i - 1);` | `s[i] = '+';` |
+| `string s(""); // This is the most commonly found misuse.` | `string s;` |
+| `s = "";` | `s.clear();` |
+| `s.c_str()[0] = 'u';` | `s[0] = 'u';` |
+| `len = strlen(s.c_str());` | `len = s.length();` |
+| `s = string("u");` | `s = "u";` |
+| `puts(s + string("u"));` | `puts(s + "u");` |
+| `string s(" ");`<br> `puts(s.c_str());` | `puts(" ");` |
+| `s.sprintf("u");` | s = "u";` |
+| `char array[32];`<br> `sprintf(array, "%d", 10);`<br> `s = string(array);` | `s.sprintf("%d", 10);` |
+
+The chances are that if you want to do something with a string, there is a very basic way to do it. You don't want your code to appear in a future version of the above table.
+
+### Cache list size if you want list::size() to be O(1).
+
+EASTL's list, slist, intrusive_list, and intrusive_slist containers have a size() implementation which is O(n). That is, these containers don't keep a count (cache) of the current list size and when you call the size() function they iterate the list. This is by design and the reasoning behind it has been deeply debated and considered (and is discussed in the FAQ and the list header file). In summary, list doesn't cache its size because the only function that would benefit is the size function while many others would be negatively impacted and the memory footprint would be negatively impacted, yet list::size is not a very frequently called function in well-designed code. At the same time, nothing prevents the user from caching the size himself, though admittedly it adds some tedium and risk to the code writing process.
+
+Here's an example of caching the list size manually:
+
+```cpp
+list<int> intList;
+
+ size_t n = 0;
+
+
+
+ intList.push_back(37);
+
+ ++n;
+
+ intList.pop_front();
+
+ --n;
+```
+
+### Use empty() instead of size() when possible.
+
+All conventional containers have both an empty function and a size function. For all containers empty() executes with O(1) (constant time) efficiency. However, this is not so for size(), as some containers need to calculate the size and others need to do pointer subtraction (which may involve integer division) to find the size.
+
+### Know your container efficiencies.
+
+The above two practices lead us to this practice, which is a generalization of the above. We present a table of basic information for the conventional EASTL containers. The values are described at the bottom.
+
+| Container | empty() efficiency | size() efficiency | operator[] efficiency | insert() efficiency | erase() efficiency | find() efficiency | sort efficiency |
+|------|------|------|------|------|------|------|------|
+| slist | 1 | O(n) | - | O(1) | O(1) | O(n) | O(n+) |
+| list | 1 | n | - | 1 | 1 | n | n log(n) |
+| intrusive_slist | 1 | n | - | 1 | 1 | 1 | n+ |
+| intrusive_list | 1 | n | - | 1 | 1 | 1 | n log(n) |
+| array | 1 | 1 | 1 | - | - | n | n log(n) |
+| vector | 1 | 1<sup>a</sup> | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) |
+| vector_set | 1 | 1<sup>a</sup> | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 |
+| vector_multiset | 1 | 1<sup>a</sup> | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 |
+| vector_map | 1 | 1<sup>a</sup> | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 |
+| vector_multimap | 1 | 1<sup>a</sup> | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 |
+| deque | 1 | 1<sup>a</sup> | 1 | 1 at begin or end, else n / 2 | 1 at begin or end, else n / 2 | n | n log(n) |
+| bit_vector | 1 | 1<sup>a</sup> | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) |
+| string, cow_string | 1 | 1<sup>a</sup> | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) |
+| set | 1 | 1 | - | log(n) | log(n) | log(n) | 1 |
+| multiset | 1 | 1 | - | log(n) | log(n) | log(n) | 1 |
+| map | 1 | 1 | log(n) | log(n) | log(n) | log(n) | 1 |
+| multimap | 1 | 1 | - | log(n) | log(n) | log(n) | 1 |
+| hash_set | 1 | 1 | - | 1 | 1 | 1 | - |
+| hash_multiset | 1 | 1 | - | 1 | 1 | 1 | - |
+| hash_map | 1 | 1 | - | 1 | 1 | 1 | - |
+| hash_multimap | 1 | 1 | - | 1 | 1 | 1 | - |
+| intrusive_hash_set | 1 | 1 | - | 1 | 1 | 1 | - |
+| intrusive_hash_multiset | 1 | 1 | - | 1 | 1 | 1 | - |
+| intrusive_hash_map | 1 | 1 | - | 1 | 1 | 1 | - |
+| intrusive_hash_multimap | 1 | 1 | - | 1 | 1 | 1 | - |
+
+Notes:
+
+* \- means that the operation does not exist.
+* 1 means amortized constant time. Also known as O(1)
+* n means time proportional to the container size. Also known as O(n)
+* log(n) means time proportional to the natural logarithm of the container size. Also known as O(log(n))
+* n log(n) means time proportional to log(n) times the size of the container. Also known as O(n log(n))
+* n+ means that the time is at least n, and possibly higher.
+* Inserting at the end of a vector may cause the vector to be resized; resizing a vector is O(n). However, the amortized time complexity for vector insertions at the end is constant.
+* Sort assumes the usage of the best possible sort for a large container of random data. Some sort algorithms (e.g. quick_sort) require random access iterators and so the sorting of some containers requires a different sort algorithm. We do not include bucket or radix sorts, as they are always O(n).
+* <sup>a</sup> vector, deque, string size is O(1) but involves pointer subtraction and thus integer division and so is not as efficient as containers that store the size directly.
+
+### Use vector::reserve.
+
+You can prevent vectors (and strings) from reallocating as you add items by specifying up front how many items you will be requiring. You can do this in the constructor or by calling the reserve function at any time. The capacity function returns the amount of space which is currently reserved.
+
+Here's how you could specify reserved capacity in a vector:
+
+```cpp
+vector<Widget> v(37); // Reserve space to hold up to 37 items.
+
+ or
+
+vector<Widget> v; // This empty construction causes to memory to be allocated or reserved.
+
+ v.reserve(37);
+```
+
+The EASTL vector (and string) implementation looks like this:
+
+```cpp
+template <typename T>
+
+ class vector {
+
+ T* mpBegin; // Beginning of used element memory.
+
+ T* mpEnd; // End of used element memory.
+
+ T* mpCapacity; // End of storage capacity. Is >= mpEnd
+
+ }
+```
+
+Another approach to being efficient with vector memory usage is to use fixed_vector.
+
+### Use vector::set_capacity to trim memory usage.
+
+A commonly asked question about vectors and strings is, "How do I reduce the capacity of a vector?" The conventional solution for std STL is to use the somewhat non-obvious trick of using vector<Widget>(v).swap(v). EASTL provides the same functionality via a member function called set_capacity() which is present in both the vector and string classes.
+
+An example of reducing a vector is the following:
+
+```cpp
+vector<Widget> v;
+
+...
+
+ v.set_capacity();
+```
+
+An example of resizing to zero and completely freeing the memory of a vector is the following:
+
+```cpp
+vector<Widget> v;
+
+ ...
+
+ v.set_capacity(0);
+```
+
+### Use swap() instead of a manually implemented version.
+
+The generic swap algorithm provides a basic version for any kind of object. However, each EASTL container provides a specialization of swap which is optimized for that container. For example, the list container implements swap by simply swapping the internal member pointers and not by moving individual elements.
+
+### Consider storing pointers instead of objects.
+
+There are times when storing pointers to objects is more efficient or useful than storing objects directly in containers. It can be more efficient to store pointers when the objects are big and the container may need to construct, copy, and destruct objects during sorting or resizing. Moving pointers is usually faster than moving objects. It can be useful to store pointers instead of objects when somebody else owns the objects or the objects are in another container. It might be useful for a Widget to be in a list and in a hash table at the same time.
+
+### Consider smart pointers instead of raw pointers.
+
+If you take the above recommendation and store objects as pointers instead of as objects, you may want to consider storing them as smart pointers instead of as regular pointers. This is particularly useful for when you want to delete the object when it is removed from the container. Smart pointers will automatically delete the pointed-to object when the smart pointer is destroyed. Otherwise, you will have to be careful about how you work with the list so that you don't generate memory leaks. Smart pointers implement a shared reference count on the stored pointer, as so any operation you do on a smart pointer container will do the right thing. Any pointer can be stored in a smart pointer, and custom new/delete mechanisms can work with smart pointers. The primary smart pointer is shared_ptr.
+
+Here is an example of creating and using a shared_ptr:
+
+```cpp
+typedef shared_ptr<Widget> WPtr;
+
+ list<WPtr> wList;
+
+
+
+ wList.push_back(WPtr(new Widget)); // The user may have operator new/delete overrides.
+
+wList.pop_back(); // Implicitly deletes the Widget.
+```
+
+Here is an example of creating and using a shared_ptr that uses a custom allocation and deallocation mechanism:
+
+```cpp
+typedef shared_ptr<Widget, EASTLAllocatorType, WidgetDelete> WPtr; // WidgetDelete is a custom destroyer.
+
+ list<WPtr> wList;
+
+
+
+ wList.push_back(WPtr(WidgetCreate(Widget))); // WidgetCreate is a custom allocator.
+
+wList.pop_back(); // Implicitly calls WidgetDelete.
+```
+
+### Use iterator pre-increment instead of post-increment.
+
+Pre-increment (e.g. ++x) of iterators is better than post-increment (x++) when the latter is not specifically needed. It is common to find code that uses post-incrementing when it could instead use pre-incrementing; presumably this is due to post-increment looking a little better visually. The problem is that the latter constructs a temporary object before doing the increment. With built-in types such as pointers and integers, the compiler will recognize that the object is a trivial built-in type and that the temporary is not needed, but the compiler cannot do this for other types, even if the compiler sees that the temporary is not used; this is because the constructor may have important side effects and the compiler would be broken if it didn't construct the temporary object.
+
+EASTL iterators are usually not trivial types and so it's best not to hope the compiler will do the best thing. Thus you should always play it safe an use pre-increment of iterators whenever post-increment is not required.
+
+Here is an example of using iterator pre-increment; for loops like this should always use pre-increment:
+
+```cpp
+for(set<int>::iterator it(intSet.begin()), itEnd(intSet.end()); it != itEnd; ++it)
+
+ *it = 37;
+```
+
+### Make temporary references so the code can be traced/debugged.
+
+Users want to be able to inspect or modify variables which are referenced by iterators. While EASTL containers and iterators are designed to make this easier than other STL implementations, it makes things very easy if the code explicitly declares a reference to the iterated element. In addition to making the variable easier to debug, it also makes code easier to read and makes the debug (and possibly release) version of the application run more efficiently.
+
+Instead of doing this:
+
+```cpp
+for(list<Widget>::iterator it = wl.begin(), itEnd = wl.end(); it != itEnd; ++it) {
+
+ (*it).x = 37;
+
+ (*it).y = 38;
+
+ (*it).z = 39;
+
+ }
+```
+
+Consider doing this:
+
+```cpp
+for(list<Widget>::iterator it = wl.begin(), itEnd = wl.end(); it != itEnd; ++it) {
+
+ Widget& w = *it; // The user can easily inspect or modify w here.
+
+ w.x = 37;
+
+ w.y = 38;
+
+ w.z = 39;
+
+ }
+```
+
+### Consider bitvector or bitset instead of vector<bool>.
+
+In EASTL, a vector of bool is exactly that. It intentionally does not attempt to make a specialization which implements a packed bit array. The bitvector class is specifically designed for this purpose. There are arguments either way, but if vector<bool> were allowed to be something other than an array of bool, it would go against user expectations and prevent users from making a true array of bool. There's a mechanism for specifically getting the bit packing, and it is bitvector.
+
+Additionally there is bitset, which is not a conventional iterateable container but instead acts like bit flags. bitset may better suit your needs than bitvector if you need to do flag/bit operations instead of array operations. bitset does have an operator[], though.
+
+### Vectors can be treated as contiguous memory.
+
+EASTL vectors (and strings) guarantee that elements are present in a linear contiguous array. This means that you can use a vector as you would a C-style array by using the vector data() member function or by using &v[0].
+
+To use a vector as a pointer to an array, you can use the following code:
+
+```cpp
+struct Widget {
+
+ uint32_t x;
+
+ uint32_t y;
+
+ };
+
+
+
+ vector<Widget> v;
+
+
+
+ quick_sort((uint64_t*)v.data(), (uint64_t*)(v.data() + v.size()));
+```
+
+### Search hash_map<string> via find_as() instead of find().
+
+EASTL hash tables offer a bonus function called find_as when lets you search a hash table by something other than the container type. This is particularly useful for hash tables of string objects that you want to search for by string literals (e.g. "hello") or char pointers. If you search for a string via the find function, your string literal will necessarily be converted to a temporary string object, which is inefficient.
+
+To use find_as, you can use the following code:
+
+```cpp
+hash_map<string, int> hashMap;
+
+ hash_map<string, int>::iterator it = hashMap.find_as("hello"); // Using default hash and compare.
+```
+
+### Take advantage of type_traits (e.g. EASTL_DECLARE_TRIVIAL_RELOCATE).
+
+EASTL includes a fairly serious type traits library that is on par with the one found in Boost but offers some additional performance-enhancing help as well. The type_traits library provides information about class *types*, as opposed to class instances. For example, the is_integral type trait tells if a type is one of int, short, long, char, uint64_t, etc.
+
+There are three primary uses of type traits:
+
+* Allowing for optimized operations on some data types.
+* Allowing for different logic pathways based on data types.
+* Allowing for compile-type assertions about data type expectations.
+
+Most of the type traits are automatically detected and implemented by the compiler. However, EASTL allows for the user to explicitly give the compiler hints about type traits that the compiler cannot know, via the EASTL_DECLARE declarations. If the user has a class that is relocatable (i.e. can safely use memcpy to copy values), the user can use the EASTL_DECLARE_TRIVIAL_RELOCATE declaration to tell the compiler that the class can be copied via memcpy. This will automatically significantly speed up some containers and algorithms that use that class.
+
+Here is an example of using type traits to tell if a value is a floating point value or not:
+
+```cpp
+template <typename T>
+
+ DoSomething(T t) {
+
+ assert(is_floating_point<T>::value);
+
+ }
+```
+
+Here is an example of declaring a class as relocatable and using it in a vector.
+
+```cpp
+EASTL_DECLARE_TRIVIAL_RELOCATE(Widget); // Usually you put this at the Widget class declaration.
+
+ vector<Widget> wVector;
+
+ wVector.erase(wVector.begin()); // This operation will be optimized via using memcpy.
+```
+
+The following is a full list of the currently recognized type traits. Most of these are implemented as of this writing, but if there is one that is missing, feel free to contact the maintainer of this library and request that it be completed.
+
+* is_void
+* is_integral
+* is_floating_point
+* is_arithmetic
+* is_fundamental
+* is_const
+* is_volatile
+* is_abstract
+* is_signed
+* is_unsigned
+* is_array
+* is_pointer
+* is_reference
+* is_member_object_pointer
+* is_member_function_pointer
+* is_member_pointer
+* is_enum
+* is_union
+* is_class
+* is_polymorphic
+* is_function
+* is_object
+* is_scalar
+* is_compound
+* is_same
+* is_convertible
+* is_base_of
+* is_empty
+* is_pod
+* is_aligned
+* has_trivial_constructor
+* has_trivial_copy
+* has_trivial_assign
+* has_trivial_destructor
+* has_trivial_relocate1
+* has_nothrow_constructor
+* has_nothrow_copy
+* has_nothrow_assign
+* has_virtual_destructor
+* alignment_of
+* rank
+* extent
+*
+<sup>1</sup> has_trivial_relocate is not found in Boost nor the C++ standard update proposal. However, it is very useful in allowing for the generation of optimized object moving operations. It is similar to the is_pod type trait, but goes further and allows non-pod classes to be categorized as relocatable. Such categorization is something that no compiler can do, as only the user can know if it is such. Thus EASTL_DECLARE_TRIVIAL_RELOCATE is provided to allow the user to give the compiler a hint.
+
+### Name containers to track memory usage.
+
+All EASTL containers which allocate memory have a built-in function called set_name and have a constructor argument that lets you specify the container name. This name is used in memory tracking and allows for the categorization and measurement of memory usage. You merely need to supply a name for your containers to use and it does the rest.
+
+Here is an example of creating a list and naming it "collision list":
+
+`list<CollisionData> collisionList(allocator("collision list"));`
+
+or
+
+```cpp
+list<CollisionData> collisionList;
+
+collisionList.get_allocator().set_name("collision list");
+```
+
+Note that EASTL containers do not copy the name contents but merely copy the name pointer. This is done for simplicity and efficiency. A user can get around this limitation by creating a persistently present string table. Additionally, the user can get around this by declaring static but non-const strings and modifying them at runtime.
+
+### Learn the algorithms.
+
+EASTL algorithms provide a variety of optimized implementations of fundamental algorithms. Many of the EASTL algorithms are the same as the STL algorithm set, though EASTL adds additional algorithms and additional optimizations not found in STL implementations such as Microsoft's. The copy algorithm, for example, will memcpy data types that have the has_trivial_relocate type trait instead of doing an element-by-element copy.
+
+The classifications we use here are not exactly the same as found in the C++ standard; they have been modified to be a little more intuitive. Not all the functions listed here may be yet available in EASTL as you read this. If you want some function then send a request to the maintainer. Detailed documentation for each algorithm is found in algorithm.h or the otherwise corresponding header file for the algorithm.
+
+**Search**
+
+* find, find_if
+* find_end
+* find_first_of
+* adjacent_find
+* binary_search
+* search, search_n
+* lower_bound
+* upper_bound
+* equal_range
+
+**Sort**
+
+* is_sorted
+* quick_sort
+* insertion_sort
+* shell_sort
+* heap_sort
+* merge_sort, merge_sort_buffer
+* merge
+* inplace_merge
+* partial_sort
+* stable_sort
+* partial_sort_copy
+* <other sort functions found in the EASTL bonus directories>
+
+**Modifying**
+
+* fill, fill_n
+* generate, generate_n
+* random_shuffle
+* swap
+* iter_swap
+* swap_ranges
+* remove, remove_if
+* remove_copy, remove_copy_if
+* replace, replace_if
+* replace_copy, replace_copy_if
+* reverse
+* reverse_copy
+* rotate
+* rotate_copy
+* partition
+* stable_partition
+* transform
+* next_permutation
+* prev_permutation
+* unique
+* unique_copy
+
+**Non-Modifying**
+
+* for_each
+* copy
+* copy_backward
+* count, count_if
+* equal
+* mismatch
+* min
+* max
+* min_element
+* max_element
+* lexicographical_compare
+* nth_element
+
+**Heap**
+
+* is_heap
+* make_heap
+* push_heap
+* pop_heap
+* change_heap
+* sort_heap
+* remove_heap
+
+**Set**
+
+* includes
+* set_difference
+* set_symmetric_difference
+* set_intersection
+* set_union
+
+### Pass and return containers by reference instead of value.
+
+If you aren't paying attention you might accidentally write code like this:
+
+```cpp
+void DoSomething(list<Widget> widgetList) {
+
+ ...
+
+}
+```
+
+The problem with the above is that widgetList is passed by value and not by reference. Thus the a copy of the container is made and passed instead of a reference of the container being passed. This may seem obvious to some but this happens periodically and the compiler gives no warning and the code will often execute properly, but inefficiently. Of course there are some occasions where you really do want to pass values instead of references.
+
+### Consider using reset() for fast container teardown.
+
+EASTL containers have a reset function which unilaterally resets the container to a newly constructed state. The contents of the container are forgotten; no destructors are called and no memory is freed. This is a risky but power function for the purpose of implementing very fast temporary containers. There are numerous cases in high performance programming when you want to create a temporary container out of a scratch buffer area, use the container, and then just "vaporize" it, as it would be waste of time to go through the trouble of clearing the container and destroying and freeing the objects. Such functionality is often used with hash tables or maps and with a stack allocator (a.k.a. linear allocator).
+
+Here's an example of usage of the reset function and a PPMalloc-like StackAllocator:
+
+```cpp
+pStackAllocator->push_bookmark();
+
+ hash_set<Widget, less<Widget>, StackAllocator> wSet(pStackAllocator);
+
+<use wSet>
+
+ wSet.reset();
+
+ pStackAllocator->pop_bookmark();
+```
+
+### Consider using fixed_substring instead of copying strings.
+
+EASTL provides a fixed_substring class which uses a reference to a character segment instead of allocating its own string memory. This can be a more efficient way to work with strings under some circumstances.
+
+Here's an example of usage of fixed_substring:
+
+```cpp
+basic_string<char> str("hello world");
+
+ fixed_substring<char> sub(str, 6, 5); // sub == "world"
+
+fixed_substring can refer to any character array and not just one that derives from a string object.
+```
+
+### Consider using vector::push_back(void).
+
+EASTL provides an alternative way to insert elements into containers that avoids copy construction and/or the creation of temporaries. Consider the following code:
+
+```cpp
+vector<Widget> widgetArray;
+
+ widgetArray.push_back(Widget());
+```
+
+The standard vector push_back function requires you to supply an object to copy from. This incurs the cost of the creation of a temporary and for some types of classes or situations this cost may be undesirable. It additionally requires that your contained class support copy-construction whereas you may not be able to support copy construction. As an alternative, EASTL provides a push_back(void) function which requires nothing to copy from but instead constructs the object in place in the container. So you can do this:
+
+```cpp
+vector<Widget> widgetArray;
+
+ widgetArray.push_back();
+
+widgetArray.back().x = 0; // Example of how to reference the new object.
+```
+
+Other containers with such copy-less functions include:
+
+```cpp
+vector::push_back()
+
+ deque::push_back()
+
+ deque::push_front()
+
+ list::push_back()
+
+ list::push_front()
+
+ slist::push_front()
+
+ map::insert(const key_type& key)
+
+ multimap::insert(const key_type& key)
+
+ hash_map::insert(const key_type& key)
+
+ hash_multimap::insert(const key_type& key)
+```
+
+Note that the map functions above allow you to insert a default value specified by key alone and not a value_type like with the other map insert functions.
+
+----------------------------------------------
+End of document
diff --git a/EASTL/doc/Bonus/tuple_vector_readme.md b/EASTL/doc/Bonus/tuple_vector_readme.md
new file mode 100644
index 0000000..f406ac5
--- /dev/null
+++ b/EASTL/doc/Bonus/tuple_vector_readme.md
@@ -0,0 +1,416 @@
+## Introduction to tuple_vector
+
+`tuple_vector` is a data container that is designed to abstract and simplify
+the handling of a "structure of arrays" layout of data in memory. In
+particular, it mimics the interface of `vector`, including functionality to do
+inserts, erases, push_backs, and random-access. It also provides a
+`RandomAccessIterator` and corresponding functionality, making it compatible
+with most STL (and STL-esque) algorithms such as ranged-for loops, `find_if`,
+`remove_if`, or `sort`.
+
+When used or applied properly, this container can improve performance of
+some algorithms through cache-coherent data accesses or allowing for
+sensible SIMD programming, while keeping the structure of a single
+container, to permit a developer to continue to use existing algorithms in
+STL and the like.
+
+## Review of "Structure of arrays" data layouts
+
+When trying to improve the performance of some code, it can sometimes be
+desirable to transform how some data is stored in memory to be laid out not as
+an "array of structures", but as a "structure of arrays". That is, instead of
+storing a series of objects as a single contiguous chunk of memory, one or
+more data members are instead stored as separate chunks of memory that are
+handled and accessed in parallel to each other.
+
+This can be beneficial in two primary respects:
+
+1) To improve the cache coherency of the data accesses, e.g. by utilizing more
+data that is loaded per cache line loaded from memory, and thereby reducing
+the amount of time waiting on memory accesses from off-CPU memory.
+This presentation from Mike Acton touches on this, among other things:
+https://www.youtube.com/watch?v=rX0ItVEVjHc
+
+2) To allow the data to be more easily loaded and utilized by SIMD kernels,
+by being able to load memory directly into a SIMD register.
+This is touched on in this presentation from Andreas Fredriksson for writing
+code with SIMD intrinsics:
+http://www.gdcvault.com/play/1022249/SIMD-at-Insomniac-Games-How
+...and as well in this guide for writing performant ISPC kernels:
+https://ispc.github.io/perfguide.html
+
+## How TupleVecImpl works
+
+`tuple_vector` inherits from `TupleVecImpl`, which
+provides the bulk of the functionality for those data containers. It manages
+the memory allocated, marshals data members to each array of memory, generates
+the necessary iterators, and so on.
+
+When a `tuple_vector` is declared, it is alongside a list of types, or "tuple
+elements", indicating what data to store in the container, similar to how `tuple`
+operates. `TupleVecImpl` uses this list of tuple elements to then inherit from a series of
+`TupleVecLeaf` structures, which each have their own pointer to an array of their
+corresponding type in memory. When dereferencing the container, either to fetch a
+tuple of references or just fetching pointers to the memory, it is these pointers
+that are utilized or fetched.
+
+While each `TupleVecLeaf` contains a pointer to its own block of memory, they
+are not individual memory allocations. When `TupleVecImpl` needs to grow its
+capacity, it calculates the total size needed for a single allocation, taking
+into account the number of objects for the container, the size of each tuple
+element's type, and the alignment requirements for each type. Pointers into the
+allocation for each tuple element are also determined at the same time, which
+are passed to each `TupleVecLeaf`. From there, many of the interactions with
+`TupleVecImpl`, to modify or access members of the container, then reference
+each `TupleVecLeaf`'s data pointer in series, using parameter packs to repeat
+each operation for each parent `TupleVecLeaf`.
+
+## How tuple_vector's iterator works
+
+`TupleVecImpl` provides a definition to an iterator type, `TupleVecIter`.
+As mentioned above, `TupleVecIter` provides all of the functionality to operate
+as a `RandomAccessIterator`. When it is dereferenced, it provides a tuple of
+references, similar to `at()` or `operator[]` on `TupleVecImpl`, as opposed to
+a reference of some other type. As well, a customization of `move_iterator` for
+`TupleVecIter` is provided, which will return a tuple of rvalue-references.
+
+The way that `TupleVecIter` operates internally is to track an index into the
+container, as well as a copy of all of the `TupleVecImpl`'s `TupleVecLeaf`
+pointers at the time of the iterator's construction. As a result, modifying the
+iterator involves just changing the index, and dereferencing the iterator into
+the tuple of references involves dereferencing each pointer with an offset
+specified by that index.
+
+Of the various ways of handling the multitude of references, this tended to
+provide the best code-generation. For example, having a tuple of pointers that
+are collectively modified with each iterator modification resulted in the compiler
+not being able to accurately determine which pointers were relevant to the final
+output of some function, creating many redundant operations. Similarly, having
+the iterator refer to the source `TupleVecImpl` for the series of pointers
+often resulted in extra, unnecessary, data hops to the `TupleVecImpl` to repeatedly
+fetch data that was not practically mutable, but theoretically mutable. While this
+solution is the heaviest in terms of storage, the resulted assembly tends to be
+competitive with traditional structure-of-arrays setups.
+
+## How to work with tuple_vector, and where to use it
+
+Put simply, `tuple_vector` can be used as a replacement for `vector`. For example,
+instead of declaring a structure and vector as:
+
+```
+struct Entity
+{
+ bool active;
+ float lifetime;
+ Vec3 position;
+}
+vector<Entity> entityVec;
+```
+
+...the `tuple_vector` equivalent of this can be defined as:
+
+```
+tuple_vector<bool, float, Vec3> entityVec;
+```
+
+In terms of how `tuple_vector` is modified and accessed, it has a similar
+featureset as `vector`, except where `vector` would accept or return a single
+value, it instead accepts or returns a tuple of values or unstructured series
+of equivalent arguments.
+
+For example, the following functions can be used to access the data, either by
+fetching a tuple of references to a series of specific values, or the data
+pointers to the tuple elements:
+
+```
+tuple<bool&, float&, Vec3&> operator[](size_type)
+tuple<bool&, float&, Vec3&> at(size_type)
+tuple<bool&, float&, Vec3&> iterator::operator*()
+tuple<bool&&, float&&, Vec3&&> move_iterator::operator*()
+tuple<bool*, float*, Vec3*> data()
+
+// extract the Ith tuple element pointer from the tuple_vector
+template<size_type I>
+T* get<I>()
+// e.g. bool* get<0>(), float* get<1>(), and Vec3* get<2>()
+
+// extract the tuple element pointer of type T from the tuple_vector
+// note that this function can only be used if there is one instance
+// of type T in the tuple_vector's elements
+template<typename T>
+T* get<T>()
+// e.g. bool* get<bool>(), float* get<float>(), and Vec3* get<Vec3>()
+```
+
+And `push_back(...)` has the following overloads, accepting either values or tuples as needed.
+
+```
+tuple<bool&, float&, Vec3&> push_back()
+push_back(const bool&, const float&, const Vec3&)
+push_back(tuple<const bool&, const float&,const Vec3&>)
+push_back(bool&&, float&&, Vec3&&)
+push_back(tuple<bool&&, float&&, Vec3&&>)
+```
+...and so on, and so forth, for others like the constructor, `insert(...)`,
+`emplace(...)`, `emplace_back(...)`, `assign(...)`, and `resize(...)`.
+
+As well, note that the tuple types that are accepted or returned for
+`tuple_vector<Ts...>` have typedefs available in the case of not wanting to use
+automatic type deduction:
+```
+typedef eastl::tuple<Ts...> value_tuple;
+typedef eastl::tuple<Ts&...> reference_tuple;
+typedef eastl::tuple<const Ts&...> const_reference_tuple;
+typedef eastl::tuple<Ts*...> ptr_tuple;
+typedef eastl::tuple<const Ts*...> const_ptr_tuple;
+typedef eastl::tuple<Ts&&...> rvalue_tuple;
+```
+With this, and the fact that the iterator type satisfies
+the `RandomAccessIterator` requirements, it is possible to use `tuple_vector` in
+most ways and manners that `vector` was previously used, with few structural
+differences.
+
+However, even if not using it strictly as a replacement for `vector`, it is
+still useful as a tool for simplifying management of a traditional structure of
+arrays. That is, it is possible to use `tuple_vector` to just perform a single
+large memory allocation instead of a series of smaller memory allocations,
+by sizing the `tuple_vector` as needed, fetching the necessary pointers with
+`data()` or `get<...>()`, and carrying on normally.
+
+One example where this can be utilized is with ISPC integration. Given the
+following ISPC function definition:
+
+ export void simple(uniform float vin[], uniform float vfactors[], uniform float vout[], uniform int size);
+
+...which generates the following function prototype for C/C++ usage:
+
+ extern void simple(float* vin, float* vfactors, float* vout, int32_t size);
+
+...this can be utilized with some raw float arrays:
+```
+float* vin = new float[NumElements];
+float* vfactors = new float[NumElements];
+float* vout = new float[NumElements];
+
+// Initialize input buffer
+for (int i = 0; i < NumElements; ++i)
+{
+ vin[i] = (float)i;
+ vfactors[i] = (float)i / 2.0f;
+}
+
+// Call simple() function from simple.ispc file
+simple(vin, vfactors, vout, NumElements);
+
+delete vin;
+delete vfactors;
+delete vout;
+```
+or, with `tuple_vector`:
+
+```
+tuple_vector<float, float, float> simpleData(NumElements);
+float* vin = simpleData.get<0>();
+float* vfactors = simpleData.get<1>();
+float* vout = simpleData.get<2>();
+
+// Initialize input buffer
+for (int i = 0; i < NumElements; ++i)
+{
+ vin[i] = (float)i;
+ vfactors[i] = (float)i / 2.0f;
+}
+
+// Call simple() function from simple.ispc file
+simple(vin, vfactors, vout, NumElements);
+```
+
+`simpleData` here only has a single memory allocation during its construction,
+instead of the three in the first example, and also automatically releases the
+memory when it falls out of scope.
+
+It is possible to also skip a memory allocation entirely, in some circumstances.
+EASTL provides "fixed" counterparts of many data containers which allows for a
+data container to have an inlined buffer of memory. For example,
+`eastl::vector<typename T>` has the following counterpart:
+
+ eastl::fixed_vector<typename T, size_type nodeCount, bool enableOverflow = true>
+
+This buffer allows for enough space to hold a `nodeCount` number of `T` objects,
+skipping any memory allocation at all, until the requested size becomes
+greater than `nodeCount` - assuming `enableOverflow` is True.
+
+There is a similar counterpart to `eastl::tuple_vector<typename... Ts>` available as well:
+
+ eastl::fixed_tuple_vector<size_type nodeCount, bool enableOverflow, typename... Ts>
+
+This does the similar legwork in creating an inlined buffer, and all of the
+functionality of `tuple_vector` otherwise is supported. Note the slight
+difference in declaration, though: `nodeCount` and `enableOverflow` are defined
+first, and `enableOverflow` is not a default parameter. This change arises out
+of restrictions surrounding variadic templates, in that they must be declared
+last, and cannot be mixed with default template parameters.
+
+Lastly, `eastl::vector` and other EASTL data containers support custom Memory Allocator
+types, through their template parameters. For example, `eastl::vector`'s full declaration
+is actually:
+
+ eastl::vector<typename T, typename AllocatorType = EASTLAllocatorType>
+
+However, because such a default template parameter cannot be used with
+variadic templates, a separate type for `tuple_vector` is required for such a
+definition:
+
+ eastl::tuple_vector_alloc<typename AllocatorType, typename... Ts>
+
+Note that `tuple_vector` uses EASTLAllocatorType as the allocator.
+
+## Performance comparisons/discussion
+
+A small benchmark suite for `tuple_vector` is included when running the
+EASTLBenchmarks project. It provides the following output on a Core i7 3770k
+(Skylake) at 3.5GHz, with DDR3-1600 memory.
+
+The `tuple_vector` benchmark cases compare total execution time of similar
+algorithms run against `eastl::tuple_vector` and `std::vector`, such as
+erasing or inserting elements, iterating through the array to find a specific
+element, sum all of the elements together via operator[] access, or just
+running `eastl::sort` on the data containers. More information about the
+EASTLBenchmarks suite can be found in EASTL/doc/EASTL Benchmarks.html
+
+Benchmark | STD execution time | EASTL execution time | Ratio
+--------- | -------- | ---------- | -----
+`tuple_vector<AutoRefCount>/erase ` | 1.7 ms | 1.7 ms | 1.00
+`tuple_vector<MovableType>/erase ` | 104.6 ms | 106.3 ms | 0.98
+`tuple_vector<MovableType>/reallocate ` | 1.3 ms | 1.7 ms | 0.77 -
+ | | |
+`tuple_vector<uint64>/erase ` | 3.4 ms | 3.5 ms | 0.98
+`tuple_vector<uint64>/insert ` | 3.4 ms | 3.4 ms | 0.99
+`tuple_vector<uint64>/iteration ` | 56.3 us | 81.4 us | 0.69 -
+`tuple_vector<uint64>/operator[] ` | 67.4 us | 61.8 us | 1.09
+`tuple_vector<uint64>/push_back ` | 1.3 ms | 818.3 us | 1.53 +
+`tuple_vector<uint64>/sort ` | 5.8 ms | 7.3 ms | 0.80
+ | | |
+`tuple_vector<uint64,Padding>/erase ` | 34.7 ms | 32.9 ms | 1.05
+`tuple_vector<uint64,Padding>/insert ` | 41.0 ms | 32.6 ms | 1.26
+`tuple_vector<uint64,Padding>/iteration ` | 247.1 us | 80.5 us | 3.07 +
+`tuple_vector<uint64,Padding>/operator[]` | 695.7 us | 81.1 us | 8.58 +
+`tuple_vector<uint64,Padding>/push_back ` | 10.0 ms | 6.0 ms | 1.67 +
+`tuple_vector<uint64,Padding>/sort ` | 8.2 ms | 10.1 ms | 0.81
+ | | |
+`vector<AutoRefCount>/erase ` | 1.3 ms | 1.2 ms | 1.05
+`vector<MovableType>/erase ` | 104.4 ms | 109.4 ms | 0.95
+`vector<MovableType>/reallocate ` | 1.5 ms | 1.5 ms | 0.95
+ | | |
+`vector<uint64>/erase ` | 4.3 ms | 3.6 ms | 1.20
+`vector<uint64>/insert ` | 4.8 ms | 4.8 ms | 1.01
+`vector<uint64>/iteration ` | 71.5 us | 77.3 us | 0.92
+`vector<uint64>/operator[] ` | 90.7 us | 87.2 us | 1.04
+`vector<uint64>/push_back ` | 1.6 ms | 1.2 ms | 1.38 +
+`vector<uint64>/sort ` | 7.7 ms | 8.2 ms | 0.93
+
+First off, `tuple_vector<uint64>`'s performance versus `std::vector<uint64>` is
+comparable, as expected, as the `tuple_vector`'s management for one type
+becomes very similar to just a regular vector. The major notable exception is
+the iteration case, which runs `eastl::find_if`. This
+performance differences is a consequence of the iterator design, and how
+it works with indices, not a direct pointer, so the code generation suffers slightly
+in this compute-bound scenario. This is worth noting as a demonstration of a
+case where falling back to pointer-based iteration by fetching the `begin` and
+`end` pointers of that tuple element may be preferable, instead of using the
+iterator constructs.
+
+The set of `tuple_vector<uint64,Padding>` tests are more interesting.
+This is a comparison between a single `std::vector` with a
+structure containing a `uint64` and 56 bytes of padding, and a `tuple_vector` with
+two elements: one for `uint64` and one for 56 bytes of padding. The erase,
+insert, push_back, and sort cases all perform at a similar relative rate as
+they did in the `tuple_vector<uint64>` tests - demonstrating that operations
+that have to touch all of elements do not have a significant change in
+performance.
+
+However, iteration and operator[] are very different, because
+those only access the `uint64` member of both `vector` and `tuple_vector` to run
+some operation. The iteration test now runs 3x faster whereas before it ran
+0.7x as fast, and operator[] runs 8.5x faster, instead of 1.1x. This
+demonstrates some of the utility of `tuple_vector`, in that these algorithms end
+up being limited by the CPU's compute capabilities, as opposed to being
+limited by how fast they can load memory in from DRAM.
+
+In a series of other tests, generally speaking, `tuple_vector` tends to perform
+on par with manual management of multiple arrays in many algorithms and
+operations, often even generating the same code. It should be noted that
+significant degrees of inlining and optimization are required to get the most out
+of `tuple_vector`. Compared to accessing a series of arrays or vectors,
+`tuple_vector` does perform a multitude of extra trivial function calls internally
+in order to manage the various elements, or interact with `eastl::tuple` through
+its interface, so running in debug configurations can run significantly slower
+in some cases, e.g. sometimes running at 0.2x the speed compared to vector.
+
+## The problem of referencing tuple elements
+
+This will be experienced shortly after using `tuple_vector` in most capacities,
+but it should be noted that the most significant drawback is that there is no
+way to **symbolically** reference each tuple element of the `tuple_vector` - much
+in the same way as `tuple`. For example, if translating a struct such as...
+
+```
+struct Entity
+{
+ float x, y, z;
+ float lifetime;
+};
+```
+...to `tuple_vector`, it will exist as:
+
+```
+tuple_vector<float, float, float, float> entityVec;
+```
+
+...and can only be accessed in a manner like `entityVec.get<3>()` to refer to
+the `lifetime` member. With existing tools, the only good alternatives are to
+encapsulate each float as a separate struct to give it unique typenames...
+
+```
+struct entityX { float val; };
+struct entityY { float val; };
+struct entityZ { float val; };
+struct entityLifetime { float val; };
+
+tuple_vector<entityX, entityY, entityZ, entityLifetime> entityVec;
+```
+...and then access each tuple element by typename like
+`entityVec.get<entityLifetime>()`; or, creating an enumerated value to replace
+the indices...
+
+```
+enum EntityTypeEnum
+{
+ entityX = 0,
+ entityY = 1,
+ entityZ = 2,
+ entityLifetime = 3
+};
+
+tuple_vector<float, float, float, float> entityVec;
+```
+
+...and then access each tuple element by the enumerated value:
+`entityVec.get<entityLifetime>()`.
+
+Either way, there is a fairly significant maintenance and readability issue
+around this. This is arguably more severe than with `tuple` on its own
+because that is generally not intended for structures with long lifetime.
+
+Ideally, if the language could be mutated to accommodate such a thing, it would
+be good to have some combination of typenames and symbolic names in the
+declaration, e.g. something like
+
+```
+tuple_vector<float x, float y, float z, float lifetime> entityVec;
+```
+and be able to reference the tuple elements not just by typename or index, but
+through their corresponding symbol, like `entityVec.get<lifetime>()`. Or, it may
+be interesting if the necessary `get` functions could be even automatically
+generated through a reflection system, e.g. `entityVec.get_lifetime()`.
+All of this remains a pipe dream for now.
diff --git a/EASTL/doc/CMake/EASTL_Project_Integration.md b/EASTL/doc/CMake/EASTL_Project_Integration.md
new file mode 100644
index 0000000..4b014f9
--- /dev/null
+++ b/EASTL/doc/CMake/EASTL_Project_Integration.md
@@ -0,0 +1,93 @@
+## Using EASTL in your own projects
+
+This page describes the steps needed to use EASTL in your own projects
+
+## Setting up your project
+
+### Using CMake
+
+Add to your CMakeLists.txt:
+
+```cmake
+set(EASTL_ROOT_DIR C:/EASTL)
+include_directories (${EASTL_ROOT_DIR}/include)
+include_directories (${EASTL_ROOT_DIR}/test/packages/EAAssert/include)
+include_directories (${EASTL_ROOT_DIR}/test/packages/EABase/include/Common)
+include_directories (${EASTL_ROOT_DIR}/test/packages/EAMain/include)
+include_directories (${EASTL_ROOT_DIR}/test/packages/EAStdC/include)
+include_directories (${EASTL_ROOT_DIR}/test/packages/EATest/include)
+include_directories (${EASTL_ROOT_DIR}/test/packages/EAThread/include)
+set(EASTL_LIBRARY debug ${EASTL_ROOT_DIR}/build/Debug/EASTL.lib optimized ${EASTL_ROOT_DIR}/build/Release/EASTL.lib)
+add_custom_target(NatVis SOURCES ${EASTL_ROOT_DIR}/doc/EASTL.natvis)
+```
+
+And then add the library into the linker
+
+```
+target_link_libraries(... ${EASTL_LIBRARY})
+```
+
+### Using Visual Studio
+
+Using Visual Studio projecs directly you will need do the following steps:
+- Add the include paths
+- Add the library path
+- Add the library dependency
+- Add natvis (optional)
+
+> Note that in the examples below ${EASTL_ROOT_DIR} is the folder in which you stored EASTL. You could create an environment variable for this.
+
+#### Add the include paths
+
+Add the following paths to your C/C++ -> General -> Additional include directories:
+```
+${EASTL_ROOT_DIR}/include
+${EASTL_ROOT_DIR}/test/packages/EAAssert/include
+${EASTL_ROOT_DIR}/test/packages/EABase/include/Common
+${EASTL_ROOT_DIR}/test/packages/EAMain/include)
+${EASTL_ROOT_DIR}/test/packages/EAStdC/include)
+${EASTL_ROOT_DIR}/test/packages/EATest/include)
+${EASTL_ROOT_DIR}/test/packages/EAThread/include)
+```
+
+#### Add the library path
+
+Add the following library path to your Linker -> General -> Additional Library Directories:
+```
+${EASTL_ROOT_DIR}/build/$(Configuration)
+```
+
+#### Add the library dependency
+
+Either add the following library to your Linker -> Input -> Additional Dependencies
+```
+EASTL.lib
+```
+Or in code use the following:
+```
+#pragma comment(lib, "EASTL.lib")
+```
+
+#### Add natvis (optional)
+
+> Adding the natvis file to your project allows the debugger to use custom visualizers for the eastl data types. This greatly enhances the debugging experience.
+
+Add the natvis file anywhere in your solution:
+
+```
+Right-click your project: Add -> Existing item and then add the following file:
+${EASTL_ROOT_DIR}/doc/EASTL.natvis
+```
+
+## Setting up your code
+
+### Overloading operator new[]
+
+EASTL requires you to have an overload for the operator new[], here is an example that just forwards to global new[]:
+
+```c
+void* __cdecl operator new[](size_t size, const char* name, int flags, unsigned debugFlags, const char* file, int line)
+{
+ return new uint8_t[size];
+}
+```
diff --git a/EASTL/doc/Design.md b/EASTL/doc/Design.md
new file mode 100644
index 0000000..5877bb7
--- /dev/null
+++ b/EASTL/doc/Design.md
@@ -0,0 +1,374 @@
+# EASTL Design
+
+## Introduction
+
+EASTL (EA Standard Template Library) is designed to be a template library which encompasses and extends the functionality of standard C++ STL while improving it in various ways useful to game development. Much of EASTL's design is identical to standard STL, as the large majority of the STL is well-designed for many uses. The primary areas where EASTL deviates from standard STL implementations are essentially the following:
+
+* EASTL has a simplified and more flexible custom allocation scheme.
+* EASTL has significantly easier to read code.
+* EASTL has extension containers and algorithms.
+* EASTL has optimizations designed for game development.
+
+Of the above items, the only one which is an incompatible difference with STL is the case of memory allocation. The method for defining a custom allocator for EASTL is slightly different than that of standard STL, though they are 90% similar. The 10% difference, however, is what makes EASTL generally easier and more powerful to work with than standard STL. Containers without custom allocators act identically between EASTL and standard STL.
+
+## Motivations
+
+Our motifications for making EASTL drive the design of EASTL. As identified in the EASTL RFC (Request for Comment), the primary reasons for implementing a custom version of the STL are:
+
+* <span class="458151900-03082005"><font><font>Some STL implementations (especially Microsoft STL) have inferior performance characteristics that make them unsuitable for game development. EASTL is faster than all existing STL implementations.</font></font></span>
+* The STL is sometimes hard to debug, as most STL implementations use cryptic variable names and unusual data structures.
+* STL allocators are sometimes painful to work with, as they have many requirements and cannot be modified once bound to a container.
+* The STL includes excess functionality that can lead to larger code than desirable. It's not very easy to tell programmers they shouldn't use that functionality.
+* The STL is implemented with very deep function calls. This results is unacceptable performance in non-optimized builds and sometimes in optimized builds as well.
+* The STL doesn't support alignment of contained objects.
+* STL containers won't let you insert an entry into a container without supplying an entry to copy from. This can be inefficient.
+* Useful STL extensions (e.g. slist, hash_map, shared_ptr) found in existing STL implementations such as STLPort are not portable because they don't exist in other versions of STL or aren't consistent between STL versions.
+
+* The STL lacks useful extensions that game programmers find useful (e.g. intrusive_list) but which could be best optimized in a portable STL environment.
+* The STL has specifications that limit our ability to use it efficiently. For example, STL vectors are not guaranteed to use contiguous memory and so cannot be safely used as an array.
+* The STL puts an emphasis on correctness before performance, whereas sometimes you can get significant performance gains by making things less academcially pure.
+* STL containers have private implementations that don't allow you to work with their data in a portable way, yet sometimes this is an important thing to be able to do (e.g. node pools).
+* All existing versions of STL allocate memory in empty versions of at least some of their containers. This is not ideal and prevents optimizations such as container memory resets that can greatly increase performance in some situations.
+* The STL is slow to compile, as most modern STL implementations are very large.
+
+* There are legal issues that make it hard for us to freely use portable STL implementations such as STLPort.
+* We have no say in the design and implementation of the STL and so are unable to change it to work for our needs.
+
+## Prime Directives
+
+The implementation of EASTL is guided foremost by the following directives which are listed in order of importance.
+
+1. Efficiency (speed and memory usage)
+2. Correctness
+3. Portability
+4. Readability
+
+Note that unlike commercial STL implementations which must put correctness above all, we put a higher value on efficiency. As a result, some functionality may have some usage limitation that is not present in other similar systems but which allows for more efficient operation, especially on the platforms of significance to us.
+
+Portability is significant, but not critical. Yes, EASTL must compile and run on all platforms that we will ship games for. But we don't take that to mean under all compilers that could be conceivably used for such platforms. For example, Microsoft VC6 can be used to compile Windows programs, but VC6's C++ support is too weak for EASTL and so you simply cannot use EASTL under VC6.
+
+Readability is something that EASTL achieves better than many other templated libraries, particularly Microsoft STL and STLPort. We make every attempt to make EASTL code clean and sensible. Sometimes our need to provide optimizations (particularly related to type_traits and iterator types) results in less simple code, but efficiency happens to be our prime directive and so it overrides all other considerations.
+
+## Thread Safety
+
+It's not simple enough to simply say that EASTL is thread-safe or thread-unsafe. However, we can say that with respect to thread safety that EASTL does the right thing.
+
+Individual EASTL containers are not thread-safe. That is, access to an instance of a container from multiple threads at the same time is unsafe if any of those accesses are modifying operations. A given container can be read from multiple threads simultaneously as well as any other standalone data structure. If a user wants to be able to have modifying access an instance of a container from multiple threads, it is up to the user to ensure that proper thread synchronization occurs. This usually means using a mutex.
+
+EASTL classes other than containers are the same as containers with respect to thread safety. EASTL functions (e.g. algorithms) are inherently thread-safe as they have no instance data and operate entirely on the stack. As of this writing, no EASTL function allocates memory and thus doesn't bring thread safety issues via that means.
+
+The user may well need to be concerned about thread safety with respect to memory allocation. If the user modifies containers from multiple threads, then allocators are going to be accessed from multiple threads. If an allocator is shared across multiple container instances (of the same type of container or not), then mutexes (as discussed above) the user uses to protect access to indivudual instances will not suffice to provide thread safety for allocators used across multiple instances. The conventional solution here is to use a mutex within the allocator if it is exected to be used by multiple threads.
+
+EASTL uses neither static nor global variables and thus there are no inter-instance dependencies that would make thread safety difficult for the user to implement.
+
+## Container Design
+
+All EASTL containers follow a set of consistent conventions. Here we define the prototypical container which has the minimal functionality that all (non-adapter) containers must have. Some containers (e.g. stack) are explicitly adapter containers and thus wrap or inherit the properties of the wrapped container in a way that is implementation specific.
+
+```cpp
+template <class T, class Allocator =
+EASTLAllocator>
+
+class container
+
+{
+
+public:
+
+ typedef container<T, Allocator> this_type;
+
+ typedef
+T
+ value_type;
+
+ typedef T*
+ pointer;
+
+ typedef const T*
+ const_pointer;
+
+ typedef
+T& reference;
+
+
+ typedef const
+T& const_reference;
+
+
+ typedef
+ptrdiff_t difference_type;
+
+
+ typedef
+impl_defined size_type;
+
+
+ typedef impl-defined
+ iterator;
+
+ typedef impl-defined
+ const_iterator;
+
+ typedef reverse_iterator<iterator> reverse_iterator;
+
+ typedef reverse_iterator<const_iterator> reverse_const_iterator;
+
+ typedef Allocator
+ allocator_type;
+
+
+
+public:
+
+ container(const
+allocator_type& allocator = allocator_type());
+
+ container(const this_type&
+x);
+
+
+
+ this_type&
+operator=(this_type& x);
+
+ void swap(this_type& x);
+
+ void reset();
+
+
+
+ allocator_type& get_allocator();
+
+ void set_allocator(allocator_type& allocator);
+
+
+
+ iterator begin();
+
+ const_iterator begin() const;
+
+ iterator end();
+
+ const_iterator end() const;
+
+
+
+ bool validate() const;
+ int validate_iterator(const_iterator i)
+const;
+
+
+protected:
+
+ allocator_type mAllocator;
+
+};
+
+
+
+template <class T, class
+Allocator>
+
+bool operator==(const container<T, Allocator>& a, const container<T, Allocator>& b);
+
+
+
+template <class T, class
+Allocator>
+
+bool operator!=(const container<T, Allocator>& a, const
+container<T, Allocator>&
+b);
+```
+
+Notes:
+
+* Swapped containers do not swap their allocators.
+* Newly constructed empty containers do no memory allocation. Some STL and other container libraries allocate an initial node from the class memory allocator. EASTL containers by design never do this. If a container needs an initial node, that node should be made part of the container itself or be a static empty node object.
+* Empty containers (new or otherwise) contain no constructed objects, including those that might be in an 'end' node. Similarly, no user object (e.g. of type T) should be constructed unless required by the design and unless documented in the cotainer/algorithm contract. 
+* The reset function is a special extension function which unilaterally resets the container to an empty state without freeing the memory of the contained objects. This is useful for very quickly tearing down a container built into scratch memory. No memory is allocated by reset, and the container has no allocatedmemory after the reset is executed.
+* The validate and validate_iterator functions provide explicit container and iterator validation. EASTL provides an option to do implicit automatic iterator and container validation, but full validation (which can be potentially extensive) has too much of a performance cost to execute implicitly, even in a debug build. So EASTL provides these explicit functions which can be called by the user at the appropriate time and in optimized builds as well as debug builds.
+
+## Allocator Design
+
+The most significant difference between EASTL and standard C++ STL is that standard STL containers are templated on an allocator class with the interface defined in std::allocator. std::allocator is defined in the C++ standard as this:
+
+```cpp
+// Standard C++ allocator
+
+
+
+ template <class T>
+
+class allocator
+
+{
+
+public:
+
+ typedef size_t size_type;
+
+ typedef ptrdiff_t difference_type;
+
+ typedef T* pointer;
+
+ typedef const T* const_pointer;
+
+ typedef T&
+ reference;
+
+ typedef const
+T& const_reference;
+
+ typedef T value_type;
+
+
+
+ template <class U>
+
+ struct rebind { typedef allocator<U> other; };
+
+
+
+ allocator() throw();
+
+ allocator(const allocator&) throw();
+
+ template <class U>
+
+ allocator(const allocator<U>&) throw();
+
+
+
+ ~allocator()
+throw();
+
+
+
+ pointer
+ address(reference x) const;
+
+ const_pointer address(const_reference x)
+const;
+
+ pointer allocate(size_type, typename
+allocator<void>::const_pointer hint = 0);
+
+ void deallocate(pointer p,
+size_type n);
+
+ size_type max_size() const
+throw();
+
+ void construct(pointer p,
+const T& val);
+
+ void destroy(pointer
+p);
+
+};
+```
+
+Each STL container needs to have an allocator templated on container type T associated with it. The problem with this is that allocators for containers are defined at the class level and not the instance level. This makes it painful to define custom allocators for containers and adds to code bloat. Also, it turns out that the containers don't actually use allocator<T> but instead use allocator\<T>::rebind\<U>::other. Lastly, you cannot access this allocator after the container is constructed. There are some good academic reasons why the C++ standard works this way, but it results in a lot of unnecessary pain and makes concepts like memory tracking much harder to implement.
+
+What EASTL does is use a more familiar memory allocation pattern whereby there is only one allocator class interface and it is used by all containers. Additionally EASTL containers let you access their allocators and query them, name them, change them, etc.
+
+EASTL has chosen to make allocators not be copied between containers during container swap and assign operations. This means that if container A swaps its contents with container B, both containers retain their original allocators. Similarly, assigning container A to container B causes container B to retain its original allocator. Containers that are equivalent should report so via operator==; EASTL will do a smart swap if allocators are equal, and a brute-force swap otherwise.
+
+```cpp
+// EASTL allocator
+
+class allocator
+{
+public:
+    allocator(const char* pName = NULL);
+
+    void* allocate(size_t n, int flags = 0);
+    void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+    void  deallocate(void* p, size_t n);
+
+    const char* get_name() const;
+    void        set_name(const char* pName);
+};
+
+allocator* GetDefaultAllocator();
+```
+
+## Fixed Size Container Design
+
+EASTL supplies a set of fixed-size containers that the user can use, though the user can also implement their own versions. So in addition to class list there is class fixed_list. The fixed_list class implements a linked list via a fixed-size pool of contiguous memory which has no space overhead (unlike with a regular heap), doesn't cause fragmentation, and allocates very quickly.
+
+EASTL implements fixed containers via subclasses of regular containers which set the regular container's allocator to point to themselves. Thus the implementation for fixed_list is very tiny and consists of little more than constructor and allocator functions. This design has some advantages but has one small disadvantage. The primary advantages are primarily that code bloat is reduced and that the implementation is simple and the user can easily extend it. The primary disadvantage is that the parent list class ends up with a pointer to itself and thus has 4 bytes that could arguably be saved if system was designed differently. That different design would be to make the list class have a policy template parameter which specifies that it is a fixed pool container. EASTL chose not to follow the policy design because it would complicate the implementation, make it harder for the user to extend the container, and would potentially waste more memory due to code bloat than it would save due to the 4 byte savings it achieves in container instances.
+
+## Algorithm Design
+
+EASTL algorithms very much follow the philosophy of standard C++ algorithms, as this philosophy is sound and efficient. One of the primary aspects of algorithms is that they work on iterators and not containers. You will note for example that the find algorithm takes a first and last iterator as arguments and not a container. This has two primary benefits: it allows the user to specify a subrange of the container to search within and it allows the user to apply the find algorithm to sequences that aren't containers (e.g. a C array).
+
+EASTL algorithms are optimized at least as well as the best STL algorithms found in commercial libraries and are significantly optimized over the algorithms that come with the first-party STLs that come with compilers. Most significantly, EASTL algorithms take advantage of type traits of contained classes and take advantage of iterator types to optimize code generation. For example, if you resize an array of integers (or other "pod" type), EASTL will detect that this can be done with a memcpy instead of a slow object-by-object move as would Micrsoft STL.
+
+The optimizations found in EASTL algorithms and the supporting code in EASTL type traits consists of some fairly tricky advanced C++ and while it is fairly easy to read, it requires a C++ expert (language lawyer, really) to implement confidently. The result of this is that it takes more effort to develop and maintain EASTL than it would to maintain a simpler library. However, the performance advantages have been deemed worth the tradeoff.
+
+## Smart Pointer Design
+
+EASTL implements the following smart pointer types:
+
+* shared_ptr
+* shared_array
+* weak_ptr
+* instrusive_ptr
+* scoped_ptr
+* scoped_array
+* linked_ptr
+* linked_array
+
+All but linked_ptr/linked_array are well-known smart pointers from the Boost library. The behaviour of these smart pointers is very similar to those from Boost with two exceptions:
+
+* EASTL smart pointers allow you to assign an allocator to them.
+* EASTL shared_ptr implements deletion via a templated parameter instead of a dynamically allocated virtual member object interface.
+
+With respect to assigning an allocator, this gives EASTL more control over memory allocation and tracking, as Boost smart pointers unilaterally use global operator new to allocate memory from the global heap.
+
+With respect to shared_ptr deletion, EASTL's current design of using a templated parameter is questionable, but does have some reason. The advantage is that EASTL avoids a heap allocation, avoids virtual function calls, and avoids templated class proliferation. The disadvantage is that EASTL shared_ptr containers which hold void pointers can't call the destructors of their contained objects unless the user manually specifies a custom deleter template parameter. This is case whereby EASTL is more efficient but less safe. We can revisit this topic in the future if it becomes an issue.
+
+## list::size is O(n)
+
+As of this writing, EASTL has three linked list classes: list, slist, and intrusive_list. In each of these classes, the size of the list is not cached in a member size variable. The result of this is that getting the size of a list is not a fast operation, as it requires traversing the list and counting the nodes. We could make the list::size function be fast by having a member mSize variable which tracks the size as we insert and delete items. There are reasons for having such functionality and reasons for not having such functionality. We currently choose to not have a member mSize variable as it would add four bytes to the class, add a tiny amount of processing to functions such as insert and erase, and would only serve to improve the size function, but no others. In the case of intrusive_list, it would do additional harm. The alternative argument is that the C++ standard states that std::list should be an O(1) operation (i.e. have a member size variable), that many C++ standard library list implementations do so, that the size is but an integer which is quick to update, and that many users expect to have a fast size function. In the final analysis, we are developing a library for game development and performance is paramount, so we choose to not cache the list size. The user can always implement a size cache himself.
+
+## basic_string doesn't use copy-on-write
+
+The primary benefit of CoW is that it allows for the sharing of string data between two string objects. Thus if you say this:
+
+```cpp
+string a("hello");
+string b(a);
+```
+
+the "hello" will be shared between a and b. If you then say this:
+
+```cpp
+a = "world";
+```
+
+then `a` will release its reference to "hello" and leave b with the only reference to it. Normally this functionality is accomplished via reference counting and with atomic operations or mutexes.
+
+The C++ standard does not say anything about basic_string and CoW. However, for a basic_string implementation to be standards-conforming, a number of issues arise which dictate some things about how one would have to implement a CoW string. The discussion of these issues will not be rehashed here, as you can read the references below for better detail than can be provided in the space we have here. However, we can say that the C++ standard is sensible and that anything we try to do here to allow for an efficient CoW implementation would result in a generally unacceptable string interface.
+
+The disadvantages of CoW strings are:
+
+* A reference count needs to exist with the string, which increases string memory usage.
+* With thread safety, atomic operations and mutex locks are expensive, especially on weaker memory systems such as console gaming platforms.
+* All non-const string accessor functions need to do a sharing check the the first such check needs to detach the string. Similarly, all string assignments need to do a sharing check as well. If you access the string before doing an assignment, the assignment doesn't result in a shared string, because the string has already been detached.
+* String sharing doesn't happen the large majority of the time. In some cases, the total sum of the reference count memory can exceed any memory savings gained by the strings that share representations. 
+
+The addition of a cow_string class is under consideration for EASTL. There are conceivably some systems which have string usage patterns which would benefit from CoW sharing. Such functionality is best saved for a separate string implementation so that the other string uses aren't penalized.
+
+This is a good starting HTML reference on the topic:
+
+> [http://www.gotw.ca/publications/optimizations.htm](http://www.gotw.ca/publications/optimizations.htm)
+
+Here is a well-known Usenet discussion on the topic:
+
+> [http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d](http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d)
+
+----------------------------------------------
+End of document
diff --git a/EASTL/doc/EASTL-n2271.pdf b/EASTL/doc/EASTL-n2271.pdf
new file mode 100644
index 0000000..8a1b0b9
--- /dev/null
+++ b/EASTL/doc/EASTL-n2271.pdf
Binary files differ
diff --git a/EASTL/doc/EASTL.natvis b/EASTL/doc/EASTL.natvis
new file mode 100644
index 0000000..c3e94db
--- /dev/null
+++ b/EASTL/doc/EASTL.natvis
@@ -0,0 +1,731 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ This is a Microsoft natvis file, which allows visualization of complex variables in the
+ Microsoft debugger starting with VS2012. It's a successor to the AutoExp.dat file format.
+
+ This file needs to go into your C:\Users\<user>\Documents\Visual Studio 2011\Visualizers\
+ folder. Microsoft documentation states that it should go into a 2012 folder, but testing
+ in June of 2013 suggests that it still needs to be the 2011 folder.
+
+ You don't need to restart Visual Studio to use it, you just need to restart the debug
+ session. You can have multiple .natvis files and they will all be used.
+
+ VS2017 natvis documentation:
+ https://docs.microsoft.com/en-us/visualstudio/debugger/create-custom-views-of-native-objects
+-->
+
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+
+<Type Name="eastl::unique_ptr&lt;*&gt;">
+ <DisplayString Condition="mPair.mFirst != nullptr">({(void*)mPair.mFirst} = {*mPair.mFirst})</DisplayString>
+ <DisplayString Condition="mPair.mFirst == nullptr">({nullptr})</DisplayString>
+ <Expand>
+ <Item Name="[pointer]">(void*)mPair.mFirst</Item>
+ <Item Name="[value]">*mPair.mFirst</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::shared_ptr&lt;*&gt;">
+ <DisplayString Condition="mpValue != nullptr">({(void*)mpValue} = {*mpValue})</DisplayString>
+ <DisplayString Condition="mpValue == nullptr">({nullptr})</DisplayString>
+ <Expand>
+ <Item Name="[pointer]">(void*)mpValue</Item>
+ <Item Name="[value]">*mpValue</Item>
+ <Item Name="[reference count]">mpRefCount->mRefCount</Item>
+ <Item Name="[weak reference count]">mpRefCount->mWeakRefCount</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::weak_ptr&lt;*&gt;">
+ <DisplayString>{((mpRefCount &amp;&amp; mpRefCount-&gt;mRefCount) ? mpValue : nullptr)}</DisplayString>
+ <Expand>
+ <ExpandedItem>mpRefCount &amp;&amp; mpRefCount-&gt;mRefCount ? mpValue : nullptr</ExpandedItem>
+ </Expand>
+</Type>
+
+<Type Name="eastl::array&lt;*,*&gt;">
+ <DisplayString Condition="$T2 == 0">[{$T2}] {{}}</DisplayString>
+ <DisplayString Condition="$T2 == 1">[{$T2}] {{ {*mValue} }}</DisplayString>
+ <DisplayString Condition="$T2 == 2">[{$T2}] {{ {*mValue}, {*(mValue+1)} }}</DisplayString>
+ <DisplayString Condition="$T2 == 3">[{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)} }}</DisplayString>
+ <DisplayString Condition="$T2 == 4">[{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)}, {*(mValue+3)} }}</DisplayString>
+ <DisplayString Condition="$T2 == 5">[{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)}, {*(mValue+3)}, {*(mValue+4)} }}</DisplayString>
+ <DisplayString Condition="$T2 == 6">[{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)}, {*(mValue+3)}, {*(mValue+4)}, {*(mValue+5)} }}</DisplayString>
+ <DisplayString Condition="$T2 &gt; 6">[{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)}, {*(mValue+3)}, {*(mValue+4)}, {*(mValue+5)}, ... }}</DisplayString>
+ <Expand>
+ <Item Name="[size]">$T2</Item>
+ <ArrayItems>
+ <Size>$T2</Size>
+ <ValuePointer>mValue</ValuePointer>
+ </ArrayItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::basic_string&lt;*&gt;">
+ <DisplayString Condition="!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">"{mPair.mFirst.heap.mpBegin,sb}"</DisplayString>
+ <DisplayString Condition="!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">"{mPair.mFirst.sso.mData,sb}"</DisplayString>
+ <Expand>
+ <Item Name="[length]" Condition="!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">mPair.mFirst.heap.mnSize</Item>
+ <Item Name="[capacity]" Condition="!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">(mPair.mFirst.heap.mnCapacity &amp; ~kHeapMask)</Item>
+ <Item Name="[value]" Condition="!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">mPair.mFirst.heap.mpBegin,sb</Item>
+
+ <Item Name="[length]" Condition="!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">(SSOLayout::SSO_CAPACITY - mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize)</Item>
+ <Item Name="[capacity]" Condition="!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">SSOLayout::SSO_CAPACITY</Item>
+ <Item Name="[value]" Condition="!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">mPair.mFirst.sso.mData,sb</Item>
+
+ <Item Name="[uses heap]">!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)</Item>
+ </Expand>
+</Type>
+
+
+<Type Name="eastl::basic_string&lt;wchar_t,*&gt;">
+ <DisplayString Condition="!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">{mPair.mFirst.heap.mpBegin,su}</DisplayString>
+ <DisplayString Condition="!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">{mPair.mFirst.sso.mData,su}</DisplayString>
+ <Expand>
+ <Item Name="[length]" Condition="!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">mPair.mFirst.heap.mnSize</Item>
+ <Item Name="[capacity]" Condition="!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">(mPair.mFirst.heap.mnCapacity &amp; ~kHeapMask)</Item>
+ <Item Name="[value]" Condition="!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">mPair.mFirst.heap.mpBegin,su</Item>
+
+ <Item Name="[length]" Condition="!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">(SSOLayout::SSO_CAPACITY - mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize)</Item>
+ <Item Name="[capacity]" Condition="!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">SSOLayout::SSO_CAPACITY</Item>
+ <Item Name="[value]" Condition="!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)">mPair.mFirst.sso.mData,su</Item>
+
+ <Item Name="[uses heap]">!!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize &amp; kSSOMask)</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::pair&lt;*&gt;">
+ <DisplayString>({first}, {second})</DisplayString>
+ <Expand>
+ <Item Name="first">first</Item>
+ <Item Name="second">second</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::span&lt;*&gt;">
+ <DisplayString Condition="mnSize == 0">[{mnSize}] {{}}</DisplayString>
+ <DisplayString Condition="mnSize == 1">[{mnSize}] {{ {*mpData} }}</DisplayString>
+ <DisplayString Condition="mnSize == 2">[{mnSize}] {{ {*mpData}, {*(mpData+1)} }}</DisplayString>
+ <DisplayString Condition="mnSize == 3">[{mnSize}] {{ {*mpData}, {*(mpData+1)}, {*(mpData+2)} }}</DisplayString>
+ <DisplayString Condition="mnSize == 4">[{mnSize}] {{ {*mpData}, {*(mpData+1)}, {*(mpData+2)}, {*(mpData+3)} }}</DisplayString>
+ <DisplayString Condition="mnSize == 5">[{mnSize}] {{ {*mpData}, {*(mpData+1)}, {*(mpData+2)}, {*(mpData+3)}, {*(mpData+4)} }}</DisplayString>
+ <DisplayString Condition="mnSize == 6">[{mnSize}] {{ {*mpData}, {*(mpData+1)}, {*(mpData+2)}, {*(mpData+3)}, {*(mpData+4)}, {*(mpData+5)} }}</DisplayString>
+ <DisplayString Condition="mnSize &gt; 6">[{mnSize}] {{ {*mpData}, {*(mpData+1)}, {*(mpData+2)}, {*(mpData+3)}, {*(mpData+4)}, {*(mpData+5)}, ... }}</DisplayString>
+ <Expand>
+ <Item Name="[size]">mnSize</Item>
+ <ArrayItems>
+ <Size>mnSize</Size>
+ <ValuePointer>mpData</ValuePointer>
+ </ArrayItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::VectorBase&lt;*&gt;">
+ <DisplayString Condition="mpEnd == mpBegin">[{mpEnd - mpBegin}] {{}}</DisplayString>
+ <DisplayString Condition="mpEnd - mpBegin == 1">[{mpEnd - mpBegin}] {{ {*mpBegin} }}</DisplayString>
+ <DisplayString Condition="mpEnd - mpBegin == 2">[{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)} }}</DisplayString>
+ <DisplayString Condition="mpEnd - mpBegin == 3">[{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)} }}</DisplayString>
+ <DisplayString Condition="mpEnd - mpBegin == 4">[{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)}, {*(mpBegin+3)} }}</DisplayString>
+ <DisplayString Condition="mpEnd - mpBegin == 5">[{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)}, {*(mpBegin+3)}, {*(mpBegin+4)} }}</DisplayString>
+ <DisplayString Condition="mpEnd - mpBegin == 6">[{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)}, {*(mpBegin+3)}, {*(mpBegin+4)}, {*(mpBegin+5)} }}</DisplayString>
+ <DisplayString Condition="mpEnd - mpBegin &gt; 6">[{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)}, {*(mpBegin+3)}, {*(mpBegin+4)}, {*(mpBegin+5)}, ... }}</DisplayString>
+ <Expand>
+ <Item Name="[size]">mpEnd - mpBegin</Item>
+ <Item Name="[capacity]">mCapacityAllocator.mFirst - mpBegin</Item>
+ <ArrayItems>
+ <Size>mpEnd - mpBegin</Size>
+ <ValuePointer>mpBegin</ValuePointer>
+ </ArrayItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::DequeBase&lt;*,*,*&gt;">
+ <DisplayString Condition="mItBegin.mpCurrent == mItEnd.mpCurrent">
+ [0] {{}}
+ </DisplayString>
+ <DisplayString Condition="(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) * $T3 + (mItEnd.mpCurrent-mItEnd.mpBegin) - (mItBegin.mpCurrent-mItBegin.mpBegin) == 1">
+ [1] {{ {*mItBegin.mpCurrent} }}
+ </DisplayString>
+ <DisplayString Condition="(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) * $T3 + (mItEnd.mpCurrent-mItEnd.mpBegin) - (mItBegin.mpCurrent-mItBegin.mpBegin) != 0">
+ [{(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) * $T3 + (mItEnd.mpCurrent-mItEnd.mpBegin) - (mItBegin.mpCurrent-mItBegin.mpBegin)}]
+ {{
+ {*mItBegin.mpCurrent},
+ ...
+ }}
+ </DisplayString>
+ <Expand>
+ <Item Name="[size]">(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) * $T3 + (mItEnd.mpCurrent-mItEnd.mpBegin) - (mItBegin.mpCurrent-mItBegin.mpBegin)</Item>
+ <IndexListItems>
+ <Size>(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) * $T3 + (mItEnd.mpCurrent-mItEnd.mpBegin) - (mItBegin.mpCurrent-mItBegin.mpBegin)</Size>
+ <ValueNode>mItBegin.mpCurrentArrayPtr[(mItBegin.mpCurrent-mItBegin.mpBegin + $i) / $T3][(mItBegin.mpCurrent-mItBegin.mpBegin + $i) % $T3]</ValueNode>
+ </IndexListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::DequeIterator&lt;*&gt;">
+ <DisplayString>{*mpCurrent}</DisplayString>
+ <Expand>
+ <Item Name="Value">*mpCurrent</Item>
+ <Item Name="Previous" Condition="mpCurrent == mpBegin">*(*(mpCurrentArrayPtr-1) + (mpEnd-mpBegin) - 1)</Item>
+ <Item Name="Previous" Condition="mpCurrent != mpBegin">*(mpCurrent-1)</Item>
+ <Item Name="Next" Condition="mpCurrent+1 == mpEnd">**(mpCurrentArrayPtr+1)</Item>
+ <Item Name="Next" Condition="mpCurrent+1 != mpEnd">*(mpCurrent+1)</Item>
+ <Item Name="Begin">mpCurrent == mpBegin</Item>
+ <Item Name="End">mpCurrent+1 == mpEnd</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::queue&lt;*&gt;">
+ <AlternativeType Name="eastl::priority_queue&lt;*&gt;" />
+ <AlternativeType Name="eastl::stack&lt;*&gt;" />
+ <DisplayString>{c}</DisplayString>
+ <Expand>
+ <ExpandedItem>c</ExpandedItem>
+ </Expand>
+</Type>
+
+<Type Name="eastl::ListBase&lt;*&gt;">
+ <DisplayString Condition="mNodeAllocator.mFirst.mpNext == &amp;mNodeAllocator.mFirst">
+ [0] {{}}
+ </DisplayString>
+ <DisplayString Condition="mNodeAllocator.mFirst.mpNext != &amp;mNodeAllocator.mFirst &amp;&amp; mNodeAllocator.mFirst.mpNext-&gt;mpNext == &amp;mNodeAllocator.mFirst">
+ [1] {{ {((eastl::ListNode&lt;$T1&gt;*)mNodeAllocator.mFirst.mpNext)-&gt;mValue} }}
+ </DisplayString>
+ <DisplayString Condition="mNodeAllocator.mFirst.mpNext != &amp;mNodeAllocator.mFirst &amp;&amp; mNodeAllocator.mFirst.mpNext-&gt;mpNext != &amp;mNodeAllocator.mFirst &amp;&amp; mNodeAllocator.mFirst.mpNext-&gt;mpNext-&gt;mpNext == &amp;mNodeAllocator.mFirst">
+ [2]
+ {{
+ {((eastl::ListNode&lt;$T1&gt;*)mNodeAllocator.mFirst.mpNext)-&gt;mValue},
+ {((eastl::ListNode&lt;$T1&gt;*)mNodeAllocator.mFirst.mpNext-&gt;mpNext)-&gt;mValue}
+ }}
+ </DisplayString>
+ <DisplayString Condition="mNodeAllocator.mFirst.mpNext != &amp;mNodeAllocator.mFirst &amp;&amp; mNodeAllocator.mFirst.mpNext-&gt;mpNext != &amp;mNodeAllocator.mFirst &amp;&amp; mNodeAllocator.mFirst.mpNext-&gt;mpNext-&gt;mpNext != &amp;mNodeAllocator.mFirst">
+ [?]
+ {{
+ {((eastl::ListNode&lt;$T1&gt;*)mNodeAllocator.mFirst.mpNext)-&gt;mValue},
+ {((eastl::ListNode&lt;$T1&gt;*)mNodeAllocator.mFirst.mpNext-&gt;mpNext)-&gt;mValue},
+ ...
+ }}
+ </DisplayString>
+ <Expand>
+ <Synthetic Name="NOTE!">
+ <DisplayString>Content of lists will repeat indefinitely. Keep that in mind!</DisplayString>
+ </Synthetic>
+ <LinkedListItems>
+ <HeadPointer>mNodeAllocator.mFirst.mpNext</HeadPointer>
+ <NextPointer>mpNext</NextPointer>
+ <ValueNode>((eastl::ListNode&lt;$T1&gt;*)this)-&gt;mValue</ValueNode>
+ </LinkedListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::ListNode&lt;*&gt;">
+ <DisplayString>{mValue}</DisplayString>
+ <Expand>
+ <Item Name="Value">mValue</Item>
+ <Item Name="Next">*(eastl::ListNode&lt;$T1&gt;*)mpNext</Item>
+ <Item Name="Previous">*(eastl::ListNode&lt;$T1&gt;*)mpPrev</Item>
+ <Synthetic Name="NOTE!">
+ <DisplayString>Content of lists will repeat indefinitely. Keep that in mind!</DisplayString>
+ </Synthetic>
+ <Synthetic Name="List">
+ <DisplayString>The rest of the list follows:</DisplayString>
+ </Synthetic>
+ <LinkedListItems>
+ <HeadPointer>(eastl::ListNode&lt;$T1&gt;*)mpNext-&gt;mpNext</HeadPointer>
+ <NextPointer>(eastl::ListNode&lt;$T1&gt;*)mpNext</NextPointer>
+ <ValueNode>mValue</ValueNode>
+ </LinkedListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::ListIterator&lt;*&gt;">
+ <DisplayString>{*mpNode}</DisplayString>
+ <Expand>
+ <ExpandedItem>mpNode</ExpandedItem>
+ </Expand>
+</Type>
+
+<Type Name="eastl::SListBase&lt;*&gt;">
+ <DisplayString Condition="mNode.mpNext == 0">
+ [0] {{}}
+ </DisplayString>
+ <DisplayString Condition="mNode.mpNext != 0 &amp;&amp; mNode.mpNext-&gt;mpNext == 0">
+ [1]
+ {{
+ {((eastl::SListNode&lt;$T1&gt;*)mNode.mpNext)-&gt;mValue}
+ }}
+ </DisplayString>
+ <DisplayString Condition="mNode.mpNext != 0 &amp;&amp; mNode.mpNext-&gt;mpNext != 0 &amp;&amp; mNode.mpNext-&gt;mpNext-&gt;mpNext == 0">
+ [2]
+ {{
+ {((eastl::SListNode&lt;$T1&gt;*)mNode.mpNext)-&gt;mValue},
+ {((eastl::SListNode&lt;$T1&gt;*)mNode.mpNext-&gt;mpNext)-&gt;mValue}
+ }}
+ </DisplayString>
+ <DisplayString Condition="mNode.mpNext != 0 &amp;&amp; mNode.mpNext-&gt;mpNext != 0 &amp;&amp; mNode.mpNext-&gt;mpNext-&gt;mpNext != 0">
+ [?]
+ {{
+ {((eastl::SListNode&lt;$T1&gt;*)mNode.mpNext)-&gt;mValue},
+ {((eastl::SListNode&lt;$T1&gt;*)mNode.mpNext-&gt;mpNext)-&gt;mValue},
+ ...
+ }}
+ </DisplayString>
+ <Expand>
+ <LinkedListItems>
+ <HeadPointer>mNode.mpNext</HeadPointer>
+ <NextPointer>mpNext</NextPointer>
+ <ValueNode>((eastl::SListNode&lt;$T1&gt;*)this)-&gt;mValue</ValueNode>
+ </LinkedListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::SListNode&lt;*&gt;">
+ <DisplayString>{mValue}</DisplayString>
+ <Expand>
+ <Item Name="Value">mValue</Item>
+ <Item Name="Next">*(eastl::SListNode&lt;$T1&gt;*)mpNext</Item>
+ <Synthetic Name="List">
+ <DisplayString>The rest of the list follows:</DisplayString>
+ </Synthetic>
+ <LinkedListItems>
+ <HeadPointer>mpNext == nullptr ? nullptr : (eastl::SListNode&lt;$T1&gt;*)mpNext-&gt;mpNext</HeadPointer>
+ <NextPointer>(eastl::SListNode&lt;$T1&gt;*)mpNext</NextPointer>
+ <ValueNode>mValue</ValueNode>
+ </LinkedListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::SListIterator&lt;*&gt;">
+ <DisplayString>{*mpNode}</DisplayString>
+ <Expand>
+ <ExpandedItem>*mpNode</ExpandedItem>
+ </Expand>
+</Type>
+
+<Type Name="eastl::intrusive_list_base">
+ <DisplayString Condition="mAnchor.mpNext == &amp;mAnchor">[0] {{}}</DisplayString>
+ <DisplayString Condition="mAnchor.mpNext != &amp;mAnchor &amp;&amp; mAnchor.mpNext-&gt;mpNext == &amp;mAnchor">[1] {{ {mAnchor.mpNext} }}</DisplayString>
+ <DisplayString Condition="mAnchor.mpNext != &amp;mAnchor &amp;&amp; mAnchor.mpNext-&gt;mpNext != &amp;mAnchor">[?] {{ {mAnchor.mpNext}, ... }}</DisplayString>
+ <Expand>
+ <Synthetic Name="NOTE!">
+ <DisplayString>Content of intrusive lists will repeat indefinitely. Keep that in mind!</DisplayString>
+ </Synthetic>
+ <LinkedListItems>
+ <HeadPointer>mAnchor.mpNext</HeadPointer>
+ <NextPointer>mpNext</NextPointer>
+ <ValueNode>*this</ValueNode>
+ </LinkedListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::intrusive_list_iterator&lt;*&gt;">
+ <DisplayString>{*mpNode}</DisplayString>
+ <Expand>
+ <ExpandedItem>*mpNode</ExpandedItem>
+ </Expand>
+</Type>
+
+<Type Name="eastl::set&lt;*&gt;">
+ <AlternativeType Name="eastl::multiset&lt;*&gt;" />
+ <DisplayString Condition="mnSize == 0">
+ [0] {{}}
+ </DisplayString>
+ <DisplayString Condition="mnSize == 1">
+ [1]
+ {{
+ {((eastl::rbtree_node&lt;$T1&gt;*)mAnchor.mpNodeLeft)-&gt;mValue}
+ }}
+ </DisplayString>
+ <DisplayString Condition="mnSize &gt; 1">
+ [{mnSize}]
+ {{
+ {((eastl::rbtree_node&lt;$T1&gt;*)mAnchor.mpNodeLeft)-&gt;mValue},
+ ...
+ }}
+ </DisplayString>
+ <Expand>
+ <Item Name="[size]">mnSize</Item>
+ <TreeItems>
+ <Size>mnSize</Size>
+ <HeadPointer>mAnchor.mpNodeParent</HeadPointer>
+ <LeftPointer>mpNodeLeft</LeftPointer>
+ <RightPointer>mpNodeRight</RightPointer>
+ <ValueNode>((eastl::rbtree_node&lt;$T1&gt;*)this)-&gt;mValue</ValueNode>
+ </TreeItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::rbtree&lt;*,*&gt;">
+ <DisplayString Condition="mnSize == 0">
+ [0] {{}}
+ </DisplayString>
+ <DisplayString Condition="mnSize == 1">
+ [1]
+ {{
+ {((eastl::rbtree_node&lt;$T2&gt;*)mAnchor.mpNodeLeft)-&gt;mValue}
+ }}
+ </DisplayString>
+ <DisplayString Condition="mnSize &gt; 1">
+ [{mnSize}]
+ {{
+ {((eastl::rbtree_node&lt;$T2&gt;*)mAnchor.mpNodeLeft)-&gt;mValue},
+ ...
+ }}
+ </DisplayString>
+ <Expand>
+ <Item Name="[size]">mnSize</Item>
+ <TreeItems>
+ <Size>mnSize</Size>
+ <HeadPointer>mAnchor.mpNodeParent</HeadPointer>
+ <LeftPointer>mpNodeLeft</LeftPointer>
+ <RightPointer>mpNodeRight</RightPointer>
+ <ValueNode>((eastl::rbtree_node&lt;$T2&gt;*)this)-&gt;mValue</ValueNode>
+ </TreeItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::rbtree_node&lt;*&gt;">
+ <DisplayString>{mValue}</DisplayString>
+ <Expand>
+ <Item Name="Value">mValue</Item>
+ <Synthetic Name="NOTE!">
+ <DisplayString>It is possible to expand parents that do not exist.</DisplayString>
+ </Synthetic>
+ <Item Name="Parent">*(eastl::rbtree_node&lt;$T1&gt;*)mpNodeParent</Item>
+ <Item Name="Left">*(eastl::rbtree_node&lt;$T1&gt;*)mpNodeLeft</Item>
+ <Item Name="Right">*(eastl::rbtree_node&lt;$T1&gt;*)mpNodeRight</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::rbtree_iterator&lt;*&gt;">
+ <DisplayString>{*mpNode}</DisplayString>
+ <Expand>
+ <ExpandedItem>mpNode</ExpandedItem>
+ </Expand>
+</Type>
+
+
+<Type Name="eastl::hashtable&lt;*&gt;">
+ <DisplayString Condition="mnElementCount == 0">[{mnElementCount}] {{}}</DisplayString>
+ <DisplayString Condition="mnElementCount != 0">[{mnElementCount}] {{ ... }}</DisplayString>
+ <Expand>
+ <ArrayItems IncludeView="detailed">
+ <Size>mnBucketCount</Size>
+ <ValuePointer>mpBucketArray</ValuePointer>
+ </ArrayItems>
+ <CustomListItems ExcludeView="detailed">
+ <Variable Name="bucketIndex" InitialValue="0"/>
+ <Variable Name="entry" InitialValue ="mpBucketArray[bucketIndex]"/>
+ <Loop>
+ <Item Condition="entry != nullptr">entry->mValue</Item>
+ <If Condition="entry != nullptr">
+ <Exec>entry = entry->mpNext</Exec>
+ </If>
+ <If Condition="entry == nullptr">
+ <Exec>bucketIndex++</Exec>
+ <Break Condition="bucketIndex == mnBucketCount"/>
+ <Exec>entry = mpBucketArray[bucketIndex]</Exec>
+ </If>
+ </Loop>
+ </CustomListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::hash_node&lt;*&gt;">
+ <DisplayString Condition="this != 0 &amp;&amp; mpNext != 0"> {mValue}, {*mpNext}</DisplayString>
+ <DisplayString Condition="this != 0 &amp;&amp; mpNext == 0"> {mValue}</DisplayString>
+ <DisplayString Condition="this == 0"></DisplayString>
+ <Expand>
+ <LinkedListItems>
+ <HeadPointer>this</HeadPointer>
+ <NextPointer>mpNext</NextPointer>
+ <ValueNode>mValue</ValueNode>
+ </LinkedListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::hashtable_iterator_base&lt;*&gt;">
+ <DisplayString>{mpNode-&gt;mValue}</DisplayString>
+ <Expand>
+ <ExpandedItem>mpNode-&gt;mValue</ExpandedItem>
+ </Expand>
+</Type>
+
+<Type Name="eastl::reverse_iterator&lt;*&gt;">
+ <DisplayString>{*(mIterator-1)}</DisplayString>
+ <Expand>
+ <ExpandedItem>mIterator-1</ExpandedItem>
+ </Expand>
+</Type>
+
+<Type Name="eastl::bitset&lt;*&gt;">
+ <DisplayString>{{count = {kSize}}}</DisplayString>
+ <Expand>
+ <Item Name="[count]">kSize</Item>
+ <CustomListItems>
+ <Variable Name="iWord" InitialValue="0" />
+ <Variable Name="iBitInWord" InitialValue="0" />
+ <Variable Name="bBitValue" InitialValue="false" />
+
+ <Size>kSize</Size>
+
+ <Loop>
+ <Exec>bBitValue = ((mWord[iWord] >> iBitInWord) % 2) != 0 ? true : false</Exec>
+ <Item>bBitValue</Item>
+ <Exec>iBitInWord++</Exec>
+ <If Condition="iBitInWord == kBitsPerWord">
+ <Exec>iWord++</Exec>
+ <Exec>iBitInWord = 0</Exec>
+ </If>
+ </Loop>
+ </CustomListItems>
+ </Expand>
+</Type>
+
+<Type Name="eastl::ring_buffer&lt;*,*,*&gt;">
+ <DisplayString>{c}</DisplayString>
+ <Expand>
+ <ExpandedItem>c</ExpandedItem>
+ </Expand>
+</Type>
+
+<Type Name="eastl::basic_string_view&lt;*&gt;">
+ <DisplayString>{mpBegin,[mnCount]}</DisplayString>
+ <StringView>mpBegin,[mnCount]</StringView>
+</Type>
+
+<Type Name="eastl::compressed_pair_imp&lt;*&gt;">
+ <DisplayString Condition="($T3) == 0" Optional="true">({mFirst}, {mSecond})</DisplayString>
+ <DisplayString Condition="($T3) == 1" Optional="true">({mSecond})</DisplayString>
+ <DisplayString Condition="($T3) == 2" Optional="true">({mFirst})</DisplayString>
+ <DisplayString Condition="($T3) == 3" Optional="true">(empty)</DisplayString>
+ <DisplayString Condition="($T3) == 4" Optional="true">(empty)</DisplayString>
+ <DisplayString Condition="($T3) == 5" Optional="true">({mFirst}, {mSecond})</DisplayString>
+</Type>
+
+<Type Name="eastl::optional&lt;*&gt;">
+ <Intrinsic Name="value" Expression="*($T1*)&amp;val"/>
+ <DisplayString Condition="!engaged">nullopt</DisplayString>
+ <DisplayString Condition="engaged">{value()}</DisplayString>
+ <Expand>
+ <Item Condition="engaged" Name="value">value()</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::ratio&lt;*&gt;">
+ <DisplayString>{$T1} to {$T2}}</DisplayString>
+</Type>
+
+
+<Type Name="eastl::chrono::duration&lt;*,eastl::ratio&lt;1,1000000000&gt; &gt;">
+ <DisplayString>{mRep} nanoseconds</DisplayString>
+</Type>
+
+<Type Name="eastl::chrono::duration&lt;*,eastl::ratio&lt;1,1000000&gt; &gt;">
+ <DisplayString>{mRep} microseconds</DisplayString>
+</Type>
+
+<Type Name="eastl::chrono::duration&lt;*,eastl::ratio&lt;1,1000&gt; &gt;">
+ <DisplayString>{mRep} milliseconds</DisplayString>
+</Type>
+
+<Type Name="eastl::chrono::duration&lt;*,eastl::ratio&lt;1,1&gt; &gt;">
+ <DisplayString>{mRep} seconds</DisplayString>
+</Type>
+
+<Type Name="eastl::chrono::duration&lt;*,eastl::ratio&lt;60,1&gt; &gt;">
+ <DisplayString>{mRep} minutes</DisplayString>
+</Type>
+
+<Type Name="eastl::chrono::duration&lt;*,eastl::ratio&lt;3600,1&gt; &gt;">
+ <DisplayString>{mRep} hours</DisplayString>
+</Type>
+
+<Type Name="eastl::chrono::duration&lt;*,eastl::ratio&lt;*,*&gt; &gt;">
+ <DisplayString>{mRep} duration with ratio = [{$T2} : {$T3}] </DisplayString>
+</Type>
+
+
+
+<Type Name="eastl::function&lt;*&gt;">
+ <DisplayString Condition="mInvokeFuncPtr == nullptr">empty</DisplayString>
+ <DisplayString>{mInvokeFuncPtr}</DisplayString>
+</Type>
+
+
+<Type Name="eastl::reference_wrapper&lt;*&gt;">
+ <DisplayString>{*val}</DisplayString>
+</Type>
+
+<!--
+ This implementation isn't ideal because it can't switch between showing inline value vs values stored in a heap allocation.
+ We are hitting the limit of natvis scripting that we can't workaround unless we change the implementation of eastl::any.
+-->
+<Type Name="eastl::any">
+ <DisplayString Condition="m_handler == nullptr">empty</DisplayString>
+ <DisplayString Condition="m_handler != nullptr">{m_storage.external_storage}</DisplayString>
+</Type>
+
+
+
+<Type Name="eastl::atomic_flag">
+ <DisplayString>{mFlag.mAtomic}</DisplayString>
+</Type>
+
+<Type Name="eastl::variant&lt;*&gt;">
+ <Intrinsic Name="index" Expression="(int)mIndex"/>
+ <DisplayString Condition="index() == size_t(-1)">[valueless_by_exception]</DisplayString>
+ <DisplayString Condition="index() == 0" Optional="true">{{ index=0, value={($T1*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 1" Optional="true">{{ index=1, value={($T2*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 2" Optional="true">{{ index=2, value={($T3*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 3" Optional="true">{{ index=3, value={($T4*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 4" Optional="true">{{ index=4, value={($T5*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 5" Optional="true">{{ index=5, value={($T6*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 6" Optional="true">{{ index=6, value={($T7*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 7" Optional="true">{{ index=7, value={($T8*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 8" Optional="true">{{ index=8, value={($T9*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 9" Optional="true">{{ index=9, value={($T10*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 10" Optional="true">{{ index=10, value={($T11*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 11" Optional="true">{{ index=11, value={($T12*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 12" Optional="true">{{ index=12, value={($T13*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 13" Optional="true">{{ index=13, value={($T14*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 14" Optional="true">{{ index=14, value={($T15*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 15" Optional="true">{{ index=15, value={($T16*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 16" Optional="true">{{ index=16, value={($T17*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 17" Optional="true">{{ index=17, value={($T18*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 18" Optional="true">{{ index=18, value={($T19*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 19" Optional="true">{{ index=19, value={($T20*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 20" Optional="true">{{ index=20, value={($T21*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 21" Optional="true">{{ index=21, value={($T22*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 22" Optional="true">{{ index=22, value={($T23*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 23" Optional="true">{{ index=23, value={($T24*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 24" Optional="true">{{ index=24, value={($T25*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 25" Optional="true">{{ index=25, value={($T26*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 26" Optional="true">{{ index=26, value={($T27*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 27" Optional="true">{{ index=27, value={($T28*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 28" Optional="true">{{ index=28, value={($T29*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 29" Optional="true">{{ index=29, value={($T30*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <DisplayString Condition="index() == 30" Optional="true">{{ index=30, value={($T31*)mStorage.mBuffer.mCharData}}</DisplayString>
+ <Expand>
+ <Item Name="index">index()</Item>
+ <Item Name="[value]" Condition="index() == 0" Optional="true">($T1*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 1" Optional="true">($T2*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 2" Optional="true">($T3*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 3" Optional="true">($T4*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 4" Optional="true">($T5*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 5" Optional="true">($T6*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 6" Optional="true">($T7*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 7" Optional="true">($T8*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 8" Optional="true">($T9*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 9" Optional="true">($T10*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 10" Optional="true">($T11*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 11" Optional="true">($T12*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 12" Optional="true">($T13*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 13" Optional="true">($T14*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 14" Optional="true">($T15*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 15" Optional="true">($T16*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 16" Optional="true">($T17*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 17" Optional="true">($T18*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 18" Optional="true">($T19*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 19" Optional="true">($T20*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 20" Optional="true">($T21*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 21" Optional="true">($T22*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 22" Optional="true">($T23*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 23" Optional="true">($T24*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 24" Optional="true">($T25*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 25" Optional="true">($T26*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 26" Optional="true">($T27*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 27" Optional="true">($T28*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 28" Optional="true">($T29*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 29" Optional="true">($T30*)mStorage.mBuffer.mCharData</Item>
+ <Item Name="[value]" Condition="index() == 30" Optional="true">($T31*)mStorage.mBuffer.mCharData</Item>
+ </Expand>
+</Type>
+
+
+<Type Name="eastl::tuple&lt;&gt;">
+ <DisplayString IncludeView="noparens"></DisplayString>
+ <DisplayString ExcludeView="noparens">({*this,view(noparens)})</DisplayString>
+ <Expand/>
+</Type>
+
+<Type Name="eastl::tuple&lt;*&gt;">
+ <DisplayString IncludeView="noparens">{(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue}</DisplayString>
+ <DisplayString ExcludeView="noparens">({*this,view(noparens)})</DisplayString>
+ <Expand>
+ <Item Name="[0]">(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::tuple&lt;*,*&gt;">
+ <DisplayString IncludeView="noparens">{(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue}</DisplayString>
+ <DisplayString ExcludeView="noparens">({*this,view(noparens)})</DisplayString>
+ <Expand>
+ <Item Name="[0]">(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[1]">(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::tuple&lt;*,*,*&gt;">
+ <DisplayString IncludeView="noparens">{(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue}</DisplayString>
+ <DisplayString ExcludeView="noparens">({*this,view(noparens)})</DisplayString>
+ <Expand>
+ <Item Name="[0]">(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[1]">(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[2]">(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::tuple&lt;*,*,*,*&gt;">
+ <DisplayString IncludeView="noparens">{(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;3,$T4,0&gt;*)&amp;mImpl)).mValue}</DisplayString>
+ <DisplayString ExcludeView="noparens">({*this,view(noparens)})</DisplayString>
+ <Expand>
+ <Item Name="[0]">(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[1]">(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[2]">(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[3]">(*((eastl::Internal::TupleLeaf&lt;3,$T4,0&gt;*)&amp;mImpl)).mValue</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::tuple&lt;*,*,*,*,*&gt;">
+ <DisplayString IncludeView="noparens">{(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;3,$T4,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;4,$T5,0&gt;*)&amp;mImpl)).mValue}</DisplayString>
+ <DisplayString ExcludeView="noparens">({*this,view(noparens)})</DisplayString>
+ <Expand>
+ <Item Name="[0]">(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[1]">(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[2]">(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[3]">(*((eastl::Internal::TupleLeaf&lt;3,$T4,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[4]">(*((eastl::Internal::TupleLeaf&lt;4,$T5,0&gt;*)&amp;mImpl)).mValue</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::tuple&lt;*,*,*,*,*,*&gt;">
+ <DisplayString IncludeView="noparens">{(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;3,$T4,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;4,$T5,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;5,$T6,0&gt;*)&amp;mImpl)).mValue}</DisplayString>
+ <DisplayString ExcludeView="noparens">({*this,view(noparens)})</DisplayString>
+ <Expand>
+ <Item Name="[0]">(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[1]">(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[2]">(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[3]">(*((eastl::Internal::TupleLeaf&lt;3,$T4,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[4]">(*((eastl::Internal::TupleLeaf&lt;4,$T5,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[5]">(*((eastl::Internal::TupleLeaf&lt;5,$T6,0&gt;*)&amp;mImpl)).mValue</Item>
+ </Expand>
+</Type>
+
+<Type Name="eastl::tuple&lt;*,*,*,*,*,*,*&gt;">
+ <DisplayString IncludeView="noparens">{(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;3,$T4,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;4,$T5,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;5,$T6,0&gt;*)&amp;mImpl)).mValue}, {(*((eastl::Internal::TupleLeaf&lt;6,$T7,0&gt;*)&amp;mImpl)).mValue}</DisplayString>
+ <DisplayString ExcludeView="noparens">({*this,view(noparens)})</DisplayString>
+ <Expand>
+ <Item Name="[0]">(*((eastl::Internal::TupleLeaf&lt;0,$T1,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[1]">(*((eastl::Internal::TupleLeaf&lt;1,$T2,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[2]">(*((eastl::Internal::TupleLeaf&lt;2,$T3,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[3]">(*((eastl::Internal::TupleLeaf&lt;3,$T4,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[4]">(*((eastl::Internal::TupleLeaf&lt;4,$T5,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[5]">(*((eastl::Internal::TupleLeaf&lt;5,$T6,0&gt;*)&amp;mImpl)).mValue</Item>
+ <Item Name="[6]">(*((eastl::Internal::TupleLeaf&lt;6,$T7,0&gt;*)&amp;mImpl)).mValue</Item>
+ </Expand>
+</Type>
+
+
+</AutoVisualizer>
diff --git a/EASTL/doc/FAQ.md b/EASTL/doc/FAQ.md
new file mode 100644
index 0000000..1444c48
--- /dev/null
+++ b/EASTL/doc/FAQ.md
@@ -0,0 +1,2290 @@
+# EASTL FAQ
+
+We provide a FAQ (frequently asked questions) list here for a number of commonly asked questions about EASTL and STL in general. Feel free to suggest new FAQ additions based on your own experience.
+
+## Information
+
+1. [What is EASTL?](#info1-what-is-eastl)
+2. [What uses are EASTL suitable for?](#info2-what-uses-are-eastl-suitable-for)
+3. [How does EASTL differ from standard C++ STL?](#info3-how-does-eastl-differ-from-standard-c-stl)
+4. [Is EASTL thread-safe?](#info4-is-eastl-thread-safe)
+5. [What platforms/compilers does EASTL support?](#info5-what-platformscompilers-does-eastl-support)
+6. [Why is there EASTL when there is the STL?](#info6-why-is-there-eastl-when-there-is-the-stl)
+7. [Can I mix EASTL with standard C++ STL?](#info7-can-i-mix-eastl-with-standard-c-stl)
+8. [Where can I learn more about STL and EASTL?](#info8-where-can-i-learn-more-about-stl-and-eastl)
+9. [What is the legal status of EASTL?](#info9-what-is-the-legal-status-of-eastl)
+10. [Does EASTL deal with compiler exception handling settings?](#info10-does-eastl-deal-with-compiler-exception-handling-settings)
+11. [What C++ language features does EASTL use (e.g. virtual functions)?](#info11-what-c-language-features-does-eastl-use-eg-virtual-functions)
+12. [What compiler warning levels does EASTL support?](#info12-what-compiler-warning-levels-does-eastl-support)
+13. [Is EASTL compatible with Lint?](#info13-is-eastl-compatible-with-lint)
+14. [What compiler settings do I need to compile EASTL?](#info14-what-compiler-settings-do-i-need-to-compile-eastl)
+15. [How hard is it to incorporate EASTL into my project?](#info15-how-hard-is-it-to-incorporate-eastl-into-my-project)
+16. [Should I use EASTL instead of std STL or instead of my custom library?](#info16-should-i-use-eastl-instead-of-std-stl-or-instead-of-my-custom-library)
+17. [I think I've found a bug. What do I do?](#info17-i-think-ive-found-a-bug-what-do-i-do)
+18. [Can EASTL be used by third party EA developers?](#info18-can-eastl-be-used-by-third-party-ea-developers)
+
+## Performance
+
+1. [How efficient is EASTL compared to standard C++ STL implementations?](#perf1-how-efficient-is-eastl-compared-to-standard-c-stl-implementations)
+2. [How efficient is EASTL in general?](#perf2-how-efficient-is-eastl-in-general)
+3. [Strings don't appear to use the "copy-on-write" optimization. Why not?](#perf3-strings-dont-appear-to-use-the-copy-on-write-cow-optimization-why-not)
+4. [Does EASTL cause code bloat, given that it uses templates?](#perf4-does-eastl-cause-code-bloat-given-that-it-uses-templates)
+5. [Don't STL and EASTL containers fragment memory?](#perf5-dont-stl-and-eastl-containers-fragment-memory)
+6. [I don't see container optimizations for equivalent scalar types such as pointer types. Why?](#perf6-i-dont-see-container-optimizations-for-equivalent-scalar-types-such-as-pointer-types-why)
+7. [I've seen some STL's provide a default quick "node allocator" as the default allocator. Why doesn't EASTL do this?](#perf7-ive-seen-some-stls-provide-a-default-quick-node-allocator-as-the-default-allocator-why-doesnt-eastl-do-this)
+8. [Templates sometimes seem to take a long time to compile. Why do I do about that?](#perf8-templates-sometimes-seem-to-take-a-long-time-to-compile-why-do-i-do-about-that)
+9. [How do I assign a custom allocator to an EASTL container?](#cont8-how-do-i-assign-a-custom-allocator-to-an-eastl-container)
+10. [How well does EASTL inline?](#perf10-how-well-does-eastl-inline)
+11. [How do I control function inlining?](#perf11-how-do-i-control-function-inlining)
+12. [C++ / EASTL seems to bloat my .obj files much more than C does.](#perf12-c--eastl-seems-to-bloat-my-obj-files-much-more-than-c-does)
+13. [What are the best compiler settings for EASTL?](#perf13-what-are-the-best-compiler-settings-for-eastl)
+
+## Problems
+
+1. [I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?](#prob1-im-getting-screwy-behavior-in-sorting-algorithms-or-sorted-containers-whats-wrong)
+2. [I am getting compiler warnings (e.g. C4244, C4242 or C4267) that make no sense. Why?](#prob2-i-am-getting-compiler-warnings-eg-c4244-c4242-or-c4267-that-make-no-sense-why)
+3. [I am getting compiler warning C4530, which complains about exception handling and "unwind semantics." What gives?](#prob3-i-am-getting-compiler-warning-c4530-which-complains-about-exception-handling-and-unwind-semantics-what-gives)
+4. [Why are tree-based containers hard to read with a debugger?](#prob4-why-are-tree-based-eastl-containers-hard-to-read-with-a-debugger)
+5. [The EASTL source code is sometimes rather complicated looking. Why is that?](#prob5-the-eastl-source-code-is-sometimes-rather-complicated-looking-why-is-that)
+6. [When I get compilation errors, they are very long and complicated looking. What do I do?](#prob6-when-i-get-compilation-errors-they-are-very-long-and-complicated-looking-what-do-i-do)
+7. [Templates sometimes seem to take a long time to compile. Why do I do about that?](#prob7-templates-sometimes-seem-to-take-a-long-time-to-compile-why-do-i-do-about-that)
+8. [I get the compiler error: "template instantiation depth exceeds maximum of 17. use -ftemplate-depth-NN to increase the maximum"](#prob8-i-get-the-compiler-error-template-instantiation-depth-exceeds-maximum-of-17-use--ftemplate-depth-nn-to-increase-the-maximum)
+9. [I'm getting errors about min and max while compiling.](#prob9-im-getting-errors-about-min-and-max-while-compiling)
+10. [C++ / EASTL seems to bloat my .obj files much more than C does.](#prob10-c--eastl-seems-to-bloat-my-obj-files-much-more-than-c-does)
+11. [I'm getting compiler errors regarding operator new being previously defined.](#prob11-im-getting-compiler-errors-regarding-placement-operator-new-being-previously-defined)
+12. [I'm getting errors related to wchar_t string functions such as wcslen().](#prob12-im-getting-errors-related-to-wchar_t-string--functions-such-as-wcslen)
+13. [I'm getting compiler warning C4619: there is no warning number Cxxxx (e.g. C4217).](#prob13-im-getting-compiler-warning-c4619-there-is-no-warning-number-cxxxx-eg-c4217)
+14. [My stack-based fixed_vector is not respecting the object alignment requirements.](#prob14-my-stack-based-fixed_vector-is-not-respecting-the-object-alignment-requirements)
+15. [I am getting compiler errors when using GCC under XCode (Macintosh/iphone).](#prob15-i-am-getting-compiler-errors-when-using-gcc-under-xcode-macintoshiphone)
+16. [I am getting linker errors about Vsnprintf8 or Vsnprintf16.](#prob16-i-am-getting-linker-errors-about-vsnprintf8-or-vsnprintf16)
+17. [I am getting compiler errors about UINT64_C or UINT32_C.](#prob17-i-am-getting-compiler-errors-about-uint64_c-or-uint32_c)
+18. [I am getting a crash with a global EASTL container.](#prob18-i-am-getting-a-crash-with-a-global-eastl-container)
+19. [Why doesn't EASTL support passing NULL to functions with pointer arguments?](#prob19-why-doesnt-eastl-support-passing-null-string-functions)
+
+## Debug
+
+1. [How do I get VC++ mouse-overs to view templated data?](#debug1-how-do-i-set-the-vc-debugger-to-display-eastl-container-data-with-tooltips)
+2. [How do I view containers if the visualizer/tooltip support is not present?](#debug2-how-do-i-view-containers-if-the-visualizertooltip-support-is-not-present)
+3. [The EASTL source code is sometimes rather complicated looking. Why is that?](#debug3-the-eastl-source-code-is-sometimes-rather-complicated-looking-why-is-that)
+4. [When I get compilation errors, they are very long and complicated looking. What do I do?](#debug4-when-i-get-compilation-errors-they-are-very-long-and-complicated-looking-what-do-i-do)
+5. [How do I measure hash table balancing?](#debug5-how-do-i-measure-hash-table-balancing)
+
+## Containers
+
+1. [Why do some containers have "fixed" versions (e.g. fixed_list) but others(e.g. deque) don't have fixed versions?](#cont1-why-do-some-containers-have-fixed-versions-eg-fixed_list-but-otherseg-deque-dont-have-fixed-versions)
+2. [Can I mix EASTL with standard C++ STL?](#cont2-can-i-mix-eastl-with-standard-c-stl)
+3. [Why are there so many containers?](#cont3-why-are-there-so-many-containers)
+4. [Don't STL and EASTL containers fragment memory?](#cont4-dont-stl-and-eastl-containers-fragment-memory)
+5. [I don't see container optimizations for equivalent scalar types such as pointer types. Why?](#cont5-i-dont-see-container-optimizations-for-equivalent-scalar-types-such-as-pointer-types-why)
+6. [What about alternative container and algorithm implementations (e.g. treaps, skip lists, avl trees)?](#cont6-what-about-alternative-container-and-algorithm-implementations-eg-treaps-skip-lists-avl-trees)
+7. [Why are containers hard to read with a debugger?](#cont7-why-are-tree-based-eastl-containers-hard-to-read-with-a-debugger)
+8. [How do I assign a custom allocator to an EASTL container?](#cont8-how-do-i-assign-a-custom-allocator-to-an-eastl-container)
+9. [How do I set the VC++ debugger to display EASTL container data with tooltips?](#cont9-how-do-i-set-the-vc-debugger-to-display-eastl-container-data-with-tooltips)
+10. [How do I use a memory pool with a container?](#cont10-how-do-i-use-a-memory-pool-with-a-container)
+11. [How do I write a comparison (operator<()) for a struct that contains two or more members?](#cont11-how-do-i-write-a-comparison-operator-for-a-struct-that-contains-two-or-more-members)
+12. [Why doesn't container X have member function Y?](#cont12-why-doesnt-container-x-have-member-function-y)
+13. [How do I search a hash_map of strings via a char pointer efficiently? If I use map.find("hello") it creates a temporary string, which is inefficient.](#cont13-how-do-i-search-a-hash_map-of-strings-via-a-char-pointer-efficiently-if-i-use-mapfindhello-it-creates-a-temporary-string-which-is-inefficient)
+14. [Why are set and hash_set iterators const (i.e. const_iterator)?](#cont14-why-are-set-and-hash_set-iterators-const-ie-const_iterator)
+15. [How do I prevent my hash container from re-hashing?](#cont15-how-do-i-prevent-my-hash-container-from-re-hashing)
+16. [Which uses less memory, a map or a hash_map?](#cont16-which-uses-less-memory-a-map-or-a-hash_map)
+17. [How do I write a custom hash function?](#cont17-how-do-i-write-a-custom-hash-function)
+18. [How do I write a custom compare function for a map or set?](#cont18-how-do-i-write-a-custom-compare-function-for-a-map-or-set)
+19. [How do I force my vector or string capacity down to the size of the container?](#cont19-how-do-i-force-my-vector-or-string-capacity-down-to-the-size-of-the-container)
+20. [How do I iterate a container while (selectively) removing items from it?](#cont20-how-do-i-iterate-a-container-while-selectively-removing-items-from-it)
+21. [How do I store a pointer in a container?](#cont21-how-do-i-store-a-pointer-in-a-container)
+22. [How do I make a union of two containers? difference? intersection?](#cont22-how-do-i-make-a-union-of-two-containers-difference-intersection)
+23. [How do I override the default global allocator?](#cont23-how-do-i-override-the-default-global-allocator)
+24. [How do I do trick X with the string container?](#cont24-how-do-i-do-trick-x-with-the-string-container)
+25. [How do EASTL smart pointers compare to Boost smart pointers?](#cont25-how-do-eastl-smart-pointers-compare-to-boost-smart-pointers)
+26. [How do your forward-declare an EASTL container?](#cont26-how-do-your-forward-declare-an-eastl-container)
+27. [How do I make two containers share a memory pool?](#cont27-how-do-i-make-two-containers-share-a-memory-pool)
+28. [Can I use a std (STL) allocator with EASTL?](#cont28-can-i-use-a-std-stl-allocator-with-eastl)
+29. [What are the requirements of classes stored in containers?](#what-are-the-requirements-of-classes-stored-in-containers)
+
+## Algorithms
+
+1. [I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?](#algo1-im-getting-screwy-behavior-in-sorting-algorithms-or-sorted-containers-whats-wrong)
+2. [How do I write a comparison (operator<()) for a struct that contains two or more members?](#algo2-how-do-i-write-a-comparison-operator-for-a-struct-that-contains-two-or-more-members)
+3. [How do I sort something in reverse order?](#algo3-how-do-i-sort-something-in-reverse-order)
+4. [I'm getting errors about min and max while compiling.](#algo4-im-getting-errors-about-min-and-max-while-compiling)
+5. [Why don't algorithms take a container as an argument instead of iterators? A container would be more convenient.](#algo5-why-dont-algorithms-take-a-container-as-an-argument-instead-of-iterators-a-container-would-be-more-convenient)
+6. [Given a container of pointers, how do I find an element by value (instead of by pointer)?](#algo6-given-a-container-of-pointers-how-do-i-find-an-element-by-value-instead-of-by-pointer)
+7. [When do stored objects need to support opertor < vs. when do they need to support operator ==?](#algo7-when-do-stored-objects-need-to-support-operator--vs-when-do-they-need-to-support-operator-)
+8. [How do I sort via pointers or array indexes instead of objects directly?](#algo8-how-do-i-sort-via-pointers-or-array-indexes-instead-of-objects-directly)
+
+## Iterators
+
+1. [What's the difference between iterator, const iterator, and const_iterator?](#iter1-whats-the-difference-between-iterator-const-iterator-and-const_iterator)
+2. [How do I tell from an iterator what type of thing it is iterating?](#iter2-how-do-i-tell-from-an-iterator-what-type-of-thing-it-is-iterating)
+3. [How do I iterate a container while (selectively) removing items from it?](#iter3-how-do-i-iterate-a-container-while-selectively-removing-items-from-it)
+4. [What is an insert_iterator?](#iter4-what-is-an-insert_iterator)
+
+## Information
+
+### Info.1 What is EASTL?
+
+EASTL refers to "EA Standard Template Library." It is a C++ template library that is analogous to the template facilities of the C++ standard library, which are often referred to as the STL. EASTL consists of the following systems:
+
+* Containers
+* Iterators
+* Algorithms
+* Utilities
+* Smart pointers
+* Type traits
+
+Of these, the last two (smart pointers and type traits) do not have analogs in standard C++. With respect to the other items, EASTL provides extensions and optimizations over the equivalents in standard C++ STL.
+
+EASTL is a professional-level implementation which outperforms commercial implementations (where functionality overlaps) and is significantly easier to read and debug.
+
+### Info.2 What uses are EASTL suitable for?
+
+EASTL is suitable for any place where templated containers and algorithms would be appropriate. Thus any C++ tools could use it and many C++ game runtimes could use it, especially 2005+ generation game platforms. EASTL has optimizations that make it more suited to the CPUs and memory systems found on console platforms. Additionally, EASTL has some type-traits and iterator-traits-derived template optimizations that make it generally more efficient than home-brew templated containers.
+
+### Info.3 How does EASTL differ from standard C++ STL?
+
+There are three kinds of ways that EASTL differs from standard STL:
+
+* EASTL equivalents to STL sometimes differ.
+* EASTL implementations sometimes differ from STL implementations of the same thing.
+* EASTL has functionality that doesn't exist in STL.
+
+With respect to item #1, the changes are such that they benefit game development and and not the type that could silently hurt you if you were more familiar with STL interfaces.
+
+With respect to item #2, where EASTL implementations differ from STL implementations it is almost always due to improvements being made in the EASTL versions or tradeoffs being made which are considered better for game development.
+
+With respect to item #3, there are a number of facilities that EASTL has that STL doesn't have, such as intrusive_list and slist containers, smart pointers, and type traits. All of these are facilities that assist in making more efficient game code and data.
+
+Ways in which EASTL is better than standard STL:
+
+* Has higher performance in release builds, sometimes dramatically so.
+* Has significantly higher performance in debug builds, due to less call overhead.
+* Has extended per-container functionality, particularly for game development.
+* Has additional containers that are useful for high performance game development.
+* Is easier to read, trace, and debug.
+* Memory allocation is much simpler and more controllable.
+* Has higher portability, as there is a single implementation for all platforms.
+* Has support of object alignment, whereas such functionality is not natively supported by STL.
+* We have control over it, so we can modify it as we like.
+* Has stricter standards for container design and behavior, particularly as this benefits game development.
+
+Ways in which EASTL is worse than standard STL:
+
+* Standard STL implementations are currently very reliable and weather-worn, whereas EASTL is less tested.
+* Standard STL is automatically available with just about every C++ compiler vendor's library.
+* Standard STL is supported by the compiler vendor and somewhat by the Internet community.
+
+#### EASTL coverage of std STL
+
+* list
+* vector
+* deque
+* string
+* set
+* multiset
+* map
+* multimap
+* bitset
+* queue
+* stack
+* priority_queue
+* memory
+* numeric
+* algorithm (all but inplace_merge, prev_permutation, next_permutation, nth_element, includes, unique_copy)
+* utility
+* functional
+* iterator
+
+EASTL additions/amendments to std STL
+
+* allocators work in a simpler way.
+* exception handling can be disabled.
+* all containers expose/declare their node size, so you can make a node allocator for them.
+* all containers have reset(), which unilaterally forgets their contents.
+* all containers have validate() and validate_iterator() functions.
+* all containers understand and respect object alignment requirements.
+* all containers guarantee no memory allocation upon being newly created as empty.
+* all containers and their iterators can be viewed in a debugger (no other STL does this, believe it or not).
+* linear containers guarantee linear memory.
+* vector has push_back(void).
+* vector has a data() function.
+* vector<bool> is actually a vector of type bool.
+* vector and string have set_capacity().
+* string has sprintf(), append_sprintf(), trim(), compare_i(), make_lower(), make_upper().
+* deque allows you to specify the subarray size.
+* list has a push_front(void) and push_back(void) function.
+* hash_map, hash_set, etc. have find_as().
+
+EASTL coverage of TR1 (tr1 refers to proposed additions for the next C++ standard library, ~2008)
+
+* array
+* type_traits (there are about 30 of these)
+* unordered_set (EASTL calls it hash_set)
+* unordered_multiset
+* unordered_map
+* unordered_multimap
+* shared_ptr, shared_array, weak_ptr, scoped_ptr, scoped_array, intrusive_ptr
+
+EASTL additional functionality (not found elsewhere)
+
+* fixed_list
+* fixed_slist
+* fixed_vector
+* fixed_string
+* fixed_substring
+* fixed_set
+* fixed_multiset
+* fixed_map
+* fixed_multimap
+* fixed_hash_set
+* fixed_hash_multiset
+* fixed_hash_map
+* fixed_hash_multimap
+* vector_set
+* vector_multiset
+* vector_map
+* vector_multimap
+* intrusive_list
+* intrusive_slist
+* intrusive_sdlist
+* intrusive_hash_set
+* intrusive_hash_multiset
+* intrusive_hash_map
+* intrusive_hash_multimap
+* slist (STLPort's STL has this)
+* heap
+* linked_ptr, linked_array
+* sparse_matrix (this is not complete as of this writing)
+* ring_buffer
+* compressed_pair
+* call_traits
+* binary_search_i, change_heap, find_first_not_of, find_last_of, find_last_not_of, identical
+* comb_sort, bubble_sort, selection_sort, shaker_sort, bucket_sort
+* equal_to_2, not_equal_to_2, str_equal_to, str_equal_to_i
+
+### Info.4 Is EASTL thread-safe?
+
+It's not simple enough to simply say that EASTL is thread-safe or thread-unsafe. However, we can say that with respect to thread safety that EASTL does the right thing.
+
+Individual EASTL containers are not thread-safe. That is, access to an instance of a container from multiple threads at the same time is unsafe if any of those accesses are modifying operations. A given container can be read from multiple threads simultaneously as well as any other standalone data structure. If a user wants to be able to have modifying access an instance of a container from multiple threads, it is up to the user to ensure that proper thread synchronization occurs. This usually means using a mutex.
+
+EASTL classes other than containers are the same as containers with respect to thread safety. EASTL functions (e.g. algorithms) are inherently thread-safe as they have no instance data and operate entirely on the stack. As of this writing, no EASTL function allocates memory and thus doesn't bring thread safety issues via that means.
+
+The user may well need to be concerned about thread safety with respect to memory allocation. If the user modifies containers from multiple threads, then allocators are going to be accessed from multiple threads. If an allocator is shared across multiple container instances (of the same type of container or not), then mutexes (as discussed above) the user uses to protect access to individual instances will not suffice to provide thread safety for allocators used across multiple instances. The conventional solution here is to use a mutex within the allocator if it is expected to be used by multiple threads.
+
+EASTL uses neither static nor global variables and thus there are no inter-instance dependencies that would make thread safety difficult for the user to implement.
+
+### Info.5 What platforms/compilers does EASTL support?
+
+EASTL's support depends entirely on the compiler and not on the platform. EASTL works on any C++ compiler that completely conforms the C++ language standard. Additionally, EASTL is 32 bit and 64 bit compatible. Since EASTL does not use the C or C++ standard library (with a couple small exceptions), it doesn't matter what kind of libraries are provided (or not provided) by the compiler vendor. However, given that we need to work with some compilers that aren't 100% conforming to the language standard, it will be useful to make a list here of these that are supported and those that are not:
+
+| Compiler | Status | Notes |
+|---------|--------|-------|
+| GCC 2.9x | Supported | However, GCC 2.9x has some issues that you may run into that cause you to use EASTL facilities differently than a fully compliant compiler would allow. |
+| GCC 3.x+ | Supported | This compiler is used by the Mac OSX, and Linux platforms. |
+| MSVC 6.0 | Not supported | This compiler is too weak in the area of template and namespace support. |
+| MSVC 7.0+ | Supported | This compiler is used by the PC and Win CE platforms |
+| Borland 5.5+ | Not supported | Borland can successfully compile many parts of EASTL, but not all parts. |
+| EDG | Supported | This is the compiler front end to some other compilers, such as Intel, and Comeau C++. |
+| IBM XL 5.0+ | Supported | This compiler is sometimes used by PowerPC platforms such as Mac OSX and possibly future console platforms. |
+
+### Info.6 Why is there EASTL when there is the STL?
+
+The STL is largely a fine library for general purpose C++. However, we can improve upon it for our uses and gain other advantages as well. The primary motivations for the existence of EASTL are the following:
+
+* Some STL implementations (especially Microsoft STL) have inferior performance characteristics that make them unsuitable for game development. EASTL is faster than all existing STL implementations.
+* The STL is sometimes hard to debug, as most STL implementations use cryptic variable names and unusual data structures.
+* STL allocators are sometimes painful to work with, as they have many requirements and cannot be modified once bound to a container.
+* The STL includes excess functionality that can lead to larger code than desirable. It's not very easy to tell programmers they shouldn't use that functionality.
+* The STL is implemented with very deep function calls. This results is unacceptable performance in non-optimized builds and sometimes in optimized builds as well.
+* The STL doesn't support alignment of contained objects.
+* STL containers won't let you insert an entry into a container without supplying an entry to copy from. This can be inefficient.
+* Useful STL extensions (e.g. slist, hash_map, shared_ptr) found in existing STL implementations such as STLPort are not portable because they don't exist in other versions of STL or aren't consistent between STL versions.
+* The STL lacks useful extensions that game programmers find useful (e.g. intrusive_list) but which could be best optimized in a portable STL environment.
+* The STL has specifications that limit our ability to use it efficiently. For example, STL vectors are not guaranteed to use contiguous memory and so cannot be safely used as an array.
+* The STL puts an emphasis on correctness before performance, whereas sometimes you can get significant performance gains by making things less academcially pure.
+* STL containers have private implementations that don't allow you to work with their data in a portable way, yet sometimes this is an important thing to be able to do (e.g. node pools).
+* All existing versions of STL allocate memory in empty versions of at least some of their containers. This is not ideal and prevents optimizations such as container memory resets that can greatly increase performance in some situations.
+* The STL is slow to compile, as most modern STL implementations are very large.
+* There are legal issues that make it hard for us to freely use portable STL implementations such as STLPort.
+* We have no say in the design and implementation of the STL and so are unable to change it to work for our needs.
+* Note that there isn't actually anything in the C++ standard called "STL." STL is a term that merely refers to the templated portion of the C++ standard library.
+
+### Info.7 Can I mix EASTL with standard C++ STL?
+
+This is possible to some degree, though the extent depends on the implementation of C++ STL. One of things that makes interoperability is something called iterator categories. Containers and algorithms recognize iterator types via their category and STL iterator categories are not recognized by EASTL and vice versa.
+
+Things that you definitely can do:
+
+* #include both EASTL and standard STL headers from the same .cpp file.
+* Use EASTL containers to hold STL containers.
+* Construct an STL reverse_iterator from an EASTL iterator.
+* Construct an EASTL reverse_iterator from an STL iterator.
+
+Things that you probably will be able to do, though a given std STL implementation may prevent it:
+
+* Use STL containers in EASTL algorithms.
+* Use EASTL containers in STL algorithms.
+* Construct or assign to an STL container via iterators into an EASTL container.
+* Construct or assign to an EASTL container via iterators into an STL container.
+*
+Things that you would be able to do if the given std STL implementation is bug-free:
+
+* Use STL containers to hold EASTL containers. Unfortunately, VC7.x STL has a confirmed bug that prevents this. Similarly, STLPort versions prior to v5 have a similar but.
+
+Things that you definitely can't do:
+
+* Use an STL allocator directly with an EASTL container (though you can use one indirectly).
+* Use an EASTL allocator directly with an STL container (though you can use one indirectly).
+
+### Info.8 Where can I learn more about STL and EASTL?
+
+EASTL is close enough in philosophy and functionality to standard C++ STL that most of what you read about STL applies to EASTL. This is particularly useful with respect to container specifications. It would take a lot of work to document EASTL containers and algorithms in fine detail, whereas most standard STL documentation applies as-is to EASTL. We won't cover the differences here, as that's found in another FAQ entry.
+
+That being said, we provide a list of sources for STL documentation that may be useful to you, especially if you are less familiar with the concepts of STL and template programming in general.
+
+* The SGI STL web site. Includes a good STL reference.
+* CodeProject STL introduction.
+* Scott Meyers Effective STL book.
+* The Microsoft online STL documentation. Microsoft links go bad every couple months, so try searching for STL at the * Microsoft MSDN site.
+* The Dinkumware online STL documentation.
+* The C++ standard, which is fairly readable. You can buy an electronic version for about $18 and in the meantime you can make do with draft revisions of it off the Internet by searching for "c++ draft standard".
+* STL performance tips, by Pete Isensee
+* STL algorithms vs. hand-written loops, by Scott Meyers.
+
+### Info.9 What is the legal status of EASTL?
+
+EASTL is usable for all uses within Electronic Arts, both for internal usage and for shipping products for all platforms. All source code was written by a single EA engineer. Any externally derived code would be explicitly stated as such and approved by the legal department if such code ever gets introduced. As of EASTL v1.0, the red_black_tree.cpp file contains two functions derived from the original HP STL and have received EA legal approval for usage in any product.
+
+### Info.10 Does EASTL deal with compiler exception handling settings?
+
+EASTL has automatic knowledge of the compiler's enabling/disabling of exceptions. If your compiler is set to disable exceptions, EASTL automatically detects so and executes without them. Also, you can force-enable or force-disable that setting to override the automatic behavior by #defining EASTL_EXCEPTIONS_ENABLED to 0 or 1. See EASTL's config.h for more information.
+
+### Info.11 What C++ language features does EASTL use (e.g. virtual functions)?
+
+EASTL uses the following C++ language features:
+
+* Template functions, classes, member functions.
+* Multiple inheritance.
+* Namespaces.
+* Operator overloading.
+
+EASTL does not use the following C++ language features:
+
+* Virtual functions / interfaces.
+* RTTI (dynamic_cast).
+* Global and static variables. There are a couple class static const variables, but they act much like enums.
+* Volatile declarations
+* Template export.
+* Virtual inheritance.
+
+EASTL may use the following C++ language features:
+
+* Try/catch. This is an option that the user can enable and it defaults to whatever the compiler is set to use.
+* Floating point math. Hash containers have one floating point calculation, but otherwise floating point is not used.
+
+Notes:
+
+* EASTL uses rather little of the standard C or C++ library and uses none of the C++ template library (STL) and iostream library. The memcpy family of functions is one example EASTL C++ library usage.
+* EASTL never uses global new / delete / malloc / free. All allocations are done via user-specified allocators, though a default allocator definition is available.
+
+
+### Info.12 What compiler warning levels does EASTL support?
+
+For VC++ EASTL should compile without warnings on level 4, and should compile without warnings for "warnings disabled by default" except C4242, C4514, C4710, C4786, and C4820. These latter warnings are somewhat draconian and most EA projects have little choice but to leave them disabled.
+
+For GCC, EASTL should compile without warnings with -Wall. Extensive testing beyond that hasn't been done.
+
+However, due to the nature of templated code generation and due to the way compilers compile templates, unforeseen warnings may occur in user code that may or may not be addressible by modifying EASTL.
+
+### Info.13 Is EASTL compatible with Lint?
+
+As of EASTL 1.0, minimal lint testing has occurred. Testing with the November 2005 release of Lint (8.00t) demonstrated bugs in Lint that made its analysisnot very useful. For example, Lint seems to get confused about the C++ typename keyword and spews many errors with code that uses it. We will work with the makers of Lint to get this resolved so that Lint can provide useful information about EASTL.
+
+### Info.14 What compiler settings do I need to compile EASTL?
+
+EASTL consists mostly of header files with templated C++ code, but there are also a few .cpp files that need to be compiled and linked in order to use some of the modules. EASTL will compile in just about any environment. As mentioned elsewhere in this FAQ, EASTL can be compiled at the highest warning level of most compilers, transparently deals with compiler exception handling settings, is savvy to most or all compilation language options (e.g. wchar_t is built-in or not, for loop variables are local or not), and has almost no platform-specific or compiler-specific code. For the most part, you can just drop it in and it will work. The primary thing that needs to be in place is that EASTL .cpp files need to be compiled with the same struct padding/alignment settings as other code in the project. This of course is the same for just about any C++ source code library.
+
+See the Performance section of this FAQ for a discussion of the optimal compiler settings for EASTL performance.
+
+### Info.15 How hard is it to incorporate EASTL into my project?
+
+It's probably trivial.
+
+EASTL has only one dependency: EABase. And EASTL auto-configures itself for most compiler environments and for the most typical configuration choices. Since it is fairly highly warning-free, you won't likely need to modify your compiler warning settings, even if they're pretty stict. EASTL has a few .cpp files which need to be compiled if you want to use the modules associated with those files. You can just compile those files with your regular compiler settings. Alternatively, you can use one of the EASTL project files.
+
+In its default configuration, the only thing you need to provide to make EASTL work is to define implementations of the following operator new functions:
+
+```cpp
+#include <new>
+
+void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+```
+The flags and debugFlags arguments correspond to PPMalloc/RenderWare GeneralAllocator/GeneralAllocatorDebug Malloc equivalents.
+
+### Info.16 Should I use EASTL instead of std STL or instead of my custom library?
+
+There are reasons you may want to use EASTL; there are reasons you may not want to use it. Ditto for std STL or any other library. Here we present a list of reasons (+ and -) for why you might want to use one or another. However, it should be noted that while EASTL contains functionality found in std STL, it has another ~40% of functionality not found in std STL, so EASTL and std STL (and whatever other template library you may have) are not mutually exclusive.
+
+**EASTL**
+* \+ Has higher performance than any commercial STL, especially on console platforms.
+* \+ Has extended functionality tailored for game development.
+* \+ Is highly configurable, and we own it so it can be amended at will. Std STL is owned by a third party committee.
+* \+ Is much easier to read and debug than other similar libraries, especiallly std STL.
+
+
+* \- Is highly unit tested, but does not have the same level as std STL.
+* \- Is more complicated than many users' lite template libraries, and may put off some beginners.
+* \- EASTL
+
+**Std STL**
+
+* \+ Is highly portable; your STL code will likely compile and run anywhere.
+* \+ Works without the need to install or download any package to use it. It just works.
+* \+ Is highly reliable and supported by the compiler vendor. You can have confidence in it.
+* \+ Some std STL versions (e.g. STLPort, VC8 STL) have better runtime debug checking than EASTL.
+
+
+* \- Has (sometimes greatly) variable implementations, behavior, and performance between implementations.
+* \- Is usually hard to read and debug.
+* \- Doesn't support some of the needs of game development, such as aligned allocations, named allocations, intrusive containers, etc.
+* \- Is not as efficient as EASTL, especially on console platforms.
+
+**Your own library**
+(please forgive us for implying there may be weaknesses in your libraries)
+
+* \+ You have control over it and can make it work however you want.
+* \+ You can fix bugs in it on the spot and have the fix in your codebase immediately.
+* \+ Your own library can be highly integrated into your application code or development environment.
+
+
+* \- Many custom libraries don't have the same level of testing as libraries such as std STL or EASTL.
+* \- Many custom libraries don't have the same breadth or depth as std STL or especially EASTL.
+* \- Many custom libraries don't have the level of performance tuning that std STL or especially EASTL has.
+
+### Info.17 I think I've found a bug. What do I do?
+
+**Verify that you indeed have a bug**
+
+There are various levels of bugs that can occur, which include the following:
+
+* Compiler warnings generated by EASTL.
+* Compiler errors generated by EASTL (failure to compile well-formed code).
+* Runtime misbehavior by EASTL (function does the wrong thing).
+* Runtime crash or data corruption by EASTL.
+* Mismatch between EASTL documentation and behavior.
+* Mismatch between EASTL behavior and user's expections (mis-design).
+
+Any of the above items can be the fault of EASTL. However, the first four can also be the fault of the user. Your primary goal in verifying a potential bug is to determine if it is an EASTL bug or a user bug. Template errors can sometimes be hard to diagnose. It's probably best if you first show the problem to somebody you know to make sure you are not missing something obvious. Creating a reproducible case may be useful in helping convince yourself, but as is mentioned below, this is not required in order to report the bug.
+
+**Report the bug**
+
+The first place to try is the standard EA centralized tech support site. As of this writing (10/2005), that tech site is http://eatech/. Due to the frequent technology churn that seems to occur within Electronic Arts, the bug reporting system in place when you read this may not be the one that was in place when this FAQ entry was written. If the tech site route fails, consider directly contacting the maintainer of the EASTL package.
+
+In reporting a bug, it is nice if there is a simple reproducible case that can be presented. However, such a case requires time to create, and so you are welcome to initially simply state what you think the bug is without producing a simple reproducible case. It may be that this is a known bug or it may be possible to diagnose the bug without a reproducible case. If more information is needed then the step of trying to produce a reproducible case may be necessary.
+
+### Info.18 Can EASTL be used by third party EA developers?
+
+EASTL and other core technologies authored by EA (and not licensed from other companies) can be used in source and binary form by designated 3rd parties. The primary case where there is an issue is if the library contains platform specific code for a platform that the 3rd party is not licensed for. In that case the platform-specific code would need to be removed. This doesn’t apply to EASTL, nor many of the other core tech packages.
+
+## Performance
+
+### Perf.1 How efficient is EASTL compared to standard C++ STL implementations?
+
+With respect to the functionality that is equivalent between EASTL and standard STL, the short answer to this is that EASTL is as at least as efficient as othe STL implementations and in a number of aspects is more so. EASTL has functionality such as intrusive_list and linked_ptr that don't exist in standard STL but are explicitly present to provide significant optimizations over standard STL.
+
+The medium length answer is that EASTL is significantly more efficient than Dinkumware STL, and Microsoft Windows STL. EASTL is generally more efficient than Metrowerks STL, but Metrowerks has a few tricks up its sleeve which EASTL doesn't currently implement. EASTL is roughly equal in efficiency to STLPort and GCC 3.x+ STL, though EASTL has some optimizations that these do not.
+
+The long answer requires a breakdown of the functionality between various versions of the STL.
+
+### Perf.2 How efficient is EASTL in general?
+
+This question is related to the question, "How efficient are templates?" If you understand the effects of templates then you can more or less see the answer for EASTL. Templates are more efficient than the alternative when they are used appropriately, but can be less efficient than the alternative when used under circumstances that don't call for them. The strength of templates is that the compiler sees all the code and data types at compile time and can often reduce statements to smaller and faster code than with conventional non-templated code. The weakness of templates is that the sometimes produce more code and can result in what is often called "code bloat". However, it's important to note that unused template functions result in no generated nor linked code, so if you have a templated class with 100 functions but you only use one, only that one function will be compiled.
+
+EASTL is a rather efficient implementation of a template library and pulls many tricks of the trade in terms of squeezing optimal performance out of the compiler. The only way to beat it is to write custom code for the data types you are working with, and even then people are sometimes surprised to find that their hand-implemented algorithm works no better or even worse than the EASTL equivalent. But certainly there are ways to beat templates, especially if you resort to assembly language programming and some kinds of other non-generic tricks.
+
+### Perf.3 Strings don't appear to use the "copy-on-write" (CoW) optimization. Why not?
+
+**Short answer**
+CoW provides a benefit for a small percentage of uses but provides a disadvantage for the large majority of uses.
+
+**Long answer**
+The primary benefit of CoW is that it allows for the sharing of string data between two string objects. Thus if you say this:
+
+```cpp
+string a("hello");
+string b(a);
+```
+
+the "hello" will be shared between a and b. If you then say this:
+
+```cpp
+a = "world";
+```
+
+then *a* will release its reference to "hello" and leave b with the only reference to it. Normally this functionality is accomplished via reference counting and with atomic operations or mutexes.
+
+The C++ standard does not say anything about basic_string and CoW. However, for a basic_string implementation to be standards-conforming, a number of issues arise which dictate some things about how one would have to implement a CoW string. The discussion of these issues will not be rehashed here, as you can read the references below for better detail than can be provided in the space we have here. However, we can say that the C++ standard is sensible and that anything we try to do here to allow for an efficient CoW implementation would result in a generally unacceptable string interface.
+
+The disadvantages of CoW strings are:
+
+* A reference count needs to exist with the string, which increases string memory usage.
+* With thread safety, atomic operations and mutex locks are expensive, especially on weaker memory systems such as console gaming platforms.
+* All non-const string accessor functions need to do a sharing check the the first such check needs to detach the string. Similarly, all string assignments need to do a sharing check as well. If you access the string before doing an assignment, the assignment doesn't result in a shared string, because the string has already been detached.
+* String sharing doesn't happen the large majority of the time. In some cases, the total sum of the reference count memory can exceed any memory savings gained by the strings that share representations.
+
+The addition of a cow_string class is under consideration for EASTL. There are conceivably some systems which have string usage patterns which would benefit from CoW sharing. Such functionality is best saved for a separate string implementation so that the other string uses aren't penalized.
+
+References
+
+This is a good starting HTML reference on the topic:
+ http://www.gotw.ca/publications/optimizations.htm
+
+Here is a well-known Usenet discussion on the topic:
+ http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d
+
+### Perf.4 Does EASTL cause code bloat, given that it uses templates?
+
+The reason that templated functions and classes might cause an increase in code size because each template instantiation theoretically creates a unique piece of code. For example, when you compile this code:
+
+```cpp
+template <typename T>
+const T min(const T a, const T b)
+ { return b < a ? b : a; }
+
+int i = min<int>(3, 4);
+double d = min<double>(3.0, 4.0);
+```
+
+the compiler treats it as if you wrote this:
+
+```cpp
+int min(const int a, const int b)
+ { return b < a ? b : a; }
+
+double min(const double a, const double b)
+ { return b < a ? b : a; }
+```
+
+Imagine this same effect happening with containers such as list and map and you can see how it is that templates can cause code proliferation.
+
+A couple things offset the possibility of code proliferation: inlining and folding. In practice the above 'min' function would be converted to inlined functions by the compiler which occupy only a few CPU instructions. In many of the simplest cases the inlined version actually occupies less code than the code required to push parameters on the stack and execute a function call. And they will execute much faster as well.
+
+Code folding (a.k.a. "COMDAT folding", "duplicate stripping", "ICF" / "identical code folding") is a compiler optimization whereby the compiler realizes that two independent functions have compiled to the same code and thus can be reduced to a single function. The Microsoft VC++ compiler (Since VS2005), and GCC (v 4.5+) can do these kinds of optimizations on all platforms. This can result, for example, in all templated containers of pointers (e.g. vector<char*>, vector<Widget*>, etc.) to be linked as a single implementation. This folding occurs at a function level and so individual member functions can be folded while other member functions are not. A side effect of this optimization is that you aren't likely to gain much much declaring containers of void* instead of the pointer type actually contained.
+
+The above two features reduce the extent of code proliferation, but certainly don't eliminate it. What you need to think about is how much code might be generated vs. what your alternatives are. Containers like vector can often inline completely away, whereas more complicated containers such as map can only partially be inlined. In the case of map, if you need an such a container for your Widgets, what alternatives do you have that would be more efficient than instantiating a map? This is up to you to answer.
+
+It's important to note that C++ compilers will throw away any templated functions that aren't used, including unused member functions of templated classes. However, some argue that by having many functions available to the user that users will choose to use that larger function set rather than stick with a more restricted set.
+
+Also, don't be confused by syntax bloat vs. code bloat. In looking at templated libraries such as EASTL you will notice that there is sometimes a lot of text in the definition of a template implementation. But the actual underlying code is what you need to be concerned about.
+
+There is a good Usenet discussion on this topic at: http://groups.google.com/group/comp.lang.c++.moderated/browse_frm/thread/2b00649a935997f5
+
+### Perf.5 Don't STL and EASTL containers fragment memory?
+
+They only fragment memory if you use them in a way that does so. This is no different from any other type of container used in a dynamic way. There are various solutions to this problem, and EASTL provides additional help as well:
+
+* For vectors, use the reserve function (or the equivalent constructor) to set aside a block of memory for the container. The container will not reallocate memory unless you try grow beyond the capacity you reserve.
+* EASTL has "fixed" variations of containers which allow you to specify a fixed block of memory which the container uses for its memory. The container will not allocate any memory with these types of containers and all memory will be cache-friendly due to its locality.
+* You can assign custom allocators to containers instead of using the default global allocator. You would typically use an allocator that has its own private pool of memory.
+* Where possible, add all a container's elements to it at once up front instead of adding them over time. This avoids memory fragmentation and increase cache coherency.
+
+### Perf.6 I don't see container optimizations for equivalent scalar types such as pointer types. Why?
+
+Metrowerks (and no other, as of this writing) STL has some container specializations for type T* which maps them to type void*. The idea is that a user who declares a list of Widget* and a list of Gadget* will generate only one container: a list of void*. As a result, code generation will be smaller. Often this is done only in optimized builds, as such containers are harder to view in debug builds due to type information being lost.
+
+The addition of this optimization is under consideration for EASTL, though it might be noted that optimizing compilers such as VC++ are already capable of recognizing duplicate generated code and folding it automatically as part of link-time code generation (LTCG) (a.k.a. "whole program optimization"). This has been verified with VC++, as the following code and resulting disassembly demonstrate:
+
+```cpp
+eastl::list<int*> intPtrList;
+eastl::list<TestObject*> toPtrList;
+
+eastl_size_t n1 = intPtrList.size();
+eastl_size_t n2 = toPtrList.size();
+
+0042D288 lea edx,[esp+14h]
+0042D28C call eastl::list<TestObject>::size (414180h)
+0042D291 push eax
+0042D292 lea edx,[esp+24h]
+0042D296 call eastl::list<TestObject>::size (414180h)
+```
+
+Note that in the above case the compiler folded the two implementations of size() into a single implementation.
+
+### Perf.7 I've seen some STL's provide a default quick "node allocator" as the default allocator. Why doesn't EASTL do this?
+
+**Short answer**
+
+This is a bad, misguided idea.
+
+**Long answer**
+
+These node allocators implement a heap for all of STL with buckets for various sizes of allocations and implemented fixed-size pools for each of these buckets. These pools are attractive at first because they do well in STL comparison benchmarks, especially when thread safety is disabled. Such benchmarks make it impossible to truly compare STL implementations because you have two different allocators in use and in some cases allocator performance can dominate the benchmark. However, the real problem with these node allocators is that they badly fragment and waste memory. The technical discussion of this topic is outside the scope of this FAQ, but you can learn more about it by researching memory management on the Internet. Unfortunately, the people who implement STL libraries are generally not experts on the topic of memory management. A better approach, especially for game development, is for the user to decide when fixed-size pools are appropriate and use them via custom allocator assignment to containers.
+
+### Perf.8 Templates sometimes seem to take a long time to compile. Why do I do about that?
+
+C++ compilers are generally slower than C compilers, and C++ templates are generally slower to compile than regular C++ code. EASTL has some extra functionality (such as type_traits and algorithm specializations) that is not found in most other template libraries and significantly improves performance and usefulness but adds to the amount of code that needs to be compiled. Ironically, we have a case where more source code generates faster and smaller object code.
+
+The best solution to the problem is to use pre-compiled headers, which are available on all modern ~2002+) compilers, such as VC6.0+, GCC 3.2+, and Metrowerks 7.0+. In terms of platforms this means all 2002+ platforms.
+
+Some users have been speeding up build times by creating project files that put all the source code in one large .cpp file. This has an effect similar to pre-compiled headers. It can go even faster than pre-compiled headers but has downsides in the way of convenience and portability.
+
+### Perf.10 How well does EASTL inline?
+
+EASTL is written in such as way as to be easier to inline than typical templated libraries such as STL. How is this so? It is so because EASTL reduces the inlining depth of many functions, particularly the simple ones. In doing so it makes the implementation less "academic" but entirely correct. An example of this is the vector operator[] function, which is implemented like so with Microsoft STL:
+
+```cpp
+reference operator[](size_type n) {
+ return *(begin() + n);
+}
+```
+
+EASTL implements the function directly, like so:
+
+```cpp
+reference operator[](size_type n) {
+ return *(mpBegin + n);
+}
+```
+
+Both implementations are correct, but hte EASTL implementation will run faster in debug builds, be easier to debug, and will be more likely to be inlined when the usage of this function is within a hierarchy of other functions being inlined. It is not so simple to say that the Microsoft version will always inline in an optimized build, as it could be part of a chain and cause the max depth to be exceeded.
+
+That being said, EASTL appears to inline fairly well under most circumstances, including with GCC, which is the poorest of the compilers in its ability to inline well.
+
+### Perf.11 How do I control function inlining?
+
+Inlining is an important topic for templated code, as such code often relies on the compiler being able to do good function inlining for maximum performance. GCC, VC++, and Metrowerks are discussed here. We discuss compilation-level inlining and function-level inling here, though the latter is likely to be of more use to the user of EASTL, as it can externally control how EASTL is inlined. A related topic is GCC's template expansion depth, discussed elsewhere in this FAQ. We provide descriptions of inlining options here but don't currently have any advice on how to best use these with EASTL.
+
+Compilation-Level Inlining -- VC++
+
+VC++ has some basic functionality to control inlining, and the compiler is pretty good at doing aggressive inlining when optimizing on for all platforms.
+
+> **#pragma inline_depth( [0... 255] )**
+>
+> Controls the number of times inline expansion can occur by controlling the number of times that a series of function calls can be expanded (from 0 to 255 times). This pragma controls the inlining of functions marked inline and or inlined automatically under the /Ob2 option. The inline_depth pragma controls the number of times a series of function calls can be expanded. For example, if the inline depth is 4, and if A calls B and B then calls C, all three calls will be expanded inline. However, if the closest inline expansion is 2, only A and B are expanded, and C remains as a function call.
+
+> **#pragma inline_recursion( [{on | off}] )**
+>
+> Controls the inline expansion of direct or mutually recursive function calls. Use this pragma to control functions marked as inline and or functions that the compiler automatically expands under the /Ob2 option. Use of this pragma requires an /Ob compiler option setting of either 1 or 2. The default state for inline_recursion is off. The inline_recursion pragma controls how recursive functions are expanded. If inline_recursion is off, and if an inline function calls itself (either directly or indirectly), the function is expanded only once. If inline_recursion is on, the function is expanded multiple times until it reaches the value set by inline_depth, the default value of 8, or a capacity limit.
+
+Compilation-Level Inlining -- GCC
+
+GCC has a large set of options to control function inlining. Some options are available only in GCC 3.0 and later and thus not present on older platforms.
+
+
+> **-fno-default-inline**
+>
+> Do not make member functions inline by default merely because they are defined inside the class scope (C++ only). Otherwise, when you specify -O, member functions defined inside class scope are compiled inline by default; i.e., you don't need to add 'inline' in front of the member function name.
+>
+> **-fno-inline**
+>
+> Don't pay attention to the inline keyword. Normally this option is used to keep the compiler from expanding any functions inline. Note that if you are not optimizing, no functions can be expanded inline.
+>
+> **-finline-functions**
+>
+> Integrate all simple functions into their callers. The compiler heuristically decides which functions are simple enough to be worth integrating in this way. If all calls to a given function are integrated, and the function is declared static, then the function is normally not output as assembler code in its own right. Enabled at level -O3.
+>
+> **-finline-limit=n**
+>
+> By default, GCC limits the size of functions that can be inlined. This flag allows the control of this limit for functions that are explicitly marked as inline (i.e., marked with the inline keyword or defined within the class definition in c++). n is the size of functions that can be inlined in number of pseudo instructions (not counting parameter handling). pseudo-instructions are an internal representation of function size. The default value of n is 600. Increasing this value can result in more inlined code at the cost of compilation time and memory consumption. Decreasing usually makes the compilation faster and less code will be inlined (which presumably means slower programs). This option is particularly useful for programs that use inlining heavily such as those based on recursive templates with C++.
+>
+> Inlining is actually controlled by a number of parameters, which may be specified individually by using --param name=value. The -finline-limit=n option sets some of these parameters as follows:
+>
+> ```
+> max-inline-insns-single
+> is set to n/2.
+> max-inline-insns-auto
+> is set to n/2.
+> min-inline-insns
+> is set to 130 or n/4, whichever is smaller.
+> max-inline-insns-rtl
+> is set to n.
+> ```
+>
+> See --param below for a documentation of the individual parameters controlling inlining.
+>
+> **-fkeep-inline-functions**
+>
+> Emit all inline functions into the object file, even if they are inlined where used.
+>
+> **--param name=value**
+>
+> In some places, GCC uses various constants to control the amount of optimization that is done. For example, GCC will not inline functions that contain more that a certain number of instructions. You can control some of these constants on the command-line using the --param option.
+>
+> max-inline-insns-single
+> Several parameters control the tree inliner used in gcc. This number sets the maximum number of instructions (counted in GCC's internal representation) in a single function that the tree inliner will consider for inlining. This only affects functions declared inline and methods implemented in a class declaration (C++). The default value is 450.
+>
+> max-inline-insns-auto
+> When you use -finline-functions (included in -O3), a lot of functions that would otherwise not be considered for inlining by the compiler will be investigated. To those functions, a different (more restrictive) limit compared to functions declared inline can be applied. The default value is 90.
+>
+>large-function-insns
+> The limit specifying really large functions. For functions larger than this limit after inlining inlining is constrained by --param large-function-growth. This parameter is useful primarily to avoid extreme compilation time caused by non-linear algorithms used by the backend. This parameter is ignored when -funit-at-a-time is not used. The default value is 2700.
+>
+> large-function-growth
+> Specifies maximal growth of large function caused by inlining in percents. This parameter is ignored when -funit-at-a-time is not used. The default value is 100 which limits large function growth to 2.0 times the original size.
+>
+> inline-unit-growth
+> Specifies maximal overall growth of the compilation unit caused by inlining. This parameter is ignored when -funit-at-a-time is not used. The default value is 50 which limits unit growth to 1.5 times the original size.
+>
+> max-inline-insns-recursive
+> max-inline-insns-recursive-auto
+> Specifies maximum number of instructions out-of-line copy of self recursive inline function can grow into by performing recursive inlining. For functions declared inline --param max-inline-insns-recursive is taken into acount. For function not declared inline, recursive inlining happens only when -finline-functions (included in -O3) is enabled and --param max-inline-insns-recursive-auto is used. The default value is 450.
+>
+> max-inline-recursive-depth
+> max-inline-recursive-depth-auto
+> Specifies maximum recursion depth used by the recursive inlining. For functions declared inline --param max-inline-recursive-depth is taken into acount. For function not declared inline, recursive inlining happens only when -finline-functions (included in -O3) is enabled and --param max-inline-recursive-depth-auto is used. The default value is 450.
+>
+> inline-call-cost
+> Specify cost of call instruction relative to simple arithmetics operations (having cost of 1). Increasing this cost disqualify inlinining of non-leaf functions and at same time increase size of leaf function that is believed to reduce function size by being inlined. In effect it increase amount of inlining for code having large abstraction penalty (many functions that just pass the argumetns to other functions) and decrease inlining for code with low abstraction penalty. Default value is 16.
+>
+> **-finline-limit=n**
+>
+> By default, GCC limits the size of functions that can be inlined. This flag allows the control of this limit for functions that are explicitly marked as inline (i.e., marked with the inline keyword or defined within the class definition in c++). n is the size of functions that can be inlined in number of pseudo instructions (not counting parameter handling). The default value of n is 600. Increasing this value can result in more inlined code at the cost of compilation time and memory consumption. Decreasing usually makes the compilation faster and less code will be inlined (which presumably means slower programs). This option is particularly useful for programs that use inlining heavily such as those based on recursive templates with C++.
+
+Inlining is actually controlled by a number of parameters, which may be specified individually by using --param name=value. The -finline-limit=n option sets some of these parameters as follows:
+
+```
+max-inline-insns-single
+ is set to n/2.
+max-inline-insns-auto
+ is set to n/2.
+min-inline-insns
+ is set to 130 or n/4, whichever is smaller.
+max-inline-insns-rtl
+ is set to n.
+```
+
+See below for a documentation of the individual parameters controlling inlining.
+
+Note: pseudo instruction represents, in this particular context, an abstract measurement of function's size. In no way, it represents a count of assembly instructions and as such its exact meaning might change from one release to an another.
+
+GCC additionally has the -Winline compiler warning, which emits a warning whenever a function declared as inline was not inlined.
+
+Compilation-Level Inlining -- Metrowerks
+
+Metrowerks has a number of pragmas (and corresponding compiler settings) to control inlining. These include always_inline, inline_depth, inline_max_size, and inline max_total_size.
+
+> ```
+> #pragma always_inline on | off | reset
+> ```
+>
+> Controls the use of inlined functions. If you enable this pragma, the compiler ignores all inlining limits and attempts to inline all functions where it is legal to do so. This pragma is deprecated. Use the inline_depth pragma instead.
+>
+> ```
+> #pragma inline_depth(n)
+> #pragma inline_depth(smart)
+> ```
+>
+> Controls how many passes are used to expand inline function. Sets the number of passes used to expand inline function calls. The number n is an integer from 0 to 1024 or the smart specifier. It also represents the distance allowed in the call chain from the last function up. For example, if d is the total depth of a call chain, then functions below (d-n) are inlined if they do not exceed the inline_max_size and inline_max_total_size settings which are discussed directly below.
+>
+> ```
+> #pragma inline_max_size(n);
+> #pragma inline_max_total_size(n);
+> ```
+>
+> The first pragma sets the maximum function size to be considered for inlining; the second sets the maximum size to which a function is allowed to grow after the functions it calls are inlined. Here, n is the number of statements, operands, and operators in the function, which turns out to be roughly twice the number of instructions generated by the function. However, this number can vary from function to function. For the inline_max_size pragma, the default value of n is 256; for the inline_max_total_size pragma, the default value of n is 10000. The smart specifier is the default mode, with four passes where the passes 2-4 are limited to small inline functions. All inlineable functions are expanded if inline_depth is set to 1-1024.
+
+Function-Level Inlining -- VC++
+
+> To force inline usage under VC++, you use this:
+>
+> ```
+> __forceinline void foo(){ ... }
+> ```
+>
+> It should be noted that __forceinline has no effect if the compiler is set to disable inlining. It merely tells the compiler that when inlining is enabled that it shouldn't use its judgment to decide if the function should be inlined but instead to always inline it.
+>
+> To disable inline usage under VC++, you need to use this:
+>
+> ```
+> #pragma inline_depth(0) // Disable inlining.
+> void foo() { ... }
+> #pragma inline_depth() // Restore default.
+> ```
+>
+> The above is essentially specifying compiler-level inlining control within the code for a specific function.
+
+**Function-Level Inlining -- GCC / Metrowerks**
+
+> To force inline usage under GCC 3.1+, you use this:
+>
+> `inline void foo() __attribute__((always_inline)) { ... }`
+>
+> or
+>
+> `inline __attribute__((always_inline)) void foo() { ... }`
+>
+> To disable inline usage under GCC 3+, you use this:
+>
+> `void foo() __attribute__((noinline)) { ... }`
+>
+> or
+>
+> `inline __attribute__((noinline)) void foo() { ... }`
+
+EABase has some wrappers for this, such as EA_FORCE_INLINE.
+
+### Perf.12 C++ / EASTL seems to bloat my .obj files much more than C does.
+
+There is no need to worry. The way most C++ compilers compile templates, they compile all seen template code into the current .obj module, which results in larger .obj files and duplicated template code in multiple .obj files. However, the linker will (and in fact must) select only a single version of any given function for the application, and these linked functions will usually be located contiguously.
+
+Additionally, the debug information for template definitions is usually larger than that for non-templated C++ definitions, which itself is sometimes larger than C defintions due to name decoration.
+
+### Perf.13 What are the best compiler settings for EASTL?
+
+We will discuss various aspects of this topic here. As of this writing, more EASTL research on this topic has been done on Microsoft compiler platforms (e.g. Win32) than GCC platforms. Thus currently this discussion focuses on VC++ optimization. Some of the concepts are applicable to GCC, though. EASTL has been sucessfully compiled and tested (the EASTL unit test) on our major development platforms with the highest optimization settings enabled, including GCC's infamous -O3 level.
+
+**Optimization Topics**
+
+* Function inlining.
+* Optimization for speed vs. optimization for size.
+* Link-time code generation (LTCG).
+* Profile-guided optimization (PGO).
+
+**Function inlining**
+
+EASTL is a template library and inlining is important for optimal speed. Compilers have various options for enabling inlining and those options are discussed in this FAQ in detail. Most users will want to enable some form of inlining when compiling EASTL and other templated libraries. For users that are most concerned about the compiler's inlining increasing code size may want to try the 'inline only functions marked as inline' compiler option. Here is a table of normalized results from the benchmark project (Win32 platform):
+| Inlining Disabled | Inline only 'inline' | Inline any |
+|------|------|------|------|
+| **Application size** | 100K | 86K | 86K |
+| **Execution time** | 100 | 75 | 75 |
+
+The above execution times are highly simplified versions of the actual benchmark data but convey a sense of the general average behaviour that can be expected. In practice, simple functions such as vector::operator[] will execute much faster with inlining enabled but complex functions such as map::insert may execute no faster within inlining enabled.
+
+**Optimization for Speed / Size**
+
+Optimization for speed results in the compiler inlining more code than it would otherwise. This results in the inlined code executing faster than if it was not inlined. As mentioned above, basic function inlining can result in smaller code as well as faster code, but after a certain point highly inlined code becomes greater in size than less inlined code and the performance advantages of inlining start to lessen. The EASTL Benchmark project is a medium sized application that is about 80% templated and thus acts as a decent measure of the practical tradeoff between speed and size. Here is a table of normalized results from the benchmark project (Windows platform):
+| Size | Speed | Speed + LTCG | Speed + LTCG + PGO |
+|------|------|------|------|
+| **Application size** | 80K | 100K | 98K | 98K |
+| **Execution time** | 100 | 90 | 83 | 75 |
+
+What the above table is saying is that if you are willing to have your EASTL code be 20% larger, it will be 10% faster. Note that it doesn't mean that your app will be 20% larger, only the templated code in it like EASTL will be 20% larger.
+
+**Link-time code generation (LTCG)**
+
+LTCG is a mechanism whereby the compiler compiles the application as if it was all in one big .cpp file instead of separate .cpp files that don't see each other. Enabling LTCG optimizations is done by simply setting some compiler and linker settings and results in slower link times. The benchmark results are presented above and for the EASTL Benchmark project show some worthwhile improvement.
+
+**Profile-guided optimization (PGO)**
+
+PGO is a mechanism whereby the compiler uses profiling information from one or more runs to optimize the compilation and linking of an application. Enabling PGO optimizations is done by setting some linker settings and doing some test runs of the application, then linking the app with the test run results. Doing PGO optimizations is a somewhat time-consuming task but the benchmark results above demonstrate that for the EASTL Benchmark project that PGO is worth the effort.
+
+## Problems
+
+### Prob.1 I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?
+
+It may possible that you are seeing floating point roundoff problems. Many STL algorithms require object comparisons to act consistently. However, floating point values sometimes compare differently between uses because in one situation a value might be in 32 bit form in system memory, whereas in anther situation that value might be in an FPU register with a different precision. These are difficult problems to track down and aren't the fault of EASTL or whatever similar library you might be using. There are various solutions to the problem, but the important thing is to find a way to force the comparisons to be consistent.
+
+The code below was an example of this happening, whereby the object pA->mPos was stored in system memory while pB->mPos was stored in a register and comparisons were inconsistent and a crash ensued.
+
+```cpp
+class SortByDistance : public binary_function<WorldTreeObject*, WorldTreeObject*, bool>
+{
+private:
+ Vector3 mOrigin;
+
+public:
+ SortByDistance(Vector3 origin) {
+ mOrigin = origin;
+ }
+
+ bool operator()(WorldTreeObject* pA, WorldTreeObject* pB) const {
+ return ((WorldObject*)pA)->mPos - mOrigin).GetLength()
+ < ((WorldObject*)pB)->mPos - mOrigin).GetLength();
+ }
+};
+```
+
+Another thing to watch out for is the following mistake:
+
+```cpp
+struct ValuePair
+{
+ uint32_t a;
+ uint32_t b;
+};
+
+// Improve speed by casting the struct to uint64_t
+bool operator<(const ValuePair& vp1, const ValuePair& vp2)
+ { return *(uint64_t*)&vp1 < *(uint64_t*)&vp2; }
+```
+
+The problem is that the ValuePair struct has 32 bit alignment but the comparison assumes 64 bit alignment. The code above has been observed to crash on the PowerPC 64-based machines. The resolution is to declare ValuePair as having 64 bit alignment.
+
+### Prob.2 I am getting compiler warnings (e.g. C4244, C4242 or C4267) that make no sense. Why?
+
+One cause of this occurs with VC++ when you have code compiled with the /Wp64 (detect 64 bit portability issues) option. This causes pointer types to have a hidden flag called __w64 attached to them by the compiler. So 'ptrdiff_t' is actually known by the compiler as '__w64 int', while 'int' is known by the compilers as simply 'int'. A problem occurs here when you use templates. For example, let's say we have this templated function
+
+``` cpp
+template <typename T>
+T min(const T a, const T b) {
+ return b < a ? b : a;
+}
+```
+
+If you compile this code:
+
+```cpp
+ptrdiff_t a = min(ptrdiff_t(0), ptrdiff_t(1));
+int b = min((int)0, (int)1);
+```
+
+You will get the following warning for the second line, which is somewhat nonsensical:
+
+`warning C4244: 'initializing' : conversion from 'const ptrdiff_t' to 'int', possible loss of data`
+
+This could probably be considered a VC++ bug, but in the meantime you have little choice but to ignore the warning or disable it.
+
+### Prob.3 I am getting compiler warning C4530, which complains about exception handling and "unwind semantics." What gives?
+
+VC++ has a compiler option (/EHsc) that allows you to enable/disable exception handling stack unwinding but still enable try/catch. This is useful because it can save a lot in the way of code generation for your application. Disabling stack unwinding will decrease the size of your executable on at least the Win32 platform by 10-12%.
+
+If you have stack unwinding disabled, but you have try/catch statements, VC++ will generate the following warning:
+
+`warning C4530: C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc`
+
+As of EASTL v1.0, this warning has been disabled within EASTL for EASTL code. However, non-EASTL code such as std STL code may still cause this warning to be triggered. In this case there is not much you can do about this other than to disable the warning.
+
+### Prob.4 Why are tree-based EASTL containers hard to read with a debugger?
+
+**Short answer**
+
+Maximum performance and design mandates.
+
+**Long answer**
+
+You may notice that when you have a tree-based container (e.g. set, map) in the debugger that it isn't automatically able to recognize the tree nodes as containing instances of your contained object. You can get the debugger to do what you want with casting statements in the debug watch window, but this is not an ideal solution. The reason this is happening is that node-based containers always use an anonymous node type as the base class for container nodes. This is primarily done for performance, as it allows the node manipulation code to exist as a single non-templated library of functions and it saves memory because containers will have one or two base nodes as container 'anchors' and you don't want to allocate a node of the size of the user data when you can just use a base node. See list.h for an example of this and some additional in-code documentation on this.
+
+Additionally, EASTL has the design mandate that an empty container constructs no user objects. This is both for performance reasons and because it doing so would skew the user's tracking of object counts and might possibly break some expectation the user has about object lifetimes.
+
+Currently this debug issue exists only with tree-based containers. Other node-based containers such as list and slist use a trick to get around this problem in debug builds.
+
+See [Debug.2](#debug2-how-do-i-view-containers-if-the-visualizertooltip-support-is-not-present) for more.
+
+### Prob.5 The EASTL source code is sometimes rather complicated looking. Why is that?
+
+**Short answer**
+
+Maximum performance.
+
+**Long answer**
+EASTL uses templates, type_traits, iterator categories, redundancy reduction, and branch reduction in order to achieve optimal performance. A side effect of this is that there are sometimes a lot of template parameters and multiple levels of function calls due to template specialization. The ironic thing about this is that this makes the code (an optimized build, at least) go faster, not slower. In an optimized build the compiler will see through the calls and template parameters and generate a direct optimized inline version.
+
+As an example of this, take a look at the implementation of the copy implementation in algorithm.h. If you are copying an array of scalar values or other trivially copyable values, the compiler will see how the code directs this to the memcpy function and will generate nothing but a memcpy in the final code. For non-memcpyable data types the compiler will automatically understand that in do the right thing.
+
+EASTL's primary objective is maximal performance, and it has been deemed worthwhile to make the code a little less obvious in order to achieve this goal. Every case where EASTL does something in an indirect way is by design and usually this is for the purpose of achieving the highest possible performance.
+
+### Prob.6 When I get compilation errors, they are very long and complicated looking. What do I do?
+
+Assuming the bugs are all worked out of EASTL, these errors really do indicate that you have something wrong. EASTL is intentionally very strict about types, as it tries to minimize the chance of users errors. Unfortunately, there is no simple resolution to the problem of long compiler errors other than to deal with them. On the other hand, once you've dealt with them a few times, you tend to realize that most of time they are the same kinds of errors and
+
+Top five approaches to dealing with long compilation errors:
+
+1. Look at the line where the compilation error occurred and ignore the text of the error and just look at obvious things that might be wrong.
+2. Consider the most common typical causes of templated compilation errors and consider if any of these might be your problem. Usually one of them are.
+3. Either read through the error (it's not as hard as it may look on the surface) or copy the error to a text file and remove the extraneous
+4. Compile the code under GCC instead of MSVC, as GCC warnings and errors tend to be more helpful than MSVC's. Possibly also consider compiling an isolated version under Comeau C++'s free online compiler at www.comeaucomputing.com or the Dinkumware online compiler at http://dinkumware.com/exam/.
+5. Try using an STL filter (http://www.bdsoft.com/tools/stlfilt.html) which automatically boils down template errors to simpler forms. We haven't tried this yet with EASTL. Also there is the more generic TextFilt (http://textfilt.sourceforge.net/).
+
+Top five causes of EASTL compilation errors:
+
+1. const-correctness. Perhaps a quarter of container template errors are due to the user not specifying const correctly.
+2. Missing hash function. hash_map, hash_set, etc. require that you either specify a hash function or one exists for your class. See functional.h for examples of declarations of hash functions for common data types.
+3. Missing operators. Various containers and algorithms require that certain operators exist for your contained classes. For example, list requires that you can test contained objects for equivalence (i.e. operator==), while map requires that you can test contained objects for "less-ness" (operator <). If you define a Widget class and don't have a way to compare two Widgets, you will get errors when trying to put them into a map.
+4. Specifying the wrong data type. For example, it is a common mistake to forget that when you insert into a map, you need to insert a pair of objects and not just your key or value type.
+5. Incorrect template parameters. When declaring a template instantiation (e.g. map<int, int, less<int> >) you simply need to get the template parameters correct. Also note that when you have ">>" next to each other that you need to separate them by one space (e.g. "> >").
+
+### Prob.7 Templates sometimes seem to take a long time to compile. Why do I do about that?
+
+C++ compilers are generally slower than C compilers, and C++ templates are generally slower to compile than regular C++ code. EASTL has some extra functionality (such as type_traits and algorithm specializations) that is not found in most other template libraries and significantly improves performance and usefulness but adds to the amount of code that needs to be compiled. Ironically, we have a case where more source code generates faster and smaller object code.
+
+The best solution to the problem is to use pre-compiled headers, which are available on all modern ~2002+) compilers, such as VC6.0+, GCC 3.2+, and Metrowerks 7.0+. In terms of platforms this means all 2002+ platforms.
+
+Some users have been speeding up build times by creating project files that put all the source code in one large .cpp file. This has an effect similar to pre-compiled headers. It can go even faster than pre-compiled headers but has downsides in the way of convenience and portability.
+
+### Prob.8 I get the compiler error: "template instantiation depth exceeds maximum of 17. use -ftemplate-depth-NN to increase the maximum".
+
+This is a GCC error that occurs when a templated function calls a templated function which calls a templated function, etc. past a depth of 17. You can use the GCC command line argument -ftemplate-depth-40 (or some other high number) to get around this. As note below, the syntax starting with GCC 4.5 has changed slightly.
+
+The primary reason you would encounter this with EASTL is type traits that are used by algorithms. The type traits library is a (necessarily) highly templated set of types and functions which adds at most about nine levels of inlining. The copy and copy_backward algorithms have optimized pathways that add about four levels of inlining. If you have just a few more layers on top of that in container or user code then the default limit of 17 can be exceeded. We are investigating ways to reduce the template depth in the type traits library, but only so much can be done, as most compilers don't support type traits natively. Metrowerks is the current exception.
+
+From the GCC documentation:
+
+```
+-ftemplate-depth-n
+
+Set the maximum instantiation depth for template classes to n.
+A limit on the template instantiation depth is needed to detect
+endless recursions during template class instantiation ANSI/ISO
+C++ conforming programs must not rely on a maximum depth greater than 17.
+Note that starting with GCC 4.5 the syntax is -ftemplate-depth=N instead of -ftemplate-depth-n.
+```
+
+### Prob.9 I'm getting errors about min and max while compiling.
+
+You need to define NOMINMAX under VC++ when this occurs, as it otherwise defines min and max macros that interfere. There may be equivalent issues with other compilers. Also, VC++ has a specific <minmax.h> header file which defines min and max macros but which doesn't pay attention to NOMINMAX and so in that case there is nothing to do but not include that file or to undefine min and max. minmax.h is not a standard file and its min and max macros are not standard C or C++ macros or functions.
+
+### Prob.10 C++ / EASTL seems to bloat my .obj files much more than C does.
+
+There is no need to worry. The way most C++ compilers compile templates, they compile all seen template code into the current .obj module, which results in larger .obj files and duplicated template code in multiple .obj files. However, the linker will (and must) select only a single version of any given function for the application, and these linked functions will usually be located contiguously.
+
+### Prob.11 I'm getting compiler errors regarding placement operator new being previously defined.
+
+This can happen if you are attempting to define your own versions of placement new/delete. The C++ language standard does not allow the user to override these functions. Section 18.4.3 of the standard states:
+
+> Placement forms
+> 1. These functions are reserved, a C++ program may not define functions that displace the versions in the Standard C++ library.
+
+You may find that #defining __PLACEMENT_NEW_INLINE seems to fix your problems under VC++, but it can fail under some circumstances and is not portable and fails with other compilers, which don't have an equivalent workaround.
+
+### Prob.12 I'm getting errors related to wchar_t string functions such as wcslen().
+
+EASTL requires EABase-related items that the following be so. If not, then EASTL gets confused about what types it can pass to wchar_t related functions.
+
+* The #define EA_WCHAR_SIZE is equal to sizeof(wchar_t).
+* If sizeof(wchar_t) == 2, then char16_t is typedef'd to wchar_t.
+* If sizeof(wchar_t) == 4, then char32_t is typedef'd to wchar_t.
+
+EABase v2.08 and later automatically does this for most current generation and all next generation platforms. With GCC 2.x, the user may need to predefine EA_WCHAR_SIZE to the appropriate value, due to limitations with the GCC compiler. Note that GCC defaults to sizeof(wchar_t) ==4, but it can be changed to 2 with the -fshort_wchar compiler command line argument. If you are using EASTL without EABase, you will need to make sure the above items are correctly defined.
+
+### Prob.13 I'm getting compiler warning C4619: there is no warning number Cxxxx (e.g. C4217).
+
+Compiler warning C4619 is a VC++ warning which is saying that the user is attempting to enable or disable a warning which the compiler doesn't recognize. This warning only occurs if the user has the compiler set to enable warnings that are normally disabled, regardless of the warning level. The problem, however, is that there is no easy way for user code to tell what compiler warnings any given compiler version will recognize. That's why Microsoft normally disables this warning.
+
+The only practical solution we have for this is for the user to disable warning 4619 globally or an a case-by-case basis. EA build systems such as nant/framework 2's eaconfig will usually disable 4619. In general, global enabling of 'warnings that are disabled by default' often result in quandrys such as this.
+
+### Prob.14 My stack-based fixed_vector is not respecting the object alignment requirements.
+
+EASTL fixed_* containers rely on the compiler-supplied alignment directives, such as that implemented by EA_PREFIX_ALIGN. This is normally a good thing because it allows the memory to be local with the container. However, as documented by Microsoft at http://msdn2.microsoft.com/en-us/library/83ythb65(VS.71).aspx, this doesn't work for stack variables. The two primary means of working around this are:
+
+* Use something like AlignedObject<> from the EAStdC package's EAAllocator.h file.
+* Use eastl::vector with a custom allocator and have it provide aligned memory. EASTL automatically recognizes that the objects are aligned and will call the aligned version of your allocator allocate() function. You can get this aligned memory from the stack, if you need it, somewhat like how AlignedObject<> works.
+
+### Prob.15 I am getting compiler errors when using GCC under XCode (Macintosh/iphone).
+
+The XCode environment has a compiler option which causes it to evaluate include directories recursively. So if you specify /a/b/c as an include directory, it will consider all directories underneath c to also be include directories. This option is enabled by default, though many XCode users disable it, as it is a somewhat dangerous option. The result of enabling this option with EASTL is that <EASTL/string.h> is used by the compiler when you say #include <string.h>. The solution is to disable this compiler option. It's probably a good idea to disable this option anyway, as it typically causes problems for users yet provides minimal benefits.
+
+### Prob.16 I am getting linker errors about Vsnprintf8 or Vsnprintf16.
+
+EASTL requires the user to provide a function called Vsnprintf8 if the string::sprintf function is used. vsnprintf is not a standard C function, but most C standard libraries provide some form of it, though in some ways their implementations differ, especially in what the return value means. Also, most implementations of vsnprintf are slow, mostly due to mutexes related to locale functionality. And you can't really use vendor vsnprintf on an SPU due to the heavy standard library size. EASTL is stuck because it doesn't want to depend on something with these problems. EAStdC provides a single consistent fast lightweight, yet standards-conforming, implementation in the form of Vsnprintf(char8_t*, ...), but EASTL can't have a dependency on EAStdC. So the user must provide an implementation, even if all it does is call EAStdC's Vsnprintf or the vendor vsnprintf for that matter.
+
+Example of providing Vsnprintf8 via EAStdC:
+
+```cpp
+#include <EAStdC/EASprintf.h>
+
+int Vsnprintf8(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments)
+{
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+}
+
+int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments)
+{
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+}
+```
+
+Example of providing Vsnprintf8 via C libraries:
+
+```cpp
+#include <stdio.h>
+
+int Vsnprintf8(char8_t* p, size_t n, const char8_t* pFormat, va_list arguments)
+{
+ #ifdef _MSC_VER
+ return vsnprintf_s(p, n, _TRUNCATE, pFormat, arguments);
+ #else
+ return vsnprintf(p, n, pFormat, arguments);
+ #endif
+}
+
+int Vsnprintf16(char16_t* p, size_t n, const char16_t* pFormat, va_list arguments)
+{
+ #ifdef _MSC_VER
+ return vsnwprintf_s(p, n, _TRUNCATE, pFormat, arguments);
+ #else
+ return vsnwprintf(p, n, pFormat, arguments); // Won't work on Unix because its libraries implement wchar_t as int32_t.
+ #endif
+}
+```
+
+### Prob.17 I am getting compiler errors about UINT64_C or UINT32_C.
+
+This is usually an order-of-include problem that comes about due to the implementation of __STDC_CONSTANT_MACROS in C++ Standard libraries. The C++ <stdint.h> header file defineds UINT64_C only if __STDC_CONSTANT_MACROS has been defined by the user or the build system; the compiler doesn't automatically define it. The failure you are seeing occurs because user code is #including a system header before #including EABase and without defining __STDC_CONSTANT_MACROS itself or globally. EABase defines __STDC_CONSTANT_MACROS and #includes the appropriate system header. But if the system header was already previously #included and __STDC_CONSTANT_MACROS was not defined, then UINT64_C doesn't get defined by anybody.
+
+The real solution that the C++ compiler and standard library wants is for the app to globally define __STDC_CONSTANT_MACROS itself in the build.
+
+### Prob.18 I am getting a crash with a global EASTL container.
+
+This usually due to compiler's lack of support for global (and static) C++ class instances. The crash is happening because the global variable exists but its constructor was not called on application startup and it's member data is zeroed bytes. To handle this you need to manually initialize such variables. There are two primary ways:
+
+Failing code:
+
+```cpp
+eastl::list<int> gIntList; // Global variable.
+
+void DoSomething()
+{
+ gIntList.push_back(1); // Crash. gIntList was never constructed.
+}
+```
+
+Declaring a pointer solution:
+
+```cpp
+eastl::list<int>* gIntList = NULL;
+
+void DoSomething()
+{
+ if(!gIntList) // Or move this to an init function.
+ gIntList = new eastl::list<int>;
+
+ gIntList->push_back(1); // Success
+}
+```
+
+Manual constructor call solution:
+
+```cpp
+eastl::list<int> gIntList;
+
+void InitSystem()
+{
+ new(&gIntList) eastl::list<int>;
+}
+
+void DoSomething()
+{
+ gIntList.push_back(1); // Success
+}
+```
+
+### Prob.19 Why doesn't EASTL support passing NULL string functions?
+
+The primary argument is to make functions safer for use. Why crash on NULL pointer access when you can make the code safe? That's a good argument. The counter argument, which EASTL currently makes, is:
+
+> It breaks consistency with the C++ STL library and C libraries, which require strings to be valid.
+>
+> It makes the coder slower and bigger for all users, though few need NULL checks.
+The specification for how to handle NULL is simple for some cases but not simple for others. Operator < below a case where the proper handling of it in a consistent way is not simple, as all comparison code (<, >, ==, !=, >=, <=) in EASTL must universally and consistently handle the case where either or both sides are NULL. A NULL string seems similar to an empty string, but doesn't always work out so simply.
+>
+> What about other invalid string pointers? NULL is merely one invalid value of many, with its only distinction being that sometimes it's intentionally NULL (as opposed to being NULL due to not being initialized).
+How and where to implement the NULL checks in such a way as to do it efficiently is not always simple, given that public functions call public functions.
+>
+> It's arguable (and in fact the the intent of the C++ standard library) that using pointers that are NULL is a user/app mistake. If we really want to be safe then we should be using string objects for everything. You may not entirely buy this argument in practice, but on the other hand one might ask why is the caller of EASTL using a NULL pointer in the first place? The answer of course is that somebody gave it to him.
+
+## Debug
+
+### Debug.1 How do I set the VC++ debugger to display EASTL container data with tooltips?
+
+See [Cont.9](#cont9-how-do-i-set-the-vc-debugger-to-display-eastl-container-data-with-tooltips)
+
+### Debug.2 How do I view containers if the visualizer/tooltip support is not present?
+
+Here is a table of answers about how to manually inspect containers in the debugger.
+
+| Container | Approach |
+|------|------|
+| slist<br>fixed_slist | slist is a singly-linked list. Look at the slist mNode variable. You can walk the list by looking at mNode.mpNext, etc. |
+| list<br>fixed_list | list is a doubly-linked list. Look at the list mNode variable. You can walk the list forward by looking at mNode.mpNext, etc. and backward by looking at mpPrev, etc. |
+| intrusive_list<br>intrusive_slist† | Look at the list mAnchor node. This lets you walk forward and backward in the list via mpNext and mpPrev. |
+| array | View the array mValue member in the debugger. It's simply a C style array. |
+| vector<br>fixed_vector | View the vector mpBegin value in the debugger. If the string is long, use ", N" to limit the view length, as with someVector.mpBegin, 32 |
+| vector_set<br>vector_multiset<br>vector_map<br>vector_multimap | These are containers that are implemented as a sorted vector, deque, or array. They are searched via a standard binary search. You can view them the same way you view a vector or deque. |
+| deque | deque is implemented as an array of arrays, where the arrays implement successive equally-sized segments of the deque. The mItBegin deque member points the deque begin() position. |
+| bitvector | Look at the bitvector mContainer variable. If it's a vector, then see vector above. |
+| bitset | Look at the bitset mWord variable. The bitset is nothing but one or more uint32_t mWord items. |
+| set<br>multiset<br>fixed_set<br>fixed_multiset | The set containers are implemented as a tree of elements. The set mAnchor.mpNodeParent points to the top of the tree; the mAnchor.mpNodeLeft points to the far left node of the tree (set begin()); the mAnchor.mpNodeRight points to the right of the tree (set end()). |
+| map<br>multimap<br>fixed_map<br>fixed_multimap | The map containers are implemented as a tree of pairs, where pair.first is the map key and pair.second is the map value. The map mAnchor.mpNodeParent points to the top of the tree; the mAnchor.mpNodeLeft points to the far left node of the tree (map begin()); the mAnchor.mpNodeRight points to the right of the tree (map end()). |
+| hash_map<br>hash_multimap<br>fixed_hash_map<br>fixed_hash_multimap | hash tables in EASTL are implemented as an array of singly-linked lists. The array is the mpBucketArray member. Each element in the list is a pair, where the first element of the pair is the map key and the second is the map value. |
+| intrusive_hash_map<br>intrusive_hash_multimap<br>intrusive_hash_set<br>intrusive_hash_multiset | intrusive hash tables in EASTL are implemented very similarly to regular hash tables. See the hash_map and hash_set entries for more info. |
+| hash_set<br>hash_multiset<br>fixed_hash_set<br>fixed_hash_map | hash tables in EASTL are implemented as an array of singly-linked lists. The array is the mpBucketArray member. |
+| basic_string<br>fixed_string<br>fixed_substring | View the string mpBegin value in the debugger. If the string is long, use ", N" to limit the view length, as with someString.mpBegin, 32 |
+| heap | A heap is an array of data (e.g. EASTL vector) which is organized in a tree whereby the highest priority item is array[0], The next two highest priority items are array[1] and [2]. Underneath [1] in priority are items [3] and [4], and underneath item [2] in priority are items [5] and [6]. etc. |
+| stack | View the stack member c value in the debugger. That member will typically be a list or deque. |
+| queue | View the queue member c value in the debugger. That member will typically be a list or deque. |
+| priority_queue | View the priority_queue member c value in the debugger. That member will typically be a vector or deque which is organized as a heap. See the heap section above for how to view a heap. |
+| smart_ptr | View the mpValue member. |
+
+### Debug.3 The EASTL source code is sometimes rather complicated looking. Why is that?
+
+**Short answer**
+
+Maximum performance.
+
+**Long answer**
+
+EASTL uses templates, type_traits, iterator categories, redundancy reduction, and branch reduction in order to achieve optimal performance. A side effect of this is that there are sometimes a lot of template parameters and multiple levels of function calls due to template specialization. The ironic thing about this is that this makes the code (an optimized build, at least) go faster, not slower. In an optimized build the compiler will see through the calls and template parameters and generate a direct optimized inline version.
+
+As an example of this, take a look at the implementation of the copy implementation in algorithm.h. If you are copying an array of scalar values or other trivially copyable values, the compiler will see how the code directs this to the memcpy function and will generate nothing but a memcpy in the final code. For non-memcpyable data types the compiler will automatically understand that in do the right thing.
+
+EASTL's primary objective is maximal performance, and it has been deemed worthwhile to make the code a little less obvious in order to achieve this goal. Every case where EASTL does something in an indirect way is by design and usually this is for the purpose of achieving the highest possible performance.
+
+### Debug.4 When I get compilation errors, they are very long and complicated looking. What do I do?
+
+Assuming the bugs are all worked out of EASTL, these errors really do indicate that you have something wrong. EASTL is intentionally very strict about types, as it tries to minimize the chance of users errors. Unfortunately, there is no simple resolution to the problem of long compiler errors other than to deal with them. On the other hand, once you've dealt with them a few times, you tend to realize that most of time they are the same kinds of errors and
+
+Top five approaches to dealing with long compilation errors:
+
+1.Look at the line where the compilation error occurred and ignore the text of the error and just look at obvious things that might be wrong.
+2. Consider the most common typical causes of templated compilation errors and consider if any of these might be your problem. Usually one of them are.
+3. Either read through the error (it's not as hard as it may look on the surface) or copy the error to a text file and remove the extraneous
+4. Compile the code under GCC instead of MSVC, as GCC warnings and errors tend to be more helpful than MSVC's. Possibly also consider compiling an isolated version under Comeau C++'s free online compiler at www.comeaucomputing.com or the Dinkumware online compiler at http://dinkumware.com/exam/.
+5. Try using an STL filter (http://www.bdsoft.com/tools/stlfilt.html) which automatically boils down template errors to simpler forms. We haven't tried this yet with EASTL. Also there is the more generic TextFilt (http://textfilt.sourceforge.net/).
+
+Top five causes of EASTL compilation errors:
+
+1. const-correctness. Perhaps a quarter of container template errors are due to the user not specifying const correctly.
+2. Missing hash function. hash_map, hash_set, etc. require that you either specify a hash function or one exists for your class. See functional.h for examples of declarations of hash functions for common data types.
+3. Missing operators. Various containers and algorithms require that certain operators exist for your contained classes. For example, list requires that you can test contained objects for equivalence (i.e. operator==), while map requires that you can test contained objects for "less-ness" (operator <). If you define a Widget class and don't have a way to compare two Widgets, you will get errors when trying to put them into a map.
+4. Specifying the wrong data type. For example, it is a common mistake to forget that when you insert into a map, you need to insert a pair of objects and not just your key or value type.
+5. Incorrect template parameters. When declaring a template instantiation (e.g. map<int, int, less<int> >) you simply need to get the template parameters correct. Also note that when you have ">>" next to each other that you need to separate them by one space (e.g. "> >").
+
+### Debug.5 How do I measure hash table balancing?
+
+The following functionality lets you spelunk hash container layout.
+
+* There is the load_factor function which tells you the overall hashtable load, but doesn't tell you if a load is unevenly distributed.
+* You can control the load factor and thus the automated bucket redistribution with set_load_factor.
+* The local_iterator begin(size_type n) and local_iterator end(size_type) functions lets you iterate each bucket individually. You can use this to examine the elements in a bucket.
+* You can use the above to get the size of any bucket, but there is also simply the bucket_size(size_type n) function.
+* The bucket_count function tells you the count of buckets. So with this you can completely visualize the layout of the hash table.
+* There is also iterator find_by_hash(hash_code_t c), for what it's worth.
+
+The following function draws an ASCII bar graph of the hash table for easy visualization of bucket distribution:
+
+```cpp
+#include <EASTL/hash_map.h>
+#include <EASTL/algorithm.h>
+#include <stdio.h>
+
+template <typename HashTable>
+void VisualizeHashTableBuckets(const HashTable& h)
+{
+ eastl_size_t bucketCount = h.bucket_count();
+ eastl_size_t largestBucketSize = 0;
+
+ for(eastl_size_t i = 0; i < bucketCount; i++)
+ largestBucketSize = eastl::max_alt(largestBucketSize, h.bucket_size(i));
+
+ YourPrintFunction("\n --------------------------------------------------------------------------------\n");
+
+ for(eastl_size_t i = 0; i < bucketCount; i++)
+ {
+ const eastl_size_t k = h.bucket_size(i) * 80 / largestBucketSize;
+
+ char buffer[16];
+ sprintf(buffer, "%3u|", (unsigned)i);
+ YourPrintFunction(buffer);
+
+ for(eastl_size_t j = 0; j < k; j++)
+ YourPrintFunction("*");
+
+ YourPrintFunction("\n");
+ }
+
+ YourPrintFunction(" --------------------------------------------------------------------------------\n");
+}
+```
+
+This results in a graph that looks like the following (with one horizontal bar per bucket). This hashtable has a large number of collisions in each of its 10 buckets.
+
+```
+ ------------------------------------------------------
+ 0|********************************************
+ 1|************************************************
+ 2|***************************************
+ 3|********************************************
+ 4|*****************************************************
+ 5|*************************************************
+ 6|****************************************
+ 7|***********************************************
+ 8|********************************************
+ 9|**************************************
+10|********************************************
+ -----------------------------------------------------
+```
+
+## Containers
+
+### Cont.1 Why do some containers have "fixed" versions (e.g. fixed_list) but others(e.g. deque) don't have fixed versions?
+
+Recall that fixed containers are those that are implemented via a single contiguous block of memory and don't use a general purpose heap to allocate memory from. For example, fixed_list is a list container that implements its list by a user-configurable fixed block of memory. Such containers have an upper limit to how many items they can hold, but have the advantage of being more efficient with memory use and memory access coherency.
+
+The reason why some containers don't have fixed versions is that such functionality doesn't make sense with these containers. Containers which don't have fixed versions include:
+
+```
+array, deque, bitset, stack, queue, priority_queue,
+intrusive_list, intrusive_hash_map, intrusive_hash_set,
+intrusive_hash_multimap, intrusive_hash_multimap,
+vector_map, vector_multimap, vector_set, vector_multiset.
+```
+
+Some of these containers are adapters which wrap other containers and thus there is no need for a fixed version because you can just wrap a fixed container. In the case of intrusive containers, the user is doing the allocation and so there are no memory allocations. In the case of array, the container is a primitive type which doesn't allocate memory. In the case of deque, it's primary purpose for being is to dynamically resize and thus the user would likely be better of using a fixed_vector.
+
+### Cont.2 Can I mix EASTL with standard C++ STL?
+
+This is possible to some degree, though the extent depends on the implementation of C++ STL. One of things that makes interoperability is something called iterator categories. Containers and algorithms recognize iterator types via their category and STL iterator categories are not recognized by EASTL and vice versa.
+
+Things that you definitely can do:
+
+* #include both EASTL and standard STL headers from the same .cpp file.
+* Use EASTL containers to hold STL containers.
+* Construct an STL reverse_iterator from an EASTL iterator.
+* Construct an EASTL reverse_iterator from an STL iterator.
+
+Things that you probably will be able to do, though a given std STL implementation may prevent it:
+
+* Use STL containers in EASTL algorithms.
+* Use EASTL containers in STL algorithms.
+* Construct or assign to an STL container via iterators into an EASTL container.
+* Construct or assign to an EASTL container via iterators into an STL container.
+
+Things that you would be able to do if the given std STL implementation is bug-free:
+
+* Use STL containers to hold EASTL containers. Unfortunately, VC7.x STL has a confirmed bug that prevents this. Similarly, STLPort versions prior to v5 have a similar but.
+
+Things that you definitely can't do:
+
+* Use an STL allocator directly with an EASTL container (though you can use one indirectly).
+* Use an EASTL allocator directly with an STL container (though you can use one indirectly).
+
+### Cont.3 Why are there so many containers?
+
+EASTL has a large number of container types (e.g vector, list, set) and often has a number of variations of given types (list, slist, intrusive_list, fixed_list). The reason for this is that each container is tuned and to a specific need and there is no single container that works for all needs. The more the user is concerned about squeezing the most performance out of their system, the more the individual container variations become significant. It's important to note that having additional container types generally does not mean generating additional code or code bloat. Templates result in generated code regardless of what templated class they come from, and so for the most part you get optimal performance by choosing the optimal container for your needs.
+
+### Cont.4 Don't STL and EASTL containers fragment memory?
+
+They only fragment memory if you use them in a way that does so. This is no different from any other type of container used in a dynamic way. There are various solutions to this problem, and EASTL provides additional help as well:
+
+For vectors, use the reserve function (or the equivalent constructor) to set aside a block of memory for the container. The container will not reallocate memory unless you try grow beyond the capacity you reserve.
+EASTL has "fixed" variations of containers which allow you to specify a fixed block of memory which the container uses for its memory. The container will not allocate any memory with these types of containers and all memory will be cache-friendly due to its locality.
+You can assign custom allocators to containers instead of using the default global allocator. You would typically use an allocator that has its own private pool of memory.
+Where possible, add all a container's elements to it at once up front instead of adding them over time. This avoids memory fragmentation and increase cache coherency.
+
+### Cont.5 I don't see container optimizations for equivalent scalar types such as pointer types. Why?
+
+Metrowerks (and no other, as of this writing) STL has some container specializations for type T* which maps them to type void*. The idea is that a user who declares a list of Widget* and a list of Gadget* will generate only one container: a list of void*. As a result, code generation will be smaller. Often this is done only in optimized builds, as such containers are harder to view in debug builds due to type information being lost.
+
+The addition of this optimization is under consideration for EASTL, though it might be noted that optimizing compilers such as VC++ are already capable of recognizing duplicate generated code and folding it automatically as part of link-time code generation (LTCG) (a.k.a. "whole program optimization"). This has been verified with VC++, as the following code and resulting disassembly demonstrate:
+
+```cpp
+eastl::list<int*> intPtrList;
+eastl::list<TestObject*> toPtrList;
+
+eastl_size_t n1 = intPtrList.size();
+eastl_size_t n2 = toPtrList.size();
+
+0042D288 lea edx,[esp+14h]
+0042D28C call eastl::list<TestObject>::size (414180h)
+0042D291 push eax
+0042D292 lea edx,[esp+24h]
+0042D296 call eastl::list<TestObject>::size (414180h)
+```
+
+Note that in the above case the compiler folded the two implementations of size() into a single implementation.
+
+### Cont.6 What about alternative container and algorithm implementations (e.g. treaps, skip lists, avl trees)?
+
+EASTL chooses to implement some alternative containers and algorithms and not others. It's a matter of whether or not the alternative provides truly complementary or improved functionality over existing containers. The following is a list of some implemented and non-implemented alternatives and the rationale behind each:
+
+Implemented:
+
+* intrusive_list, etc. -- Saves memory and improves cache locality.
+* vector_map, etc. -- Saves memory and improves cache locality.
+* ring_buffer -- Useful for some types of operations and has no alternative.
+* shell_sort -- Useful sorting algorithm.
+* sparse_matrix -- Useful for some types of operations and has no alternative.
+
+Not implemented:
+
+* skip lists (alternative to red-black tree) -- These use more memory and usually perform worse than rbtrees.
+* treap (alternative to red-black tree) -- These are easier and smaller than rbtrees, but perform worse.
+* avl tree (alternative to red-black tree) -- These have slightly better search performance than rbtrees, but significantly worse * * insert/remove performance.
+* btree (alternative to red-black tree) -- These are no better than rbtrees.
+
+If you have an idea of something that should be implemented, please suggest it or even provide at least a prototypical implementation.
+
+### Cont.7 Why are tree-based EASTL containers hard to read with a debugger?
+
+**Short answer**
+
+Maximum performance and design mandates.
+
+**Long answer**
+
+You may notice that when you have a tree-based container (e.g. set, map) in the debugger that it isn't automatically able to recognize the tree nodes as containing instances of your contained object. You can get the debugger to do what you want with casting statements in the debug watch window, but this is not an ideal solution. The reason this is happening is that node-based containers always use an anonymous node type as the base class for container nodes. This is primarily done for performance, as it allows the node manipulation code to exist as a single non-templated library of functions and it saves memory because containers will have one or two base nodes as container 'anchors' and you don't want to allocate a node of the size of the user data when you can just use a base node. See list.h for an example of this and some additional in-code documentation on this.
+
+Additionally, EASTL has the design mandate that an empty container constructs no user objects. This is both for performance reasons and because it doing so would skew the user's tracking of object counts and might possibly break some expectation the user has about object lifetimes.
+
+Currently this debug issue exists only with tree-based containers. Other node-based containers such as list and slist use a trick to get around this problem in debug builds.
+
+### Cont.8 How do I assign a custom allocator to an EASTL container?
+
+There are two ways of doing this:
+
+1. Use the set_allocator function that is present in each container.
+2. Specify a new allocator type via the Allocator template parameter that is present in each container.
+
+For item #1, EASTL expects that you provide an instance of an allocator of the type that EASTL recognizes. This is simple but has the disadvantage that all such allocators must be of the same class. The class would need to have C++ virtual functions in order to allow a given instance to act differently from another instance.
+
+For item #2, you specify that the container use your own allocator class. The advantage of this is that your class can be implemented any way you want and doesn't require virtual functions for differentiation from other instances. Due to the way C++ works your class would necessarily have to use the same member function names as the default allocator class type. In order to make things easier, we provide a skeleton allocator here which you can copy and fill in with your own implementation.
+
+```cpp
+class custom_allocator
+{
+public:
+ custom_allocator(const char* pName = EASTL_NAME_VAL("custom allocator"))
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+
+ // Possibly do something here.
+ }
+
+ custom_allocator(const allocator& x, const char* pName = EASTL_NAME_VAL("custom allocator"));
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+
+ // Possibly copy from x here.
+ }
+
+ ~custom_allocator();
+ {
+ // Possibly do something here.
+ }
+
+ custom_allocator& operator=(const custom_allocator& x)
+ {
+ // Possibly copy from x here.
+ return *this;
+ }
+
+ void* allocate(size_t n, int flags = 0)
+ {
+ // Implement the allocation here.
+ }
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+ {
+ // Implement the allocation here.
+ }
+
+ void deallocate(void* p, size_t n)
+ {
+ // Implement the deallocation here.
+ }
+
+ const char* get_name() const
+ {
+ #if EASTL_NAME_ENABLED
+ return mpName;
+ #else
+ return "custom allocator";
+ #endif
+ }
+
+ void set_name(const char* pName)
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName;
+ #endif
+ }
+
+protected:
+ // Possibly place instance data here.
+
+ #if EASTL_NAME_ENABLED
+ const char* mpName; // Debug name, used to track memory.
+ #endif
+};
+
+
+inline bool operator==(const allocator& a, const allocator& b)
+{
+ // Provide a comparison here.
+}
+
+inline bool operator!=(const allocator& a, const allocator& b)
+{
+ // Provide a negative comparison here.
+}
+```
+
+Here's an example of how to use the above custom allocator:
+
+```cpp
+// Declare a Widget list and have it default construct.
+list<Widget, custom_allocator> widgetList;
+
+// Declare a Widget list and have it construct with a copy of some global allocator.
+list<Widget, custom_allocator> widgetList2(gSomeGlobalAllocator);
+
+// Declare a Widget list and have it default construct, but assign
+// an underlying implementation after construction.
+list<Widget, custom_allocator> widgetList;
+widgetList.get_allocator().mpIAllocator = new WidgetAllocatorImpl;
+```
+
+### Cont.9 How do I set the VC++ debugger to display EASTL container data with tooltips?
+
+Visual Studio supports this via the AutoExp.dat file, an example of which is [present](./html/AutoExp.dat) with this documentation.
+
+Sometimes the AutoExp.dat doesn't seem to work. Avery Lee's explanation:
+
+> If I had to take a guess, the problem is most likely in the cast to the concrete node type. These are always tricky because, for some strange reason, the debugger is whitespace sensitive with regard to specifying template types. You might try manually checking one of the routines of the specific map instantiation and checking that the placement of whitespace and const within the template expression still matches exactly. In some cases the compiler uses different whitespace rules depending on the value type which makes it impossible to correctly specify a single visualizer – this was the case for eastl::list<>, for which I was forced to include sections for both cases. The downside is that you have a bunch of (error) entries either way.
+
+### Cont.10 How do I use a memory pool with a container?
+
+Using custom memory pools is a common technique for decreasing memory fragmentation and increasing memory cache locality. EASTL gives you the flexibility of defining your own memory pool systems for containers. There are two primary ways of doing this:
+
+* Assign a custom allocator to a container. eastl::fixed_pool provides an implementation.
+* Use one of the EASTL fixed containers, such as fixed_list.
+
+**Custom Allocator**
+
+In the custom allocator case, you will want to create a memory pool and assign it to the container. For purely node-based containers such as list, slist, map, set, multimap, and multiset, your pool simply needs to be able to allocate list nodes. Each of these containers has a member typedef called node_type which defines the type of node allocated by the container. So if you have a memory pool that has a constructor that takes the size of pool items and the count of pool items, you would do this (assuming that MemoryPool implements the Allocator interface):
+
+```cpp
+typedef list<Widget, MemoryPool> WidgetList; // Declare your WidgetList type.
+
+MemoryPool myPool(sizeof(WidgetList::node_type), 100); // Make a pool of 100 Widget nodes.
+WidgetList myList(&myPool); // Create a list that uses the pool.
+```
+
+In the case of containers that are array-based, such as vector and basic_string, memory pools don't work very well as these containers work on a realloc-basis instead of by adding incremental nodes. What what want to do with these containers is assign a sufficient block of memory to them and reserve() the container's capacity to the size of the memory.
+
+In the case of mixed containers which are partly array-based and partly node based, such as hash containers and deque, you can use a memory pool for the nodes but will need a single array block to supply for the buckets (hash containers and deque both use a bucket-like system).
+
+You might consider using eastl::fixed_pool as such an allocator, as it provides such functionality and allows the user to provide the actual memory used for the pool. Here is some example code:
+
+```cpp
+char buffer[256];
+
+list<Widget, fixed_pool> myList;
+myList.get_allocator().init(buffer, 256);
+Fixed Container
+In the fixed container case, the container does all the work for you. To use a list which implements a private pool of memory, just declare it like so:
+
+fixed_list<Widget, 100> fixedList; // Declare a fixed_list that can hold 100 Widgets
+```
+
+### Cont.11 How do I write a comparison (operator<()) for a struct that contains two or more members?
+
+See [Algo.2](#algo2-how-do-i-write-a-comparison-operator-for-a-struct-that-contains-two-or-more-members).
+
+### Cont.12 Why doesn't container X have member function Y?
+
+Why don't the list or vector containers have a find() function? Why doesn't the vector container have a sort() function? Why doesn't the string container have a mid() function? These are common examples of such questions.
+
+The answer usually boils down to two reasons:
+
+* The functionality exists in a more centralized location elsewhere, such as the algorithms.
+* The functionality can be had by using other member functions.
+
+In the case of find and sort functions not being part of containers, the find algorithm and sort algorithm are centralized versions that apply to any container. Additionally, the algorithms allow you to specify a sub-range of the container on which to apply the algorithm. So in order to find an element in a list, you would do this:
+
+`list<int>::iterator i = find(list.begin(), list.end(), 3);`
+
+And in order to sort a vector, you would do this:
+
+```cpp
+quick_sort(v.begin(), v.end()); // Sort the entire array.
+
+quick_sort(&v[3], &v[8]); // Sort the items at the indexes in the range of [3, 8).
+```
+
+In the case of functionality that can be had by using other member functions, note that EASTL follows the philosophy that duplicated functionality should not exist in a container, with exceptions being made for cases where mistakes and unsafe practices commonly happen if the given function isn't present. In the case of string not having a mid function, this is because there is a string constructor that takes a sub-range of another string. So to make a string out of the middle of another, you would do this:
+
+`string strMid(str, 3, 5); // Make a new string of the characters from the source range of [3, 3+5).`
+
+It might be noted that the EASTL string class is unique among EASTL containers in that it sometimes violates the minimum functionality rule. This is so because the std C++ string class similarly does so and EASTL aims to be compatible.
+
+### Cont.13 How do I search a hash_map of strings via a char pointer efficiently? If I use map.find("hello") it creates a temporary string, which is inefficient.
+
+The problem is illustrated with this example:
+
+```cpp
+map<string, Widget> swMap;
+ ...
+map<string, Widget>::iterator it = swMap.find("blue"); // A temporary string object is created here.
+```
+
+In this example, the find function expects a string object and not a string literal and so (silently!) creates a temporary string object for the duration of the find. There are two solutions to this problem:
+
+* Make the map a map of char pointers instead of string objects. Don't forget to write a custom compare or else the default comparison function will compare pointer values instead of string contents.
+* Use the EASTL hash_map::find_as function, which allows you to find an item in a hash container via an alternative key than the one the hash table uses.
+
+### Cont.14 Why are set and hash_set iterators const (i.e. const_iterator)?
+
+The situation is illustrated with this example:
+
+```cpp
+set<int> intSet;
+
+intSet.insert(1);
+set<int>::iterator i = intSet.begin();
+*i = 2; // Error: iterator i is const.
+```
+
+In this example, the iterator is a regular iterator and not a const_iterator, yet the compiler gives an error when trying to change the iterator value. The reason this is so is that a set is an ordered container and changing the value would make it out of order. Thus, set and multiset iterators are always const_iterators. If you need to change the value and are sure the change will not alter the container order, use const_cast or declare mutable member variables for your contained object. This resolution is the one blessed by the C++ standardization committee.
+
+### Cont.15 How do I prevent my hash container from re-hashing?
+
+If you want to make a hashtable never re-hash (i.e. increase/reallocate its bucket count), call set_max_load_factor with a very high value such as 100000.f.
+
+Similarly, you can control the bucket growth factor with the rehash_policy function. By default, when buckets reallocate, they reallocate to about twice their previous count. You can control that value as with the example code here:
+
+```cpp
+hash_set<int> hashSet;
+hashSet.rehash_policy().mfGrowthFactor = 1.5f
+```
+
+### Cont.16 Which uses less memory, a map or a hash_map?
+
+A hash_map will virtually always use less memory. A hash_map will use an average of two pointers per stored element, while a map uses three pointers per stored element.
+
+### Cont.17 How do I write a custom hash function?
+
+You can look at the existing hash functions in functional.h, but we provide a couple examples here.
+
+To write a specific hash function for a Widget class, you would do this:
+
+```cpp
+struct WidgetHash {
+ size_t operator()(const Widget& w) const
+ { return w.id; }
+};
+
+hash_set<Widget, WidgetHash> widgetHashSet;
+```
+
+To write a generic (templated) hash function for a set of similar classes (in this case that have an id member), you would do this:
+
+```cpp
+template <typename T>
+struct GeneralHash {
+ size_t operator()(const T& t) const
+ { return t.id; }
+};
+
+hash_set<Widget, GeneralHash<Widget> > widgetHashSet;
+hash_set<Dogget, GeneralHash<Dogget> > doggetHashSet;
+```
+
+### Cont.18 How do I write a custom compare function for a map or set?
+
+The sorted containers require that an operator< exist for the stored values or that the user provide a suitable custom comparison function. A custom can be implemented like so:
+
+```cpp
+struct WidgetLess {
+ bool operator()(const Widget& w1, const Widget& w2) const
+ { return w.id < w2.id; }
+};
+
+set<Widget, WidgetLess> wSet;
+```
+
+It's important that your comparison function must be consistent in its behaviour, else the container will either be unsorted or a crash will occur. This concept is called "strict weak ordering."
+
+### Cont.19 How do I force my vector or string capacity down to the size of the container?
+
+You can simply use the set_capacity() member function which is present in both vector and string. This is a function that is not present in std STL vector and string functions.
+
+```cpp
+eastl::vector<Widget> x;
+x.set_capacity(); // Shrink x's capacity to be equal to its size.
+
+eastl::vector<Widget> x;
+x.set_capacity(0); // Completely clear x.
+```
+
+To compact your vector or string in a way that would also work with std STL you need to do the following.
+
+How to shrink a vector's capacity to be equal to its size:
+
+```cpp
+std::vector<Widget> x;
+std::vector<Widget>(x).swap(x); // Shrink x's capacity.
+```
+
+How to completely clear a std::vector (size = 0, capacity = 0, no allocation):
+
+```cpp
+std::vector<Widget> x;
+std::vector<Widget>().swap(x); // Completely clear x.
+```
+
+### Cont.20 How do I iterate a container while (selectively) removing items from it?
+
+All EASTL containers have an erase function which takes an iterator as an argument and returns an iterator to the next item. Thus, you can erase items from a container while iterating it like so:
+
+```cpp
+set<int> intSet;
+
+set<int>::iterator i = intSet.begin();
+
+while(i != intSet.end())
+{
+ if(*i & 1) // Erase all odd integers from the container.
+ i = intSet.erase(i);
+ else
+ ++i;
+}
+```
+
+### Cont.21 How do I store a pointer in a container?
+
+The problem with storing pointers in containers is that clearing the container will not free the pointers automatically. There are two conventional resolutions to this problem:
+
+Manually free pointers when removing them from containers.
+Store the pointer as a smart pointer instead of a "raw"pointer.
+The advantage of the former is that it makes the user's intent obvious and prevents the possibility of smart pointer "thrashing" with some containers. The disadvantage of the former is that it is more tedicous and error-prone.
+
+The advantage of the latter is that your code will be cleaner and will always be error-free. The disadvantage is that it is perhaps slightly obfuscating and with some uses of some containers it can cause smart pointer thrashing, whereby a resize of a linear container (e.g. vector) can cause shared pointers to be repeatedly incremented and decremented with no net effect.
+
+It's important that you use a shared smart pointer and not an unshared one such as C++ auto_ptr, as the latter will result in crashes upon linear container resizes. Here we provide an example of how to create a list of smart pointers:
+
+```cpp
+list< shared_ptr<Widget> > wList;
+
+wList.push_back(shared_ptr<Widget>(new Widget));
+wList.pop_back(); // The Widget will be freed.
+```
+
+### Cont.22 How do I make a union of two containers? difference? intersection?
+
+The best way to accomplish this is to sort your container (or use a sorted container such as set) and then apply the set_union, set_difference, or set_intersection algorithms.
+
+### Cont.23 How do I override the default global allocator?
+
+There are multiple ways to accomplish this. The allocation mechanism is defined in EASTL/internal/config.h and in allocator.h/cpp. Overriding the default global allocator means overriding these files, overriding what these files refer to, or changing these files outright. Here is a list of things you can do, starting with the simplest:
+
+* Simply provide the following versions of operator new (which EASTL requires, actually):
+```cpp
+void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+```
+* Predefine the config.h macros for EASTLAlloc, EASTLFree, etc. See config.h for this.
+* Override config.h entirely via EASTL_USER_CONFIG_HEADER. See config.h for this.
+* Provide your own version of allocator.h/cpp
+* Provide your own version of config.h.
+
+If you redefine the allocator class, you can make it work however you want.
+
+Note that config.h defines EASTLAllocatorDefault, which returns the default allocator instance. As documented in config.h, this is not a global allocator which implements all container allocations but is the allocator that is used when EASTL needs to allocate memory internally. There are very few cases where EASTL allocates memory internally, and in each of these it is for a sensible reason that is documented to behave as such.
+
+### Cont.24 How do I do trick X with the string container?
+
+There seem to be many things users want to do with strings. Perhaps the most commonly requested EASTL container extensions are string class shortcut functions. While some of these requests are being considered, we provide some shortcut functions here.
+
+**find_and_replace**
+
+```cpp
+template <typename String>
+void find_and_replace(String& s, const typename String::value_type* pFind, const typename String::value_type* pReplace)
+{
+ for(size_t i; (i = source.find(pFind)) != T::npos; )
+ s.replace(i, eastl::CharStrlen(pFind), pReplace);
+}
+
+Example:
+ find_and_replace(s, "hello", "hola");
+```
+
+**trim front (multiple chars)**
+
+```cpp
+template <typename String>
+void trim_front(String& s, const typename String::value_type* pValues)
+{
+ s.erase(0, s.find_first_not_of(pValues));
+}
+
+Example:
+ trim_front(s, " \t\n\r");
+```
+
+**trim back (multiple chars)**
+
+```cpp
+template <typename String>
+void trim_front(String& s, const typename String::value_type* pValues)
+{
+ s.resize(s.find_last_not_of(pValues) + 1);
+}
+
+Example:
+ trim_back(s, " \t\n\r");
+```
+
+**prepend**
+
+```cpp
+template <typename String>
+void prepend(String& s, const typename String::value_type* p)
+{
+ s.insert(0, p);
+}
+
+Example:
+ prepend(s, "log: ");
+```
+
+**begins_with**
+
+```cpp
+template <typename String>
+bool begins_with(const String& s, const typename String::value_type* p)
+{
+ return s.compare(0, eastl::CharStrlen(p), p) == 0;
+}
+
+Example:
+ if(begins_with(s, "log: ")) ...
+```
+
+**ends_with**
+
+```cpp
+template <typename String>
+bool ends_with(const String& s, const typename String::value_type* p)
+{
+ const typename String::size_type n1 = s.size();
+ const typename String::size_type n2 = eastl::CharStrlen(p);
+ return ((n1 >= n2) && s.compare(n1 - n2, n2, p) == 0);
+}
+
+Example:
+ if(ends_with(s, "test.")) ...
+```
+
+**tokenize**
+
+Here is a simple tokenization function that acts very much like the C strtok function.
+
+```cpp
+template <typename String>
+size_t tokenize(const String& s, const typename String::value_type* pDelimiters,
+ String* resultArray, size_t resultArraySize)
+{
+ size_t n = 0;
+ typename String::size_type lastPos = s.find_first_not_of(pDelimiters, 0);
+ typename String::size_type pos = s.find_first_of(pDelimiters, lastPos);
+
+ while((n < resultArraySize) && (pos != String::npos) || (lastPos != String::npos))
+ {
+ resultArray[n++].assign(s, lastPos, pos - lastPos);
+ lastPos = s.find_first_not_of(pDelimiters, pos);
+ pos = s.find_first_of(pDelimiters, lastPos);
+ }
+
+ return n;
+}
+
+Example:
+ string resultArray[32];
+tokenize(s, " \t", resultArray, 32));
+```
+
+### Cont.25 How do EASTL smart pointers compare to Boost smart pointers?
+
+EASTL's smart pointers are nearly identical to Boost (including all that crazy member template and dynamic cast functionality in shared_ptr), but are not using the Boost source code. EA legal has already stated that it is fine to have smart pointer classes with the same names and functionality as those present in Boost. EA legal specifically looked at the smart pointer classes in EASTL for this. There are two differences between EASTL smart pointers and Boost smart pointers:
+
+* EASTL smart pointers don't have thread safety built-in. It was deemed that this is too much overhead and that thread safety is something best done at a higher level. By coincidence the C++ library proposal to add shared_ptr also omits the thread safety feature. FWIW, I put a thread-safe shared_ptr in EAThread, though it doesn't attempt to do all the fancy member template things that Boost shared_ptr does. Maybe I'll add that some day if people care.
+* EASTL shared_ptr object deletion goes through a deletion object instead of through a virtual function interface. 95% of the time this makes no difference (aside from being more efficient), but the primary case where it matters is when you have shared_ptr<void> and assign to is something like "new Widget". The problem is that shared_ptr<void> doesn't know what destructor to call and so doesn't call a destructor unless you specify a custom destructor object as part of the template specification. I don't know what to say about this one, as it is less safe, but forcing everybody to have the overhead of additional templated classes and virtual destruction functions doesn't seem to be in the spirit of high performance or lean game development.
+
+There is the possibility of making a shared_ptr_boost which is completely identical to Boost shared_ptr. So perhaps that will be done some day.
+
+### Cont.26 How do your forward-declare an EASTL container?
+
+Here is are some examples of how to do this:
+
+```cpp
+namespace eastl
+{
+ template <typename T, typename Allocator> class basic_string;
+ typedef basic_string<char, allocator> string8; // Forward declare EASTL's string8 type.
+
+ template <typename T, typename Allocator> class vector;
+ typedef vector<char, allocator> CharArray;
+
+ template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode> class hash_set;
+
+ template <typename Key, typename T, typename Compare, typename Allocator> class map;
+}
+```
+
+The forward declaration can be used to declare a pointer or reference to such a class. It cannot be used to declare an instance of a class or refer to class data, static or otherwise. Nevertheless, forward declarations for pointers and references are useful for reducing the number of header files a header file needs to include.
+
+### Cont.27 How do I make two containers share a memory pool?
+
+EASTL (and std STL) allocators are specified by value semantics and not reference semantics. Value semantics is more powerful (because a value can also be a reference, but not the other way around), but is not always what people expects if they're used to writing things the other way.
+
+Here is some example code:
+
+```cpp
+struct fixed_pool_reference
+{
+public:
+ fixed_pool_reference()
+ {
+ mpFixedPool = NULL;
+ }
+
+ fixed_pool_reference(eastl::fixed_pool& fixedPool)
+ {
+ mpFixedPool = &fixedPool;
+ }
+
+ fixed_pool_reference(const fixed_pool_reference& x)
+ {
+ mpFixedPool = x.mpFixedPool;
+ }
+
+ fixed_pool_reference& operator=(const fixed_pool_reference& x)
+ {
+ mpFixedPool = x.mpFixedPool;
+ return *this;
+ }
+
+ void* allocate(size_t /*n*/, int /*flags*/ = 0)
+ {
+ return mpFixedPool->allocate();
+ }
+
+ void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+ {
+ return mpFixedPool->allocate();
+ }
+
+ void deallocate(void* p, size_t /*n*/)
+ {
+ return mpFixedPool->deallocate(p);
+ }
+
+ const char* get_name() const
+ {
+ return "fixed_pool_reference";
+ }
+
+ void set_name(const char* /*pName*/)
+ {
+ }
+
+protected:
+ friend bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b);
+ friend bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b);
+
+ eastl::fixed_pool* mpFixedPool;
+};
+
+inline bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b)
+{
+ return (a.mpFixedPool == b.mpFixedPool);
+}
+
+inline bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b)
+{
+ return (a.mpFixedPool != b.mpFixedPool);
+}
+```
+
+Example usage of the above:
+
+```cpp
+typedef eastl::list<int, fixed_pool_reference> IntList;
+
+IntList::node_type buffer[2];
+eastl::fixed_pool myPool(buffer, sizeof(buffer), sizeof(Int::node_type), 2);
+
+IntList myList1(myPool);
+IntList myList2(myPool);
+
+myList1.push_back(37);
+myList2.push_back(39);
+```
+
+### Cont.28 Can I use a std (STL) allocator with EASTL?
+
+No. EASTL allocators are similar in interface to std STL allocators, but not 100% compatible. If it was possible to make them compatible with std STL allocators but also match the design of EASTL then compatibility would exist. The primary reasons for lack of compatibility are:
+
+* EASTL allocators have a different allocate function signature.
+* EASTL allocators have as many as four extra required functions: ctor(name), get_name(), set_name(), allocate(size, align, offset).
+* EASTL allocators have an additional allocate function specifically for aligned allocations, as listed directly above.
+
+### What are the requirements of classes stored in containers?
+
+Class types stored in containers must have:
+
+* a public copy constructor
+* a public assignment operator
+* a public destructor
+* an operator < that compares two such classes (sorted containers only).
+* an operator == that compares two such classes (hash containers only).
+
+Recall that the compiler generates basic versions these functions for you when you don't implement them yourself, so you can omit any of the above if the compiler-generated version is sufficient.
+
+For example, the following code will act incorrectly, because the user forgot to implement an assignment operator. The compiler-generated assignment operator will assign the refCount value, which the user doesn't want, and which will be called by the vector during resizing.
+
+```cpp
+struct NotAPod
+{
+ NotAPod(const NotAPod&) {} // Intentionally don't copy the refCount
+
+ int refCount; // refCounts should not be copied between NotAPod instances.
+};
+
+eastl::vector<NotAPod> v;
+```
+
+## Algorithms
+
+### Algo.1 I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?
+
+It may possible that you are seeing floating point roundoff problems. Many STL algorithms require object comparisons to act consistently. However, floating point values sometimes compare differently between uses because in one situation a value might be in 32 bit form in system memory, whereas in anther situation that value might be in an FPU register with a different precision. These are difficult problems to track down and aren't the fault of EASTL or whatever similar library you might be using. There are various solutions to the problem, but the important thing is to find a way to force the comparisons to be consistent.
+
+The code below was an example of this happening, whereby the object pA->mPos was stored in system memory while pB->mPos was stored in a register and comparisons were inconsistent and a crash ensued.
+
+```cpp
+class SortByDistance : public binary_function<WorldTreeObject*, WorldTreeObject*, bool>
+{
+private:
+ Vector3 mOrigin;
+
+public:
+ SortByDistance(Vector3 origin) {
+ mOrigin = origin;
+ }
+
+ bool operator()(WorldTreeObject* pA, WorldTreeObject* pB) const {
+ return ((WorldObject*)pA)->mPos - mOrigin).GetLength()
+ < ((WorldObject*)pB)->mPos - mOrigin).GetLength();
+ }
+};
+```
+
+### Algo.2 How do I write a comparison (operator<()) for a struct that contains two or more members?
+
+For a struct with two members such as the following:
+
+```cpp
+struct X {
+ Blah m1;
+ Blah m2;
+};
+```
+
+You would write the comparison function like this:
+
+```cpp
+bool operator<(const X& a, const X& b) {
+ return (a.m1 == b.m1) ? (a.m2 < b.m2) : (a.m1 < b.m1);
+}
+```
+
+or, using only operator < but more instructions:
+
+```cpp
+bool operator<(const X& a, const X& b) {
+ return (a.m1 < b.m1) || (!(b.m1 < a.m1) && (a.m2 < b.m2));
+}
+```
+
+For a struct with three members, you would have:
+
+```cpp
+bool operator<(const X& a, const X& b) {
+ if(a.m1 != b.m1)
+ return (a.m1 < b.m1);
+ if(a.m2 != b.m2)
+ return (a.m2 < b.m2);
+ return (a.mType < b.mType);
+}
+```
+
+And a somewhat messy implementation if you wanted to use only operator <.
+
+Note also that you can use the above technique to implement operator < for spatial types such as vectors, points, and rectangles. You would simply treat the members of the stuct as an array of values and ignore the fact that they have spatial meaning. All operator < cares about is that things order consistently.
+
+```cpp
+bool operator<(const Point2D& a, const Point2D& b) {
+ return (a.x == b.x) ? (a.y < b.y) : (a.x < b.x);
+}
+```
+
+### Algo.3 How do I sort something in reverse order?
+
+Normally sorting puts the lowest value items first in the sorted range. You can change this by simply reversing the comparison. For example:
+
+`sort(intVector.begin(), intVector.end(), greater<int>());`
+
+It's important that you use operator > instead of >=. The comparison function must return false for every case where values are equal.
+
+### Algo.4 I'm getting errors about min and max while compiling.
+
+You need to define NOMINMAX under VC++ when this occurs, as it otherwise defines min and max macros that interfere. There may be equivalent issues with other compilers. Also, VC++ has a specific <minmax.h> header file which defines min and max macros but which doesn't pay attention to NOMINMAX and so in that case there is nothing to do but not include that file or to undefine min and max. minmax.h is not a standard file and its min and max macros are not standard C or C++ macros or functions.
+
+### Algo.5 Why don't algorithms take a container as an argument instead of iterators? A container would be more convenient.
+
+Having algorithms that use containers instead of algorithms would reduce reduce functionality with no increase in performance. This is because the use of iterators allows for the application of algorithms to sub-ranges of containers and allows for the application of algorithms to containers aren't formal C++ objects, such as C-style arrays.
+
+Providing additional algorithms that use containers would introduce redundancy with respect to the existing algorithms that use iterators.
+
+### Algo.6 Given a container of pointers, how do I find an element by value (instead of by pointer)?
+
+Functions such as find_if help you find a T element in a container of Ts. But if you have a container of pointers such as vector<Widget*>, these functions will enable you to find an element that matches a given Widget* pointer, but they don't let you find an element that matches a given Widget object.
+
+You can write your own iterating 'for' loop and compare values, or you can use a generic function object to do the work if this is a common task:
+
+```cpp
+template<typename T>
+struct dereferenced_equal
+{
+ const T& mValue;
+
+ dereferenced_equal(const T& value) : mValue(value) { }
+ bool operator==(const T* pValue) const { return *pValue == mValue; }
+};
+
+...
+
+find_if(container.begin(), container.end(), dereferenced_equal<Widget>(someWidget));
+```
+
+### Algo.7 When do stored objects need to support operator < vs. when do they need to support operator ==?
+
+Any object which is sorted needs to have operator < defined for it, implicitly via operator < or explicitly via a user-supplied Compare function. Sets and map containers require operator <, while sort, binary search, and min/max algorithms require operator <.
+
+Any object which is compareed for equality needs to have operator == defined for it, implicitly via operator == or explicitly via a user-supplied BinaryPredicate function. Hash containers required operator ==, while many of the algorithms other than those mentioned above for operator < require operator ==.
+
+Some algorithms and containers require neither < nor ==. Interestingly, no algorithm or container requires both < and ==.
+
+### Algo.8 How do I sort via pointers or array indexes instead of objects directly?
+
+Pointers
+
+```cpp
+vector<TestObject> toArray;
+vector<TestObject*> topArray;
+
+for(eastl_size_t i = 0; i < 32; i++)
+ toArray.push_back(TestObject(rng.RandLimit(20)));
+for(eastl_size_t i = 0; i < 32; i++) // This needs to be a second loop because the addresses might change in the first loop due to container resizing.
+ topArray.push_back(&toArray[i]);
+
+struct TestObjectPtrCompare
+{
+ bool operator()(TestObject* a, TestObject* b)
+ { return a->mX < a->mX; }
+};
+
+quick_sort(topArray.begin(), topArray.end(), TestObjectPtrCompare());
+```
+
+Array indexes
+
+```cpp
+vector<TestObject> toArray;
+vector<eastl_size_t> toiArray;
+
+for(eastl_size_t i = 0; i < 32; i++)
+{
+ toArray.push_back(TestObject(rng.RandLimit(20)));
+ toiArray.push_back(i);
+}
+
+struct TestObjectIndexCompare
+{
+ vector* mpArray;
+
+ TestObjectIndexCompare(vector<TestObject>* pArray) : mpArray(pArray) { }
+ TestObjectIndexCompare(const TestObjectIndexCompare& x) : mpArray(x.mpArray){ }
+ TestObjectIndexCompare& operator=(const TestObjectIndexCompare& x) { mpArray = x.mpArray; return *this; }
+
+ bool operator()(eastl_size_t a, eastl_size_t b)
+ { return (*mpArray)[a] < (*mpArray)[b]; }
+};
+
+quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare(&toArray));
+```
+
+Array indexes (simpler version using toArray as a global variable)
+
+```cpp
+vector<TestObject> toArray;
+vector<eastl_size_t> toiArray;
+
+for(eastl_size_t i = 0; i < 32; i++)
+{
+ toArray.push_back(TestObject(rng.RandLimit(20)));
+ toiArray.push_back(i);
+}
+
+struct TestObjectIndexCompare
+{
+ bool operator()(eastl_size_t a, eastl_size_t b)
+ { return toArray[a] < toArray[b]; }
+};
+
+quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare(&toArray));
+```
+
+## Iterators
+
+### Iter.1 What's the difference between iterator, const iterator, and const_iterator?
+
+An iterator can be modified and item it points to can be modified.
+A const iterator cannot be modified, but the items it points to can be modified.
+A const_iterator can be modified, but the items it points to cannot be modified.
+A const const_iterator cannot be modified, nor can the items it points to.
+
+This situation is much like with char pointers:
+
+| Iterator type | Pointer equivalent |
+|------|------|
+| iterator | char* |
+| const iterator | char* const |
+| const_iterator | const char* |
+| const const_iterator | const char* const |
+
+### Iter.2 How do I tell from an iterator what type of thing it is iterating?
+
+Use the value_type typedef from iterator_traits, as in this example
+
+```cpp
+template <typename Iterator>
+void DoSomething(Iterator first, Iterator last)
+{
+ typedef typename iterator_traits<Iterator>::value_type;
+
+ // use value_type
+}
+```
+
+### Iter.3 How do I iterate a container while (selectively) removing items from it?
+
+All EASTL containers have an erase function which takes an iterator as an argument and returns an iterator to the next item. Thus, you can erase items from a container while iterating it like so:
+
+```cpp
+set<int> intSet;
+set<int>::iterator i = intSet.begin();
+
+while(i != intSet.end())
+{
+ if(*i & 1) // Erase all odd integers from the container.
+ i = intSet.erase(i);
+ else
+ ++i;
+}
+```
+
+### Iter.4 What is an insert_iterator?
+
+An insert_iterator is a utility class which is like an iterator except that when you assign a value to it, the insert_iterator inserts the value into the container (via insert()) and increments the iterator. Similarly, there are front_insert_iterator and back_insert_iterator, which are similar to insert_iterator except that assigning a value to them causes then to call push_front and push_back, respectively, on the container. These utilities may seem a slightly abstract, but they have uses in generic programming.
+
+----------------------------------------------
+End of document
diff --git a/EASTL/doc/Glossary.md b/EASTL/doc/Glossary.md
new file mode 100644
index 0000000..550209d
--- /dev/null
+++ b/EASTL/doc/Glossary.md
@@ -0,0 +1,93 @@
+# EASTL Glossary
+
+This document provides definitions to various terms related to EASTL. Items that are capitalized are items that are used as template parameters.
+
+| | |
+|------|------|
+| adapter | An adapter is something that encapsulates a component to provide another interface, such as a C++ class which makes a stack out of a list. |
+| algorithm | Algorithms are standalone functions which manipulate data which usually but not necessarily comes from a container. Some algorithms change the data while others don't. Examples are reverse, sort, find, and remove. |
+| associative container | An associative container is a variable-sized container that supports efficient retrieval of elements (values) based on keys. It supports insertion and removal of elements, but differs from a sequence in that it does not provide a mechanism for inserting an element at a specific position. Associative containers include map, multimap, set, multiset, hash_map, hash_multimap, hash_set, hash_multiset. |
+| array | An array is a C++ container which directly implements a C-style fixed array but which adds STL container semantics to it. |
+| basic_string | A templated string class which is usually used to store char or wchar_t strings. |
+| begin | The function used by all conventional containers to return the first item in the container. |
+| BidirectionalIterator | An input iterator which is like ForwardIterator except it can be read in a backward direction as well. |
+| BinaryOperation  | A function which takes two arguments and returns a value (which will usually be assigned to a third object). |
+| BinaryPredicate | A function which takes two arguments and returns true if some criteria is met (e.g. they are equal). |
+| binder1st, binder2nd | These are function objects which convert one function object into another.  In particular, they implement a binary function whereby you can specify one of the arguments.This is a somewhat abstract concept but has its uses. |
+| bit vector | A specialized container that acts like vector<bool> but is implemented via one bit per entry. STL vector<bool> is usually implemented as a bit vector but EASTL avoids this in favor of a specific bit vector container. |
+| bitset | An extensible yet efficient implementation of bit flags. Not strictly a conventional STL container and not the same thing as vector<bool> or a bit_vector, both of which are formal iterate-able containers. |
+| capacity | Refers to the amount of total storage available in an array-based container such as vector, string, and array. Capacity is always >= container size and is > size in order to provide extra space for a container to grow into. |
+| const_iterator | An iterator whose iterated items are cannot be modified. A const_iterator is akin to a const pointer such as 'const char*'. |
+| container | A container is an object that stores other objects (its elements), and that has methods for accessing its elements. In particular, every type that is a model of container has an associated iterator type that can be used to iterate through the container's elements. |
+| copy constructor | A constructor for a type which takes another object of that type as its argument. For a hypothetical Widget class, the copy constructor is of the form Widget(const Widget& src); |
+| Compare | A function which takes two arguments and returns the lesser of the two. |
+| deque | The name deque is pronounced "deck" and stands for "double-ended queue."<br><br> A deque is very much like a vector: like vector, it is a sequence that supports random access to elements, constant time insertion and removal of elements at the end of the sequence, and linear time insertion and removal of elements in the middle.<br><br> The main way in which deque differs from vector is that deque also supports constant time insertion and removal of elements at the beginning of the sequence. Additionally, deque does not have any member functions analogous to vector's capacity() and reserve(), and does not provide the guarantees on iterator validity that are associated with those member functions. |
+| difference_type | The typedef'd type used by all conventional containers and iterators to define the distance between two iterators. It is usually the same thing as the C/C++ ptrdiff_t data type. |
+| empty | The function used by all conventional containers to tell if a container has a size of zero. In many cases empty is more efficient than checking for size() == 0. |
+| element | An element refers to a member of a container. |
+| end | The function used by all conventional containers to return one-past the last item in the container. |
+| equal_range | equal_range is a version of binary search: it attempts to find the element value in an ordered range [first, last). The value returned by equal_range is essentially a combination of the values returned by lower_bound and upper_bound: it returns a pair of iterators i and j such that i is the first position where value could be inserted without violating the ordering and j is the last position where value could be inserted without violating the ordering. It follows that every element in the range [i, j) is equivalent to value, and that [i, j) is the largest subrange of [first, last) that has this property. |
+| explicit instantiation | Explicit instantiation lets you create an instantiation of a templated class or function without actually using it in your code. Since this is useful when you are creating library files that use templates for distribution, uninstantiated template definitions are not put into object files. An example of the syntax for explicit instantiation is:<br> `template class vector<char>;`<br> `template void min<int>(int, int);`<br> `template void min(int, int);` |
+| ForwardIterator | An input iterator which is like InputIterator except it can be reset back to the beginning. |
+| Function | A function which takes one argument and applies some operation to the target. |
+| function object, functor | A function object or functor is a class that has the function-call operator (<tt>operator()</tt>) defined. |
+| Generator | A function which takes no arguments and returns a value (which will usually be assigned to an object). |
+| hash_map, hash_multimap, hash_set, hash_multiset | The hash containers are implementations of map, multimap, set, and multiset via a hashtable instead of via a tree. Searches are O(1) (fast) but the container is not sorted. |
+| heap | A heap is a data structure which is not necessarily sorted but is organized such that the highest priority item is at the top. A heap is synonymous with a priority queue and has numerous applications in computer science. |
+| InputIterator | An input iterator (iterator you read from) which allows reading each element only once and only in a forward direction. |
+| intrusive_list, intrusive_hash_map, etc. | Intrusive containers are containers which don't allocate memory but instead use their contained object to manage the container's memory. While list allocates nodes (with mpPrev/mpNext pointers) that contain the list items, intrusive_list doesn't allocate nodes but instead the container items have the mpPrev/mpNext pointers. |
+| intrusive_ptr | intrusive_ptr is a smart pointer which doesn't allocate memory but instead uses the contained object to manage lifetime via addref and release functions. |
+| iterator | An iterator is the fundamental entity of reading and enumerating values in a container. Much like a pointer can be used to walk through a character array, an iterator is used to walk through a linked list. |
+| iterator category | An iterator category defines the functionality the iterator provides. The conventional iterator categories are InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator, and OutputIterator. See the definitions of each of these for more information.Iterator category is synonymous with <span style="font-style: italic;">iterator_tag</span>. |
+| iterator_tag | See <span style="font-style: italic;">iterator category</span>. |
+| key_type, Key | A Key or key_type is the identifier used by associative (a.k.a. dictionary) containers (e.g. map, hash_map) to identify the type used to index the mapped_type. If you have a dictionary of strings that you access by an integer id, the ids are the keys and the strings are the mapped types. |
+| lexicographical compare | A lexicographical compare is a comparison of two containers that compares them element by element, much like the C strcmp function compares two strings. |
+| linked_ptr | A linked_ptr is a shared smart pointer which implements object lifetime via a linked list of all linked_ptrs that are referencing the object. linked_ptr, like intrusive_ptr, is a non-memory-allocating alternative to shared_ptr. |
+| list | A list is a doubly linked list. It is a sequence that supports both forward and backward traversal, and (amortized) constant time insertion and removal of elements at the beginning or the end, or in the middle. Lists have the important property that insertion and splicing do not invalidate iterators to list elements, and that even removal invalidates only the iterators that point to the elements that are removed. The ordering of iterators may be changed (that is, list<T>::iterator might have a different predecessor or successor after a list operation than it did before), but the iterators themselves will not be invalidated or made to point to different elements unless that invalidation or mutation is explicit. |
+| lower_bound | lower_bound is a version of binary search: it attempts to find the element value in an ordered range [first, last). Specifically, it returns the first position where value could be inserted without violating the ordering. |
+| map | Map is a sorted associative container that associates objects of type Key with objects of type T. Map is a pair associative container, meaning that its value type is pair<const Key, T>. It is also a unique associative container, meaning that no two elements have the same key. It is implemented with a tree structure. |
+| mapped_type | A mapped_type is a typedef used by associative containers to identify the container object which is accessed by a key. If you have a dictionary of strings that you access by an integer id, the ids are the keys and the strings are the mapped types. |
+| member template | A member template is a templated function of a templated class. Thus with a member template function there are two levels of templating -- the class and the function. |
+| multimap,  | Multimap is a sorted associative container that associates objects of type Key with objects of type T. multimap is a pair associative container, meaning that its value type is pair<const Key, T>. It is also a multiple associative container, meaning that there is no limit on the number of elements with the same key.It is implemented with a tree structure. |
+| multiset | Multiset is a sorted associative container that stores objects of type Key. Its value type, as well as its key type, is Key. It is also a multiple associative container, meaning that two or more elements may be identical. It is implemented with a tree structure. |
+| node | A node is a little holder class used by many containers to hold the contained items. A linked-list, for example, defines a node which has three members: mpPrev, mpNext, and T (the contained object). |
+| npos | npos is used by the string class to identify a non-existent index. Some string functions return npos to indicate that the function failed. |
+| rel_ops | rel_ops refers to "relational operators" and is a set of templated functions which provide operator!= for classes that  have only operator== and provide operator > for classes that have only operator <, etc. Unfortunately, rel_ops have a habit of polluting the global operator space and creating conflicts. They must be used with discretion. |
+| reverse_iterator | A reverse_iterator is an iterator which wraps a bidirectional or random access iterator and allows the iterator to be read in reverse direction. The difference between using reverse_iterators and just decrementing regular iterators is that reverse_iterators use operator++ to move backwards and thus work in any algorithm that calls ++ to move through a container. |
+| OutputIterator | An output iterator (iterator you write to) which allows writing each element only once in only in a forward direction. |
+| POD | POD means Plain Old Data. It refers to C++ classes which act like built-in types and C structs. These are useful to distinguish because some algorithms can be made more efficient when they can detect that they are working with PODs instead of regular classes.  |
+| Predicate | A function which takes one argument returns true if the argument meets some criteria. |
+| priority_queue | A priority_queue is an adapter container which implements a heap via a random access container such as vector or deque. |
+| queue | A queue is an adapter container which implements a FIFO (first-in, first-out) container with which you can add items to the back and get items from the front. |
+| RandomAccessIterator | An input iterator which can be addressed like an array. It is a superset of all other input iterators. |
+| red-black tree | A red-black tree is a binary tree which has the property of being always balanced. The colors red and black are somewhat arbitrarily named monikers for nodes used to measure the balance of the tree. Red-black trees are considered the best all-around data structure for sorted containers. |
+| scalar | A scalar is a data type which is implemented via a numerical value. In C++ this means integers, floating point values, enumerations, and pointers.  |
+| scoped_ptr | A scoped_ptr is a smart pointer which is the same as C++ auto_ptr except that it cannot be copied. |
+| set | Set is a sorted associative container that stores objects of type Key. Its value type, as well as its key type, is Key. It is also a unique associative container, meaning that no two elements are the same.It is implemented with a tree structure. |
+| sequence | A sequence is a variable-sized container whose elements are arranged in a strict linear (though not necessarily contiguous) order. It supports insertion and removal of elements. Sequence containers include vector, deque, array, list, slist. |
+| size | All conventional containers have a size member function which returns the count of elements in the container. The efficiency of the size function differs between containers. |
+| size_type | The type that a container uses to define its size and counts. This is similar to the C/C++ size_t type but may be specialized for the container. |
+| skip list | A skip-list is a type of container which is an alternative to a binary tree for finding data. |
+| shared_ptr | A shared_ptr is a smart pointer which allows multiple references (via multiple shared_ptrs) to the same object. When the last shared_ptr goes away, the pointer is freed. shared_ptr is implemented via a shared count between all instances. |
+| slist | An slist is like a list but is singly-linked instead of doubly-linked. It can only be iterated in a forward-direction. |
+| smart pointer | Smart pointer is a term that identifies a family of utility classes which store pointers and free them when the class instance goes out of scope. Examples of smart pointers are shared_ptr, linked_ptr, intrusive_ptr, and scoped_ptr. |
+| splice | Splicing refers to the moving of a subsequence of one Sequence into another Sequence. |
+| stack | A stack is a adapter container which implements LIFO (last-in, first, out) access via another container such as a list or deque. |
+| STL | Standard Template Library.  |
+| StrictWeakOrdering | A BinaryPredicate that compares two objects, returning true if the first precedes the second. Like Compare but has additional requirements. Used for sorting routines.<br><br> This predicate must satisfy the standard mathematical definition of a strict weak ordering. A StrictWeakOrdering has to behave the way that "less than" behaves: if a is less than b then b is not less than a, if a is less than b and b is less than c then a is less than c, and so on. |
+| string | See basic_string. |
+| T | T is the template parameter name used by most containers to identify the contained element type.  |
+| template parameter | A template parameter is the templated type used to define a template function or class. In the declaration 'template <typename T> class vector{ },'  T is a template parameter. |
+| template specialization | A template specialization is a custom version of a template which overrides the default version and provides alternative functionality, often for the purpose of providing improved or specialized functionality. |
+| treap | A tree-like structure implemented via a heap. This is an alternative to a binary tree (e.g. red-black tree), skip-list, and sorted array as a mechanism for a fast-access sorted container. |
+| type traits | Type traits are properties of types. If you have a templated type T and you want to know if it is a pointer, you would use the is_pointer type trait. If you want to know if the type is a POD, you would use the is_pod type trait. Type traits are very useful for allowing the implementation of optimized generic algorithms and for asserting that types have properties expected by the function or class contract. For example, you can use type_traits to tell if a type can be copied via memcpy instead of a slower element-by-element copy. |
+| typename | Typename is a C++ keyword used in templated function implementations which identifies to the compiler that the following expression is a type and not a value. It is used extensively in EASTL, particularly in the algorithms. |
+| UnaryOperation | A function which takes one argument and returns a value (which will usually be assigned to second object). |
+| upper_bound | upper_bound is a version of binary search: it attempts to find the element value in an ordered range [first, last). Specifically, it returns the last position where value could be inserted without violating the ordering. |
+| value_type, Value | A value_type is a typedef used by all containers to identify the elements they contain. In most cases value_type is simply the same thing as the user-supplied T template parameter. The primary exception is the associative containers whereby value_type is the pair of key_type and mapped_type. |
+| vector | A vector is a Sequence that supports random access to elements, constant time insertion and removal of elements at the end, and linear time insertion and removal of elements at the beginning or in the middle. The number of elements in a vector may vary dynamically; memory management is automatic. Vector is the simplest of the container classes, and in many cases the most efficient. |
+| vector_map, vector_multimap, vector_set, vector_multiset | These are containers that implement the functionality of map, multimap, set, and multiset via a vector or deque instead of a tree. They use less memory and find items faster, but are slower to modify and modification invalidates iterators. |
+| weak_ptr | A weak_ptr is an adjunct to shared_ptr which doesn't increment the reference on the contained object but can safely tell you if the object still exists and access it if so. It has uses in preventing circular references in shared_ptrs. |
+
+----------------------------------------------
+End of document
diff --git a/EASTL/doc/Gotchas.md b/EASTL/doc/Gotchas.md
new file mode 100644
index 0000000..aefe362
--- /dev/null
+++ b/EASTL/doc/Gotchas.md
@@ -0,0 +1,134 @@
+# EASTL Gotchas
+
+There are some cases where the EASTL design results in "gotchas" or behavior that isn't necessarily what the new user would expect. These are all situations in which this behavior may be undesirable. One might ask, "Why not change EASTL to make these gotchas go away?" The answer is that in each case making the gotchas go away would either be impossible or would compromise the functionality of the library.
+
+## Summary
+
+The descriptions here are intentionally terse; this is to make them easier to visually scan.
+
+1. [map::operator[] can create elements.](#mapoperator-can-create-elements)
+2. [char* converts to string silently.](#char-converts-to-string-silently)
+3. [char* is compared by ptr and not by contents.](#char-is-compared-by-ptr-and-not-by-contents)
+4. [Iterators can be invalidated by container mutations.](#iterators-can-be-invalidated-by-container-mutations)
+5. [Vector resizing may cause ctor/dtor cascades.](#vector-resizing-may-cause-ctordtor-cascades)
+6. [Vector and string insert/push_back/resize can reallocate.](#vector-and-string-insertpush_backresize-can-reallocate)
+7. [Deriving from containers may not work.](#deriving-from-containers-may-not-work)
+8. [set::iterator is const_iterator.](#setiterator-is-const_iterator)
+9. [Inserting elements means copying by value.](#inserting-elements-means-copying-by-value)
+10. [Containers of pointers can leak if you aren't careful.](#containers-of-pointers-can-leak-if-you-arent-careful)
+11. [Containers of auto_ptrs can crash.](#containers-of-auto_ptrs-can-crash)
+12. [Remove algorithms don't actually remove elements.](#remove-algorithms-dont-actually-remove-elements)
+13. [list::size() is O(n).](#listsize-is-on)
+14. [vector and deque::size() may incur integer division.](#vector-and-dequesize-may-incur-integer-division)
+15. [Be careful making custom Compare functions.](#be-careful-making-custom-compare-functions)
+16. [Comparisons involving floating point are dangerous.](#comparisons-involving-floating-point-are-dangerous)
+17. [Writing beyond string::size and vector::size is dangerous.](#writing-beyond-stringsize-and-vectorsize-is-dangerous)
+18. [Container operator=() doesn't copy allocators.](#container-operator-doesnt-copy-allocators)
+
+## Detail
+
+### map::operator[] can create elements.
+
+By design, map operator[] creates a value for you if it isn't already present. The reason for this is that the alternative behavior would be to throw an exception, and such behavior isn't desirable. The resolution is to simply use the map::find function instead of operator[].
+
+### char* converts to string silently.
+
+The string class has a non-explicit constructor that takes char* as an argument. Thus if you pass char* to a function that takes a string object, a temporary string will be created. In some cases this is undesirable behavior but the user may not notice it right away, as the compiler gives no warnings. The reason that the string constructor from char* is not declared explicit is that doing so would prevent the user from expressions such as: string s = "hello". In this example, no temporary string object is created, but the syntax is not possible if the char* constructor is declared explicit. Thus a decision to make the string char* constructor explicit involves tradeoffs.
+
+There is an EASTL configuration option called EASTL_STRING_EXPLICIT which makes the string char* ctor explicit and avoids the behaviour described above.
+
+### char* is compared by ptr and not by contents.
+
+If you have a set of strings declared as set<char*>, the find function will compare via the pointer value and not the string contents. The workaround is to make a set of string objects or, better, to supply a custom string comparison function to the set. The workaround is not to declare a global operator< for type char*, as that could cause other systems to break.
+
+### Iterators can be invalidated by container mutations
+
+With some containers, modifications of them may invalidate iterators into them. With other containers, modifications of them only an iterator if the modification involves the element that iterator refers to. Containers in the former category include vector, deque, basic_string (string), vector_map, vector_multimap, vector_set, and vector_multiset. Containers in the latter category include list, slist, map, multimap, multiset, all hash containers, and all intrusive containers.
+
+### Vector resizing may cause ctor/dtor cascades.
+
+If elements are inserted into a vector in middle of the sequence, the elements from the insertion point to the end will be copied upward. This will necessarily cause a series of element constructions and destructions as the elements are copied upward. Similarly, if an element is appended to a vector but the vector capacity is exhausted and needs to be reallocated, the entire vector will undergo a construction and destruction pass as the values are copied to the new storage. This issue exists for deque as well, though to a lesser degree. For vector, the resolution is to reserve enough space in your vector to prevent such reallocation. For deque the resolution is to set its subarray size to enough to prevent such reallocation. Another solution that can often be used is to take advantage of the has_trivial_relocate type trait, which can cause such moves to happen via memcpy instead of via ctor/dtor calls. If your class can be safely memcpy'd, you can use EASTL_DECLARE_TRIVIAL_RELOCATE to tell the compiler it can be memcpy'd. Note that built-in scalars (e.g. int) already are automatically memcpy'd by EASTL.
+
+### Vector and string insert/push_back/resize can reallocate.
+
+If you create an empty vector and use push_back to insert 100 elements, the vector will reallocate itself at least three or four times during the operation. This can be an undesirable thing. The best thing to do if possible is to reserve the size you will need up front in the vector constructor or before you add any elements.
+
+### Deriving from containers may not work.
+
+EASTL containers are not designed with the guarantee that they can be arbitrarily subclassed. This is by design and is done for performance reasons, as such guarantees would likely involve making containers use virtual functions. However, some types of subclassing can be successful and EASTL does such subclassing internally to its advantage. The primary problem with subclassing results when a parent class function calls a function that the user wants to override. The parent class cannot see the overridden function and silent unpredictable behavior will likely occur. If your derived container acts strictly as a wrapper for the container then you will likely be able to successfully subclass it.
+
+### set::iterator is const_iterator.
+
+The reason this is so is that a set is an ordered container and changing the value referred to by an iterator could make the set be out of order. Thus, set and multiset iterators are always const_iterators. If you need to change the value and are sure the change will not alter the container order, use const_cast or declare mutable member variables for your contained object. This resolution is the one blessed by the C++ standardization committee. This issue is addressed in more detail in the EASTL FAQ.
+
+### Inserting elements means copying by value.
+
+When you insert an element into a (non-intrusive) container, the container makes a copy of the element. There is no provision to take over ownership of an object from the user. The exception to this is of course when you use a container of pointers instead of a container of values. See the entry below regarding containers of pointers. Intrusive containers (e.g. intrusive_list) do in fact take over the user-provided value, and thus provide another advantage over regular containers in addition to avoiding memory allocation.
+
+### Containers of pointers can leak if you aren't careful.
+
+Containers of points don't know or care about the possibility that the pointer may have been allocated and need to be freed. Thus if you erase such elements from a container they are not freed. The resolution is to manually free the pointers when removing them or to instead use a container of smart pointers (shared smart pointers, in particular). This issue is addressed in more detail in the EASTL FAQ and the auto_ptr-related entry below.
+
+### Containers of auto_ptrs can crash
+
+We suggested above that the user can use a container of smart pointers to automatically manage contained pointers. However, you don't want to use auto_ptr, as auto_ptrs cannot be safely assigned to each other; doing so results in a stale pointer and most likely a crash.
+
+### Remove algorithms don't actually remove elements.
+
+Algorithms such as remove, remove_if, remove_heap, and unique do not erase elements from the sequences they work on. Instead, they return an iterator to the new end of the sequence and the user must call erase with that iterator in order to actually remove the elements from the container. This behavior exists because algorithms work on sequences via iterators and don't know how to work with containers. Only the container can know how to best erase its own elements. In each case, the documentation for the algorithm reminds the user of this behavior. Similarly, the copy algorithm copies elements from one sequence to another and doesn't modify the size of the destination sequence. So the destination must hold at least as many items as the source, and if it holds more items, you may want to erase the items at the end after the copy.
+
+### list::size() is O(n).
+
+By this we mean that calling size() on a list will iterate the list and add the size as it goes. Thus, getting the size of a list is not a fast operation, as it requires traversing the list and counting the nodes. We could make list::size() be fast by having a member mSize variable. There are reasons for having such functionality and reasons for not having such functionality. We currently choose to not have a member mSize variable as it would add four bytes to the class, add processing to functions such as insert and erase, and would only serve to improve the size function, but no other function. The alternative argument is that the C++ standard states that std::list should be an O(1) operation (i.e. have a member size variable), most C++ standard library list implementations do so, the size is but an integer which is quick to update, and many users expect to have a fast size function. All of this applies to slist and intrusive_list as well.
+
+Note that EASTL's config.h file has an option in it to cause list and slist to cache their size with an mSize variable and thus make size() O(1). This option is disabled by default.
+
+### vector and deque::size() may incur integer division.
+
+Some containers (vector and deque in particular) calculate their size by pointer subtraction. For example, the implementation of vector::size() is 'return mpEnd - mpBegin'. This looks like a harmless subtraction, but if the size of the contained object is not an even power of two then the compiler will likely need to do an integer division to calculate the value of the subtracted pointers. One might suggest that vector use mpBegin and mnSize as member variables instead of mpBegin and mpEnd, but that would incur costs in other vector operations. The suggested workaround is to iterate a vector instead of using a for loop and operator[] and for those cases where you do use a for loop and operator[], get the size once at the beginning of the loop instead of repeatedly during the condition test.
+
+### Be careful making custom Compare functions.
+
+A Compare function compares two values and returns true if the first is less than the second. This is easy to understand for integers and strings, but harder to get right for more complex structures. Many a time have people decided to come up with a fancy mechanism for comparing values and made mistakes. The FAQ has a couple entries related to this. See http://blogs.msdn.com/oldnewthing/archive/2003/10/23/55408.aspx for a story about how this can go wrong by being overly clever.
+
+### Comparisons involving floating point are dangerous.
+
+Floating point comparisons between two values that are very nearly equal can result in inconsistent results. Similarly, floating point comparisons between NaN values will always generate inconsistent results, as NaNs by definition always compare as non-equal. You thus need to be careful when using comparison functions that work with floating point values. Conversions to integral values may help the problem, but not necessarily.
+
+### Writing beyond string::size and vector::size is dangerous.
+
+A trick that often comes to mind when working with strings is to set the string capacity to some maximum value, strcpy data into it, and then resize the string when done. This can be done with EASTL, but only if you resize the string to the maximum value and not reserve the string to the maximum value. The reason is that when you resize a string from size (n) to size (n + count), the count characters are zeroed and overwrite the characters that you strcpyd.
+
+The following code is broken:
+
+```cpp
+string mDataDir;
+
+
+ mDataDir.reserve(kMaxPathLength); // reserve
+ strcpy(&mDataDir[0], "blah/blah/blah");
+
+mDataDir.resize(strlen(&mDataDir[0])); // Overwrites your blah/... with 00000...
+```
+
+This following code is OK:
+
+```cpp
+string mDataDir;
+
+
+ mDataDir.resize(kMaxPathLength); // resize
+ strcpy(&mDataDir[0], "blah/blah/blah");
+
+mDataDir.resize(strlen(&mDataDir[0]));
+```
+
+### Container operator=() doesn't copy allocators.
+
+EASTL container assignment (e.g. vector::operator=(const vector&)) doesn't copy the allocator. There are good and bad reasons for doing this, but that's how it acts. So you need to beware that you need to assign the allocator separately or make a container subclass which overrides opeator=() and does this.
+
+----------------------------------------------
+End of document
+
+
+
diff --git a/EASTL/doc/Introduction.md b/EASTL/doc/Introduction.md
new file mode 100644
index 0000000..9fa8188
--- /dev/null
+++ b/EASTL/doc/Introduction.md
@@ -0,0 +1,18 @@
+# EASTL Introduction
+
+EASTL stands for Electronic Arts Standard Template Library. It is a C++ template library of containers, algorithms, and iterators useful for runtime and tool development across multiple platforms. It is a fairly extensive and robust implementation of such a library and has an emphasis on high performance above all other considerations.
+
+## Intended Audience
+
+This is a short document intended to provide a basic introduction to EASTL for those new to the concept of EASTL or STL. If you are familiar with the C++ STL or have worked with other templated container/algorithm libraries, you probably don't need to read this. If you have no familiarity with C++ templates at all, then you probably will need more than this document to get you up to speed. In this case you need to understand that templates, when used properly, are powerful vehicles for the ease of creation of optimized C++ code. A description of C++ templates is outside the scope of this documentation, but there is plenty of such documentation on the Internet. See the EASTL FAQ.html document for links to information related to learning templates and STL.
+
+## EASTL Modules
+
+EASTL consists primarily of containers, algorithms, and iterators. An example of a container is a linked list, while an example of an algorithm is a sort function; iterators are the entities of traversal for containers and algorithms. EASTL containers a fairly large number of containers and algorithms, each of which is a very clean, efficient, and unit-tested implementation. We can say with some confidence that you are not likely to find better implementations of these (commercial or otherwise), as these are the result of years of wisdom and diligent work. For a detailed list of EASTL modules, see EASTL Modules.html.
+
+## EASTL Suitability
+
+What uses are EASTL suitable for? Essentially any situation in tools and shipping applications where the functionality of EASTL is useful. Modern compilers are capable of producing good code with templates and many people are using them in both current generation and future generation applications on multiple platforms from embedded systems to servers and mainframes.
+
+----------------------------------------------
+End of document \ No newline at end of file
diff --git a/EASTL/doc/Maintenance.md b/EASTL/doc/Maintenance.md
new file mode 100644
index 0000000..82bdb80
--- /dev/null
+++ b/EASTL/doc/Maintenance.md
@@ -0,0 +1,195 @@
+# EASTL Maintenance
+
+## Introduction
+
+The purpose of this document is to provide some necessary background for anybody who might do work on EASTL. Writing generic templated systems like EASTL can be surprisingly tricky. There are numerous details of the C++ language that you need to understand which don't usually come into play during the day-to-day C++ coding that many people do. It is easy to make a change to some function that seems proper and works for your test case but either violates the design expectations or simply breaks under other circumstances.
+
+It may be useful to start with an example. Here we provide an implementation of the count algorithm which is seems simple enough. Except it is wrong and while it will compile in some cases it won't compile in others:
+
+```cpp
+int count(InputIterator first, InputIterator last, const T& value)
+{
+     int result = 0;
+
+     for(; first < last; ++first){
+         if(*first == value)
+             ++result;
+     }
+
+     return result;
+ }
+ ```
+
+The problem is with the comparison 'first < last'. The count algorithm takes an InputIterator and operator< is not guaranteed to exist for any given InputIterator (and indeed while operator< exists for vector::iterator, it doesn't exist for list::iterator). The comparison in the above algorithm must instead be implemented as 'first != last'. If we were working with a RandomAccessIterator then 'first < last' would be valid.
+
+In the following sections we cover various topics of interest regarding the development and maintentance of EASTL. Unfortunately, this document can't cover every aspect of EASTL maintenance issues, but at least it should give you a sense of the kinds of issues.
+
+## C++ Language Standard
+
+First and foremost, you need to be familiar with the C++ standard. In particular, the sections of the standard related to containers, algorithms, and iterators are of prime significance. We'll talk about some of this in more detail below. Similarly, a strong understanding of the basic data types is required. What is the difference between ptrdiff_t and intptr_t; unsigned int and size_t; char and signed char?
+
+In addition to the C++ language standard, you'll want to be familiar with the C++ Defect Report. This is a continuously updated document which lists flaws in the original C++ language specification and the current thinking as the resolutions of those flaws. You will notice various references to the Defect Report in EASTL source code.
+
+Additionally, you will want to be familiar with the C++ Technical Report 1 (as of this writing there is only one). This document is the evolving addendum to the C++ standard based on both the Defect Report and based on desired additions to the C++ language and standard library.
+
+Additionally, you will probably want to have some familiarity with Boost. It also helps to keep an eye on comp.std.c++ Usenet discussions. However, watch out for what people say on Usenet. They tend to defend GCC, Unix, std STL, and C++ to a sometimes unreasonable degree. Many discussions ignore performance implications and concentrate only on correctness and sometimes academic correctness above usability.
+
+## Language Use
+
+Macros are (almost) not allowed in EASTL. A prime directive of EASTL is to be easier to read by users and most of the time macros are an impedence to this. So we avoid macros at all costs, even if it ends up making our development and maintenance more difficult. That being said, you will notice that the EASTL config.h file uses macros to control various options. This is an exception to the rule; when we talk about not using macros, we mean with the EASTL implementation itself.
+
+EASTL assumes a compliant and intelligent C++ compiler, and thus all language facilities are usable. However, we nevertheless choose to stay away from some language functionality. The primary language features we avoid are:
+
+* RTTI (run-time-type-identification) (this is deemed too costly)
+* Template export (few compilers support this)
+* Exception specifications (most compilers ignore them)
+
+Use of per-platform or per-compiler code should be avoided when possible but where there is a significant advantage to be gained it can and indeed should be used. An example of this is the GCC __builtin_expect feature, which allows the user to give the compiler a hint about whether an expression is true or false. This allows for the generation of code that executes faster due to more intelligent branch prediction.
+
+## Prime Directives
+
+The implementation of EASTL is guided foremost by the following directives which are listed in order of importance.
+
+1. Efficiency (speed and memory usage)
+2. Correctness (doesn't have bugs)
+3. Portability (works on all required platforms with minimal specialized code)
+4. Readability (code is legible and comments are present and useful)
+
+Note that unlike commercial STL implementations which must put correctness above all, we put a higher value on efficiency. As a result, some functionality may have some usage limitation that is not present in other similar systems but which allows for more efficient operation, especially on the platforms of significance to us.
+
+Portability is significant, but not critical. Yes, EASTL must compile and run on all platforms that we will ship games for. But we don't take that to mean under all compilers that could be conceivably used for such platforms. For example, Microsoft VC6 can be used to compile Windows programs, but VC6's C++ support is too weak for EASTL and so you simply cannot use EASTL under VC6.
+
+Readability is something that EASTL achieves better than many other templated libraries, particularly Microsoft STL and STLPort. We make every attempt to make EASTL code clean and sensible. Sometimes our need to provide optimizations (particularly related to type_traits and iterator types) results in less simple code, but efficiency happens to be our prime directive and so it overrides all other considerations.
+
+## Coding Conventions
+
+Here we provide a list of coding conventions to follow when maintaining or adding to EASTL, starting with the three language use items from above:
+
+* No RTTI use.
+* No use of exception specifications (e.g. appending the 'throw' declarator to a function).
+* No use of exception handling itself except where explicitly required by the implementation (e.g. vector::at).
+* Exception use needs to savvy to EASTL_EXCEPTIONS_ENABLED.
+* No use of macros (outside of config.h). Macros make things more difficult for the user.
+* No use of static or global variables.
+* No use of global new, delete, malloc, or free. All memory must be user-specifyable via an Allocator parameter (default-specified or explicitly specified).
+* Containers use protected member data and functions as opposed to private. This is because doing so allows subclasses to extend the container without the creation of intermediary functions. Recall from our [prime directives](#Prime_Directives) above that performance and simplicity overrule all.
+* No use of multithreading primitives. 
+* No use of the export keyword.
+* We don't have a rule about C-style casts vs. C++ static_cast<>, etc. We would always use static_cast except that debuggers can't evaluate them and so in practice they can get in the way of debugging and tracing. However, if the cast is one that users don't tend to need to view in a debugger, C++ casts are preferred.
+* No external library dependencies whatsoever, including standard STL. EASTL is dependent on only EABase and the C++ compiler. 
+* All code must be const-correct. This isn't just for readability -- compilation can fail unless const-ness is used correctly everywhere. 
+* Algorithms do not refer to containers; they refer only to iterators.
+* Algorithms in general do not allocate memory. If such a situation arises, there should be a version of the algorithm which allows the user to provide the allocator.
+* No inferior implementations. No facility should be added to EASTL unless it is of professional quality.
+* The maintainer should emulate the EASTL style of code layout, regardless of the maintainer's personal preferences. When in Rome, do as the Romans do. EASTL uses 4 spaces for indents, which is how the large majority of code within EA is written.
+* No major changes should be done without consulting a peer group.
+
+## Compiler Issues
+
+Historically, templates are the feature of C++ that has given C++ compilers the most fits. We are still working with compilers that don't completely and properly support templates. Luckily, most compilers are now good enough to handle what EASTL requires. Nevertheless, there are precautions we must take.
+
+It turns out that the biggest problem in writing portable EASTL code is that VC++ allows you to make illegal statements which are not allowed by other compilers. For example, VC++ will allow you to neglect using the typename keyword in template references, whereas GCC (especially 3.4+) requires it.
+
+In order to feel comfortable that your EASTL code is C++ correct and is portable, you must do at least these two things:
+
+* Test under at least VS2005, GCC 3.4+, GCC 4.4+, EDG, and clang.
+* Test all functions that you write, as compilers will often skip the compilation of a template function if it isn't used.
+
+The two biggest issues to watch out for are 'typename' and a concept called "dependent names". In both cases VC++ will accept non-conforming syntax whereas most other compilers will not. Whenever you reference a templated type (and not a templated value) in a template, you need to prefix it by 'typename'. Whenever your class function refers to a base class member (data or function), you need to refer to it by "this->", "base_type::", or by placing a "using" statement in your class to declare that you will be referencing the given base class member.
+
+## Iterator Issues
+
+The most important thing to understand about iterators is the concept of iterator types and their designated properties. In particular, we need to understand the difference between InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator, and OutputIterator. These differences dictate both how we implement our algorithms and how we implement our optimizations. Please read the C++ standard for a reasonably well-implemented description of these iterator types.
+
+Here's an example from EASTL/algorithm.h which demonstrates how we use iterator types to optimize the reverse algorithm based on the kind of iterator passed to it:
+
+```cpp
+template <class BidirectionalIterator>
+inline void reverse_impl(BidirectionalIterator first, BidirectionalIterator last, bidirectional_iterator_tag)
+{
+ for(; (first != last) && (first != --last); ++first) // We are not allowed to use operator <, <=, >, >= with
+ iter_swap(first, last); // a generic (bidirectional or otherwise) iterator.
+}
+
+
+template <typename RandomAccessIterator>
+inline void reverse_impl(RandomAccessIterator first, RandomAccessIterator last, random_access_iterator_tag)
+{
+ for(; first < --last; ++first) // With a random access iterator, we can use operator < to more efficiently implement
+ iter_swap(first, last); // this algorithm. A generic iterator doesn't necessarily have an operator < defined.
+}
+
+
+template <class BidirectionalIterator>
+inline void reverse(BidirectionalIterator first, BidirectionalIterator last)
+{
+ typedef typename iterator_traits<BidirectionalIterator>::iterator_category IC;
+ reverse_impl(first, last, IC());
+}
+```
+
+## Exception Handling
+
+You will notice that EASTL uses try/catch in some places (particularly in containers) and uses the EASTL_EXCEPTIONS_ENABLED define. For starters, any EASTL code that uses try/catch should always be wrapped within #if EASTL_EXCEPTIONS_ENABLED (note: #if, not #ifdef).
+
+This is simple enough, but what you may be wondering is how it is that EASTL decides to use try/catch for some sections of code and not for others. EASTL follows the C++ standard library conventions with respect to exception handling, and you will see similar exception handling in standard STL. The code that you need to wrap in try/catch is code that can throw a C++ exception (not to be confused with CPU exception) and needs to have something unwound (or fixed) as a result. The important thing is that the container be in a valid state after encountering such exceptions. In general the kinds of things that require such try/catch are:
+
+* Memory allocation failures (which throw exceptions)
+* Constructor exceptions
+
+Take a look at the cases in EASTL where try/catch is used and see what it is doing.
+
+## Type Traits
+
+EASTL provides a facility called type_traits which is very similar to the type_traits being proposed by the C++ TR1 (see above). type_traits are useful because they tell you about properties of types at compile time. This allows you to do things such as assert that a data type is scalar or that a data type is const. The way we put them to use in EASTL is to take advantage of them to implement different pathways for functions based on types. For example, we can copy a contiguous array of scalars much faster via memcpy than we can via a for loop, though we could not safely employ the for loop for a non-trivial C++ class.
+
+As mentioned in the GeneralOptimizations section below, EASTL should take advantage of type_traits information to the extent possible to achive maximum effiiciency.
+
+## General Optimizations
+
+One of the primary goals of EASTL is to achieve the highest possible efficiency. In cases where EASTL functionality overlaps standard C++ STL functionality, standard STL implementations provided by compiler vendors are a benchmark upon which EASTL strives to beat. Indeed EASTL is more efficient than all other current STL implementations (with some exception in the case of some Metrowerks STL facilities). Here we list some of the things to look for when considering optimization of EASTL code These items can be considered general optimization suggestions for any code, but this particular list applies to EASTL:
+
+* Take advantage of type_traits to the extent possible (e.g. to use memcpy to move data instead of a for loop when possible).
+* Take advantage of iterator types to the extent possible.
+* Take advantage of the compiler's expectation that if statements are expected to evaluate as true and for loop conditions are expected to evaluate as false.
+* Make inline-friendly code. This often means avoiding temporaries to the extent possible.
+* Minimize branching (i.e. minimize 'if' statements). Where branching is used, make it so that 'if' statements execute as true.
+* Use EASTL_LIKELY/EASTL_UNLIKELY to give branch hints to the compiler when you are confident it will be beneficial.
+* Use restricted pointers (EABase's EA_RESTRICT or various compiler-specific versions of __restrict).
+* Compare unsigned values to < max instead of comparing signed values to >= 0 && < max.
+* Employ power of 2 integer math instead of math with any kind of integer.
+* Use template specialization where possible to implement improved functionality.
+* Avoid function calls when the call does something trivial. This improves debug build speed (which matters) and sometimes release build speed as well, though sometimes makes the code intent less clear. A comment next to the code saying what call it is replacing makes the intent clear without sacrificing performance.
+
+## Unit Tests
+
+Writing robust templated containers and algorithms is difficult or impossible without a heavy unit test suite in place. EASTL has a pretty extensive set of unit tests for all containers and algorithms. While the successful automated unit testing of shipping application programs may be a difficult thing to pull off, unit testing of libraries such as this is of huge importance and cannot be understated.
+
+* When making a new unit test, start by copying one of the existing unit tests and follow its conventions.
+* Test containers of both scalars and classes.
+* Test algorithms on both container iterators (e.g. vector.begin()) and pointer iterators (e.g. int*).
+* Make sure that algorithm or container member functions which take iterators work with the type of iterator they claim to (InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator). 
+* Test for const-correctness. If a user is allowed to modify something that is supposed to be const, silent errors can go undetected.
+* Make sure that unit tests cover all functions and all pathways of the tested code. This means that in writing the unit test you need to look at the source code to understand all the pathways.
+* Consider using a random number generator (one is provided in the test library) to do 'monkey' testing whereby unexpected input is given to a module being tested. When doing so, make sure you seed the generator in a way that problems can be reproduced.
+* While we avoid macros in EASTL user code, macros to assist in unit tests aren't considered a problem. However, consider that a number of macros could be replaced by templated functions and thus be easier to work with.
+* Unit tests don't need to be efficient; feel free to take up all the CPU power and time you need to test a module sufficiently.
+* EASTL containers are not thread-safe, by design. Thus there is no need to do multithreading tests as long as you stay away from the usage of static and global variables.
+* Unit tests must succeed with no memory leaks and of course no memory corruption. The heap system should be configured to test for this, and heap validation functions are available to the unit tests while in the middle of runs.
+
+## Things to Keep in Mind
+
+* When referring to EASTL functions and types from EASTL code, make sure to preface the type with the EASTL namespace. If you don't do this you can get collisions due to the compiler not knowing if it should use the EASTL namespace or the namespace of the templated type for the function or type.
+* Newly constructed empty containers do no memory allocation. Some STL and other container libraries allocate an initial node from the class memory allocator. EASTL containers by design never do this. If a container needs an initial node, that node should be made part of the container itself or be a static empty node object.
+* Empty containers (new or otherwise) contain no constructed objects, including those that might be in an 'end' node. Similarly, no user object (e.g. of type T) should be constructed unless required by the design and unless documented in the cotainer/algorithm contract. 
+* When creating a new container class, it's best to copy from an existing similar class to the extent possible. This helps keep the library consistent and resolves subtle problems that can happen in the construction of containers.
+* Be very careful about tweaking the code. It's easy to think (for example) that a > could be switch to a >= where instead it is a big deal. Just about every line of code in EASTL has been thought through and has a purpose. Unit tests may or may not currently test every bit of EASTL, so you can't necessarily rely on them to give you 100% confidence in changes. If you are not sure about something, contact the original author and he will tell you for sure.
+* Algorithm templates always work with iterators and not containers. A given container may of course implement an optimized form or an algorithm itself.
+* Make sure everything is heavily unit tested. If somebody finds a bug, fix the bug and make a unit test to make sure the bug doesn't happen again.
+* It's easy to get iterator categories confused or forgotten while implementing algorithms and containers.
+* Watch out for the strictness of GCC 3.4+. There is a bit of syntax — especially related to templates — that other compilers accept but GCC 3.4+ will not.
+* Don't forget to update the config.h EASTL_VERSION define before publishing.
+* The vector and string classes define iterator to be T*. We want to always leave this so — at least in release builds — as this gives some algorithms an advantage that optimizers cannot get around.
+
+----------------------------------------------
+End of document
diff --git a/EASTL/doc/Modules.md b/EASTL/doc/Modules.md
new file mode 100644
index 0000000..fe13f0c
--- /dev/null
+++ b/EASTL/doc/Modules.md
@@ -0,0 +1,89 @@
+# EASTL Modules
+
+## Introduction
+
+We provide here a list of all top-level modules present or planned for future presence in EASTL. In some cases (e.g. algorithm), the module consists of many smaller submodules which are not described in detail here. In those cases you should consult the source code for those modules or consult the detailed documentation for those modules. This document is a high level overview and not a detailed document.
+
+## Module List
+
+| Module | Description |
+|------|------|
+| config | Configuration header. Allows for changing some compile-time options. |
+| slist <br>fixed_slist | Singly-linked list.<br> fixed_slist is a version which is implemented via a fixed block of contiguous memory.|
+| list<br> fixed_list | Doubly-linked list. |
+| intrusive_list<br> intrusive_slist | List whereby the contained item provides the node implementation. |
+| array | Wrapper for a C-style array which extends it to act like an STL container. |
+| vector<br> fixed_vector | Resizable array container.
+| vector_set<br> vector_multiset | Set implemented via a vector instead of a tree. Speed and memory use is improved but resizing is slower. |
+| vector_map<br> vector_multimap | Map implemented via a vector instead of a tree. Speed and memory use is improved but resizing is slower. |
+| deque | Double-ended queue, but also with random access. Acts like a vector but insertions and removals are efficient. |
+| bit_vector | Implements a vector of bool, but the actual storage is done with one bit per bool. Not the same thing as a bitset. |
+| bitset | Implements an efficient arbitrarily-sized bitfield. Note that this is not strictly the same thing as a vector of bool (bit_vector), as it is optimized to act like an arbitrary set of flags and not to be a generic container which can be iterated, inserted, removed, etc. |
+| set<br> multiset<br> fixed_set<br> fixed_multiset | A set is a sorted unique collection, multiset is sorted but non-unique collection. |
+| map<br> multimap<br> fixed_map<br> fixed_multimap | A map is a sorted associative collection implemented via a tree. It is also known as dictionary. |
+| hash_map<br> hash_multimap<br> fixed_hash_map<br> fixed_hash_multimap | Map implemented via a hash table. |
+| intrusive_hash_map<br> intrusive_hash_multimap<br> intrusive_hash_set<br> intrusive_hash_multiset | hash_map whereby the contained item provides the node implementation, much like intrusive_list. |
+| hash_set<br> hash_multiset<br> fixed_hash_set<br> fixed_hash_map | Set implemented via a hash table.
+| basic_string<br> fixed_string<br> fixed_substring | basic_string is a character string/array.<br> fixed_substring is a string which is a reference to a range within another string or character array.<br> cow_string is a string which implements copy-on-write. |
+| algorithm | min/max, find, binary_search, random_shuffle, reverse, etc. |
+| sort | Sorting functionality, including functionality not in STL. quick_sort, heap_sort, merge_sort, shell_sort, insertion_sort, etc. |
+| numeric | Numeric algorithms: accumulate, inner_product, partial_sum, adjacent_difference, etc. |
+| heap | Heap structure functionality: make_heap, push_heap, pop_heap, sort_heap, is_heap, remove_heap, etc. |
+| stack | Adapts any container into a stack. |
+| queue | Adapts any container into a queue. |
+| priority_queue | Implements a conventional priority queue via a heap structure. |
+| type_traits | Type information, useful for writing optimized and robust code. Also used for implementing optimized containers and algorithms. |
+| utility | pair, make_pair, rel_ops, etc. |
+| functional | Function objects. |
+| iterator | Iteration for containers and algorithms. |
+| smart_ptr | Smart pointers: shared_ptr, shared_array, weak_ptr, scoped_ptr, scoped_array, linked_ptr, linked_array, intrusive_ptr. |
+ 
+
+## Module Behaviour
+
+The overhead sizes listed here refer to an optimized release build; debug builds may add some additional overhead. Some of the overhead sizes may be off by a little bit (usually at most 4 bytes). This is because the values reported here are those that refer to when EASTL's container optimizations have been complete. These optimizations may not have been completed as you are reading this.
+
+| Container |Stores | Container Overhead (32 bit) | Container Overhead (64 bit) | Node Overhead (32 bit) | Node Overhead (64 bit) | Iterator category | size() efficiency | operator[] efficiency | Insert efficiency | Erase via Iterator efficiency | Find efficiency | Sort efficiency |
+|------|------|------|------|------|------|------|------|------|------|------|------|------|
+| slist | T | 8 | 16 | 4 | 8 | f | n | - | 1 | 1 | n | n+ |
+| list | T | 12 | 24 | 8 | 16 | b | n | - | 1 | 1 | n | n log(n) |
+| intrusive_slist | T | 4 | 8 | 4 | 8 | f | n | - | 1 | 1 | 1 | n+ |
+| intrusive_list | T | 8 | 16 | 8 | 16 | b | n | - | 1 | 1 | 1 | n log(n) |
+| array | T | 0 | 0 | 0 | 0 | r | 1 | 1 | - | - | n | n log(n) |
+| vector | T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) |
+| vector_set | T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 |
+| vector_multiset | T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 |
+| vector_map | Key, T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 |
+| vector_multimap | Key, T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 |
+| deque | T | 44 | 84 | 0 | 0 | r | 1 | 1 | 1 at begin or end, else n / 2 | 1 at begin or end, else n / 2 | n | n log(n) |
+| bit_vector | bool | 8 | 16 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) |
+| string (all types) | T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) |
+| set | T | 24 | 44 | 16 | 28 | b | 1 | - | log(n) | log(n) | log(n) | 1 |
+| multiset | T | 24 | 44 | 16 | 28 | b | 1 | - | log(n) | log(n) | log(n) | 1 |
+| map | Key, T | 24 | 44 | 16 | 28 | b | 1 | log(n) | log(n) | log(n) | log(n) | 1 |
+| multimap | Key, T | 24 | 44 | 16 | 28 | b | 1 | - | log(n) | log(n) | log(n) | 1 |
+| hash_set | T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - |
+| hash_multiset | T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - |
+| hash_map | Key, T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - |
+| hash_multimap | Key, T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - |
+| intrusive_hash_set | T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - |
+| intrusive_hash_multiset | T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - |
+| intrusive_hash_map | T <small>(Key == T)</small> | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - |
+| intrusive_hash_multimap | T <small>(Key == T) </small> | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - |
+
+* \- means that the operation does not exist.
+* 1 means amortized constant time. Also known as O(1)
+* n means time proportional to the container size. Also known as O(n)
+* log(n) means time proportional to the natural logarithm of the container size. Also known as O(log(n))
+* n log(n) means time proportional to log(n) times the size of the container. Also known as O(n log(n))
+* n+ means that the time is at least n, and possibly higher.
+* Iterator meanings are: f = forward iterator; b = bidirectional iterator, r = random iterator.
+* Overhead indicates approximate per-element overhead memory required in bytes. Overhead doesn't include possible additional overhead that may be imposed by the memory heap used to allocate nodes. General heaps tend to have between 4 and 16 bytes of overhead per allocation, depending on the heap.
+* Some overhead values are dependent on the structure alignment characteristics in effect. The values reported here are those that would be in effect for a system that requires pointers to be aligned on boundaries of their size and allocations with a minimum of 4 bytes (thus one byte values get rounded up to 4).
+* Some overhead values are dependent on the size_type used by containers. We assume a size_type of 4 bytes, even for 64 bit machines, as this is the EASTL default.
+* Inserting at the end of a vector may cause the vector to be resized; resizing a vector is O(n). However, the amortized time complexity for vector insertions at the end is constant.
+* Sort assumes the usage of the best possible sort for a large container of random data. Some sort algorithms (e.g. quick_sort) require random access iterators and so the sorting of some containers requires a different sort algorithm. We do not include bucket or radix sorts, as they are always O(n).
+* Some containers (e.g. deque, hash*) have unusual data structures that make per-container and per-node overhead calculations not quite account for all memory.
+
+----------------------------------------------
+End of document
diff --git a/EASTL/doc/html/EASTL Benchmarks.html b/EASTL/doc/html/EASTL Benchmarks.html
new file mode 100644
index 0000000..70ff23f
--- /dev/null
+++ b/EASTL/doc/html/EASTL Benchmarks.html
@@ -0,0 +1,330 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL Benchmarks</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta content="Paul Pedriana" name="author">
+ <meta content="Presentation of various benchmarks of EASTL" name="description">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+</head>
+
+
+<body>
+
+
+<h1>EASTL Benchmarks</h1>
+<h2>Introduction</h2>
+<p>This document provides a number of benchmark results of EASTL.
+ Where possible, these benchmarks are implemented as comparisons
+ with equivalent functionality found in other libraries such as
+ compiler STL libraries or other well-known libraries. These
+ comparison benchmarks concentrate on highlighting the differences
+ between implementations rather than the similarities. In many
+ mundane cases -- such as accessing a vector element via operator []
+ -- virtually all vector/array implementations you are likely to run
+ into will have identical performance.<br>
+
+
+
+<br>
+
+
+
+It's also important to note that the platform you run on can make a
+ significant difference in the results. On a modern 3+GHz Windows PC
+ many operations are fast due to large memory caches, intelligent
+ branch prediction, and parallel instruction execution. However, on
+ embedded or console systems none of these may be the case.
+ <br>
+
+
+
+<br>
+
+
+
+While EASTL generally outperforms std STL, there are some benchmarks
+ here in which EASTL is slower than std STL. There are three primary
+explanations of this:</p>
+<ol>
+
+
+
+ <li>EASTL is making some kind of speed, memory, or design tradeoff
+that results in the given speed difference. In may such cases, EASTL
+goes slower on one benchmark in order to go faster on another benchmark
+deemed more important. This explanation constitutes about 60% of the
+cases.</li>
+
+
+
+ <li>Compiler optimizations and resulting code generation is
+coincidencally favoring one kind of implementation over another, often
+when they are visually virtually identical. This explantation
+constitutes about 30% of the cases.</li>
+
+
+
+ <li>EASTL is simply not yet as optimized as it could be. This
+explanation constitutes about 10% of the cases (as of this writing
+there are about three such functions throughout EASTL). </li>
+
+
+
+</ol>
+
+
+
+<h2> Benchmarks</h2>
+<p>Below is a table of links to detailed benchmark results derived from
+ the Benchmark test present in the EASTL package. The detailed results
+ are present below the table. Additional platforms will be added as
+ results become available for those platforms. Debug benchmarks are
+ present because (lack of) debug performance can be significant for
+ highly templated libraries. EASTL has specific optimizations to enhance
+ debug performance relative to other standard libraries; in some cases
+ it is 10x or more faster than alternatives (though there are exceptions where EASTL is slower). Feel free to submit results
+ for additional compilers/platforms.<br>
+
+
+
+</p>
+<table style="text-align: left; width: 550px; margin-left: 40px;" border="1" cellpadding="2" cellspacing="2">
+
+
+ <tbody>
+
+
+ <tr>
+
+
+ <td style="text-align: center;"><span style="font-weight: bold;">Platform</span></td>
+
+
+ <td style="text-align: center;"><span style="font-weight: bold;">Compiler</span></td>
+
+
+ <td style="font-weight: bold; text-align: center;">STL type</td>
+
+
+ <td style="font-weight: bold; text-align: center;">Build</td>
+
+
+ <td style="text-align: center;"><span style="font-weight: bold;">Results</span></td>
+
+
+ </tr>
+
+
+
+
+
+ <tr>
+
+
+ <td>Win32</td>
+
+
+ <td>VC++ 7.1</td>
+
+
+ <td>Microsoft (Dinkumware)</td>
+
+
+ <td>Debug</td>
+
+
+ <td><a href="#Win32.VC71.MS.Debug">Detail</a></td>
+
+
+ </tr>
+
+
+ <tr>
+
+
+ <td>Win32</td>
+
+
+ <td>VC++ 7.1</td>
+
+
+ <td>Microsoft (Dinkumware)</td>
+
+
+ <td>Release</td>
+
+
+ <td><a href="#Win32.VC71.MS.Release">Detail</a></td>
+
+
+ </tr>
+
+
+ <tr>
+
+
+ <td>Win32</td>
+
+
+ <td>VC++ 7.1</td>
+
+
+ <td>STLPort</td>
+
+
+ <td>Debug</td>
+
+
+ <td><a href="#Win32.VC71.STLPort.Debug">Detail</a></td>
+
+
+ </tr>
+
+
+ <tr>
+
+
+ <td>Win32</td>
+
+
+ <td>VC++ 7.1</td>
+
+
+ <td>STLPort</td>
+
+
+ <td>Release</td>
+
+
+ <td><a href="#Win32.VC71.STLPort.Release">Detail</a></td>
+
+
+ </tr>
+
+
+
+ </tbody>
+</table>
+
+
+
+
+
+
+
+
+
+
+
+
+
+<h2>
+
+
+
+
+ <a name="Win32.VC71.MS.Debug"></a>Win32.VC71.MS.Debug<span style="font-weight: bold;"></span><span style="font-weight: bold;"></span></h2>
+<div style="margin-left: 40px;">
+<pre>EASTL version: 0.96.00<br>Platform: Windows on X86<br>Compiler: Microsoft Visual C++ compiler, version 1310<br>Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled.<br>Build: Debug. Inlining disabled. STL debug features disabled.<br><br>Values are times to complete tests; smaller values are better.<br>Alarm indicates a greater than 10% difference.<br><br>Test VC++ EASTL Ratio Alarm<br>----------------------------------------------------------------------------------------<br>algorithm/adj_find/vector&lt;TestObject&gt; 33061345 6497757 5.09 *<br>algorithm/copy/vector&lt;LargePOD&gt; 5844906 4876076 1.20 *<br>algorithm/copy/vector&lt;uint32_t&gt; 1634346 166065 9.84 *<br>algorithm/copy_backward/vector&lt;LargePOD&gt; 4515974 4638892 0.97<br>algorithm/copy_backward/vector&lt;uint32_t&gt; 1821168 121746 14.96 *<br>algorithm/count/vector&lt;uint64_t&gt; 17048884 2720766 6.27 *<br>algorithm/equal_range/vector&lt;uint64_t&gt; 1111147812 448756888 2.48 *<br>algorithm/fill/bool[] 1728722 91936 18.80 *<br>algorithm/fill/char[]/'d' 1299200 33745 38.50 *<br>algorithm/fill/vector&lt;char&gt;/'d' 10205092 33796 100.00 *<br>algorithm/fill/vector&lt;char&gt;/0 10200748 33805 100.00 *<br>algorithm/fill/vector&lt;uint64_t&gt; 10416538 1399687 7.44 *<br>algorithm/fill/vector&lt;void*&gt; 10221837 1307700 7.82 *<br>algorithm/fill_n/bool[] 1399033 34196 40.91 *<br>algorithm/fill_n/char[] 1299225 33754 38.49 *<br>algorithm/fill_n/vector&lt;uint64_t&gt; 5961637 1371900 4.35 *<br>algorithm/find_end/string/end 16569373 2657372 6.24 *<br>algorithm/find_end/string/middle 16558638 20242410 0.82 *<br>algorithm/find_end/string/none 16811207 40480468 0.42 *<br>algorithm/lex_cmp/schar[] 1749674 194429 9.00 *<br>algorithm/lex_cmp/vector&lt;TestObject&gt; 32824195 5253587 6.25 *<br>algorithm/lex_cmp/vector&lt;uchar&gt; 29852034 202658 100.00 *<br>algorithm/lower_bound/vector&lt;TestObject&gt; 798624462 350027935 2.28 *<br>algorithm/min_element/vector&lt;TestObject&gt; 21675298 5314676 4.08 *<br>algorithm/rand_shuffle/vector&lt;uint64_t&gt; 84236190 43677506 1.93 *<br>algorithm/reverse/list&lt;TestObject&gt; 3007292 2105799 1.43 *<br>algorithm/reverse/vector&lt;TestObject&gt; 2974618 2124796 1.40 *<br>algorithm/search/string&lt;char&gt; 16228158 3594268 4.52 *<br>algorithm/search_n/string&lt;char&gt; 16926985 1522096 11.12 *<br>algorithm/unique/vector&lt;TestObject&gt; 54206243 9988002 5.43 *<br>algorithm/unique/vector&lt;uint32_t&gt; 26940079 1741991 15.47 *<br>algorithm/unique/vector&lt;uint64_t&gt; 47621344 5213127 9.13 *<br>algorithm/upper_bound/vector&lt;uint32_t&gt; 372381295 137901552 2.70 *<br><br>bitset&lt;1500&gt;/&gt;&gt;=/1 90196544 92539832 0.97<br>bitset&lt;1500&gt;/count 50753832 53742117 0.94<br>bitset&lt;1500&gt;/flip 86935875 85121117 1.02<br>bitset&lt;1500&gt;/reset 78153837 79922611 0.98<br>bitset&lt;1500&gt;/set() 79214968 79360658 1.00<br>bitset&lt;1500&gt;/set(i) 11300589 12199651 0.93<br>bitset&lt;1500&gt;/test 11282679 13186450 0.86 *<br><br>bitset&lt;15&gt;/&gt;&gt;=/1 10500577 6000559 1.75 *<br>bitset&lt;15&gt;/count 4000356 6399753 0.63 *<br>bitset&lt;15&gt;/flip 7268877 5647944 1.29 *<br>bitset&lt;15&gt;/reset 8564235 5800163 1.48 *<br>bitset&lt;15&gt;/set() 9935523 5914012 1.68 *<br>bitset&lt;15&gt;/set(i) 11199703 12503637 0.90 *<br>bitset&lt;15&gt;/test 10600623 12899592 0.82 *<br><br>bitset&lt;35&gt;/&gt;&gt;=/1 13076052 6599834 1.98 *<br>bitset&lt;35&gt;/count 4800384 11500330 0.42 *<br>bitset&lt;35&gt;/flip 7915439 5816313 1.36 *<br>bitset&lt;35&gt;/reset 9400049 5803180 1.62 *<br>bitset&lt;35&gt;/set() 10701152 5840316 1.83 *<br>bitset&lt;35&gt;/set(i) 11342936 12271128 0.92<br>bitset&lt;35&gt;/test 10670799 13099682 0.81 *<br><br>bitset&lt;75&gt;/&gt;&gt;=/1 14198834 17151088 0.83 *<br>bitset&lt;75&gt;/count 5795530 8576373 0.68 *<br>bitset&lt;75&gt;/flip 8516703 8922995 0.95<br>bitset&lt;75&gt;/reset 9999970 8526095 1.17 *<br>bitset&lt;75&gt;/set() 11124877 9009686 1.23 *<br>bitset&lt;75&gt;/set(i) 11300563 12531618 0.90 *<br>bitset&lt;75&gt;/test 11031913 13100523 0.84 *<br><br>deque&lt;ValuePair&gt;/erase 743801706 335646802 2.22 *<br>deque&lt;ValuePair&gt;/insert 742331809 341912866 2.17 *<br>deque&lt;ValuePair&gt;/iteration 29097030 16315827 1.78 *<br>deque&lt;ValuePair&gt;/operator[] 49859598 24026313 2.08 *<br>deque&lt;ValuePair&gt;/push_back 424807033 34497608 12.31 *<br>deque&lt;ValuePair&gt;/push_front 402313373 38006322 10.59 *<br>deque&lt;ValuePair&gt;/sort 725101017 581796551 1.25 *<br><br>hash_map&lt;string, uint32_t&gt;/clear 559462 961019 0.58 *<br>hash_map&lt;string, uint32_t&gt;/count 53377807 8091448 6.60 *<br>hash_map&lt;string, uint32_t&gt;/erase pos 613573 858084 0.72 *<br>hash_map&lt;string, uint32_t&gt;/erase range 5488748 461134 11.90 *<br>hash_map&lt;string, uint32_t&gt;/erase val 35760096 16379858 2.18 *<br>hash_map&lt;string, uint32_t&gt;/find 43490335 10324823 4.21 *<br>hash_map&lt;string, uint32_t&gt;/find_as/char* 49343818 8617139 5.73 *<br>hash_map&lt;string, uint32_t&gt;/insert 107420281 168690439 0.64 *<br>hash_map&lt;string, uint32_t&gt;/iteration 2456356 1255153 1.96 *<br>hash_map&lt;string, uint32_t&gt;/operator[] 47209502 12581624 3.75 *<br><br>hash_map&lt;uint32_t, TestObject&gt;/clear 533172 546449 0.98<br>hash_map&lt;uint32_t, TestObject&gt;/count 28667432 2899997 9.89 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase pos 683239 538289 1.27 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase range 9632676 253037 38.07 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase val 25466026 7752188 3.29 *<br>hash_map&lt;uint32_t, TestObject&gt;/find 20048253 4678502 4.29 *<br>hash_map&lt;uint32_t, TestObject&gt;/insert 71085798 37686187 1.89 *<br>hash_map&lt;uint32_t, TestObject&gt;/iteration 1460318 1338317 1.09<br>hash_map&lt;uint32_t, TestObject&gt;/operator[] 23226692 7888748 2.94 *<br><br>heap (uint32_t[])/make_heap 5399966 6961305 0.78 *<br>heap (uint32_t[])/pop_heap 108060534 103511318 1.04<br>heap (uint32_t[])/push_heap 22595661 16640688 1.36 *<br>heap (uint32_t[])/sort_heap 93559424 83076731 1.13 *<br><br>heap (vector&lt;TestObject&gt;)/make_heap 91770743 21724870 4.22 *<br>heap (vector&lt;TestObject&gt;)/pop_heap 1175599317 284007398 4.14 *<br>heap (vector&lt;TestObject&gt;)/push_heap 207804541 45918046 4.53 *<br>heap (vector&lt;TestObject&gt;)/sort_heap 970394145 208321477 4.66 *<br><br>list&lt;TestObject&gt;/ctor(it) 805539509 760938607 1.06<br>list&lt;TestObject&gt;/ctor(n) 80959236 75106995 1.08<br>list&lt;TestObject&gt;/erase 1052543704 1044976137 1.01<br>list&lt;TestObject&gt;/find 97785267 75970884 1.29 *<br>list&lt;TestObject&gt;/insert 873895175 807051107 1.08<br>list&lt;TestObject&gt;/push_back 812797710 780742425 1.04<br>list&lt;TestObject&gt;/remove 1850600714 1436980599 1.29 *<br>list&lt;TestObject&gt;/reverse 180270465 80466636 2.24 *<br>list&lt;TestObject&gt;/size/1 440148 599642 0.73 *<br>list&lt;TestObject&gt;/size/10 439433 1329817 0.33 * EASTL intentionally implements list::size as O(n).<br>list&lt;TestObject&gt;/size/100 439595 11030060 0.04 * EASTL intentionally implements list::size as O(n).<br>list&lt;TestObject&gt;/splice 177106094 69383027 2.55 *<br><br>map&lt;TestObject, uint32_t&gt;/clear 508283 470807 1.08<br>map&lt;TestObject, uint32_t&gt;/count 43145354 14280357 3.02 *<br>map&lt;TestObject, uint32_t&gt;/equal_range 38594004 16520447 2.34 *<br>map&lt;TestObject, uint32_t&gt;/erase/key 33948082 16123175 2.11 *<br>map&lt;TestObject, uint32_t&gt;/erase/pos 578332 455201 1.27 * MS uses a code bloating implementation of erase.<br>map&lt;TestObject, uint32_t&gt;/erase/range 387345 284538 1.36 *<br>map&lt;TestObject, uint32_t&gt;/find 22897224 12766100 1.79 *<br>map&lt;TestObject, uint32_t&gt;/insert 61665800 47286928 1.30 *<br>map&lt;TestObject, uint32_t&gt;/iteration 1977202 745391 2.65 *<br>map&lt;TestObject, uint32_t&gt;/lower_bound 19892941 12260928 1.62 *<br>map&lt;TestObject, uint32_t&gt;/operator[] 24199084 15429634 1.57 *<br>map&lt;TestObject, uint32_t&gt;/upper_bound 19842409 12064441 1.64 *<br><br>set&lt;uint32_t&gt;/clear 1027625 1000901 1.03<br>set&lt;uint32_t&gt;/count 39730182 13329565 2.98 *<br>set&lt;uint32_t&gt;/equal_range 34681649 14768827 2.35 *<br>set&lt;uint32_t&gt;/erase range 841458 602030 1.40 *<br>set&lt;uint32_t&gt;/erase/pos 1380485 1084303 1.27 * MS uses a code bloating implementation of erase.<br>set&lt;uint32_t&gt;/erase/val 31617425 13344023 2.37 *<br>set&lt;uint32_t&gt;/find 19582428 10788864 1.82 *<br>set&lt;uint32_t&gt;/insert 61434014 48232086 1.27 *<br>set&lt;uint32_t&gt;/iteration 1512057 667820 2.26 *<br>set&lt;uint32_t&gt;/lower_bound 18394885 10402785 1.77 *<br>set&lt;uint32_t&gt;/upper_bound 17189083 10554425 1.63 *<br><br>sort/q_sort/TestObject[] 87088799 15037988 5.79 *<br>sort/q_sort/TestObject[]/sorted 21502892 3284299 6.55 *<br>sort/q_sort/vector&lt;TestObject&gt; 87962047 15004677 5.86 *<br>sort/q_sort/vector&lt;TestObject&gt;/sorted 21396523 3341163 6.40 *<br>sort/q_sort/vector&lt;ValuePair&gt; 80334589 10429161 7.70 *<br>sort/q_sort/vector&lt;ValuePair&gt;/sorted 22133295 3230553 6.85 *<br>sort/q_sort/vector&lt;uint32&gt; 72195388 5940302 12.15 *<br>sort/q_sort/vector&lt;uint32&gt;/sorted 19635171 995495 19.72 *<br><br>string&lt;char16_t&gt;/compare 523013373 534722089 0.98<br>string&lt;char16_t&gt;/erase/pos,n 3446597 3439492 1.00<br>string&lt;char16_t&gt;/find/p,pos,n 383873158 441902786 0.87 *<br>string&lt;char16_t&gt;/find_first_not_of/p,pos,n 174157 134131 1.30 *<br>string&lt;char16_t&gt;/find_first_of/p,pos,n 11715423 8520944 1.37 *<br>string&lt;char16_t&gt;/find_last_of/p,pos,n 1871556 1226457 1.53 *<br>string&lt;char16_t&gt;/insert/pos,p 3624877 3357058 1.08<br>string&lt;char16_t&gt;/iteration 6766787933 581916665 11.63 *<br>string&lt;char16_t&gt;/operator[] 4820827 2335579 2.06 *<br>string&lt;char16_t&gt;/push_back 59812962 6757466 8.85 *<br>string&lt;char16_t&gt;/replace/pos,n,p,n 4371279 4459713 0.98<br>string&lt;char16_t&gt;/reserve 2307530 1919386 1.20 *<br>string&lt;char16_t&gt;/rfind/p,pos,n 734826 372615 1.97 *<br>string&lt;char16_t&gt;/size 41608 28866 1.44 *<br>string&lt;char16_t&gt;/swap 1033932 1490994 0.69 *<br><br>string&lt;char8_t&gt;/compare 63086797 64194771 0.98<br>string&lt;char8_t&gt;/erase/pos,n 2045687 1960270 1.04<br>string&lt;char8_t&gt;/find/p,pos,n 123872549 471364764 0.26 *<br>string&lt;char8_t&gt;/find_first_not_of/p,pos,n 140013 130271 1.07<br>string&lt;char8_t&gt;/find_first_of/p,pos,n 8051906 8749994 0.92<br>string&lt;char8_t&gt;/find_last_of/p,pos,n 1318835 1230715 1.07<br>string&lt;char8_t&gt;/insert/pos,p 1770610 1724234 1.03<br>string&lt;char8_t&gt;/iteration 28112136 2544475 11.05 *<br>string&lt;char8_t&gt;/operator[] 4810525 2255841 2.13 *<br>string&lt;char8_t&gt;/push_back 54869634 6127447 8.95 *<br>string&lt;char8_t&gt;/replace/pos,n,p,n 2737578 2847900 0.96<br>string&lt;char8_t&gt;/reserve 1123395 394902 2.84 *<br>string&lt;char8_t&gt;/rfind/p,pos,n 737299 368518 2.00 *<br>string&lt;char8_t&gt;/size 42245 26801 1.58 *<br>string&lt;char8_t&gt;/swap 1036142 1491028 0.69 *<br><br>vector&lt;uint64&gt;/erase 56417135 55770251 1.01<br>vector&lt;uint64&gt;/insert 56617761 56100468 1.01<br>vector&lt;uint64&gt;/iteration 10413895 1291269 8.06 *<br>vector&lt;uint64&gt;/operator[] 23507193 3479390 6.76 *<br>vector&lt;uint64&gt;/push_back 34687939 13806627 2.51 *<br>vector&lt;uint64&gt;/sort 256886550 84669657 3.03 *<br><br></pre>
+
+
+
+
+
+</div>
+
+
+
+
+
+<h2>
+
+
+
+
+ <a name="Win32.VC71.MS.Release"></a>Win32.VC71.MS.Release<span style="font-weight: bold;"></span><span style="font-weight: bold;"></span></h2>
+<div style="margin-left: 40px;">
+<pre>EASTL version: 0.96.00<br>Platform: Windows on X86<br>Compiler: Microsoft Visual C++ compiler, version 1310<br>Allocator: PPMalloc::GeneralAllocator. Thread safety enabled.<br>Build: Full optimization. Inlining enabled.<br><br>Values are times to complete tests; smaller values are better.<br>Alarm indicates a greater than 10% difference.<br><br>Test VC++ EASTL Ratio Alarm<br>----------------------------------------------------------------------------------------<br>algorithm/adj_find/vector&lt;TestObject&gt; 2783546 2750660 1.01<br>algorithm/copy/vector&lt;LargePOD&gt; 6474025 4972738 1.30 *<br>algorithm/copy/vector&lt;uint32_t&gt; 157267 173162 0.91<br>algorithm/copy_backward/vector&lt;LargePOD&gt; 4836406 4374780 1.11 *<br>algorithm/copy_backward/vector&lt;uint32_t&gt; 104780 120912 0.87 *<br>algorithm/count/vector&lt;uint64_t&gt; 1368440 1368696 1.00<br>algorithm/equal_range/vector&lt;uint64_t&gt; 114199387 102783938 1.11 *<br>algorithm/fill/bool[] 253215 27353 9.26 *<br>algorithm/fill/char[]/'d' 253164 27404 9.24 *<br>algorithm/fill/vector&lt;char&gt;/'d' 253105 27362 9.25 *<br>algorithm/fill/vector&lt;char&gt;/0 253275 27353 9.26 *<br>algorithm/fill/vector&lt;uint64_t&gt; 397001 394323 1.01<br>algorithm/fill/vector&lt;void*&gt; 547196 642362 0.85 *<br>algorithm/fill_n/bool[] 229177 27361 8.38 *<br>algorithm/fill_n/char[] 228845 27404 8.35 *<br>algorithm/fill_n/vector&lt;uint64_t&gt; 565233 1376822 0.41 *<br>algorithm/find_end/string/end 2107116 82356 25.59 *<br>algorithm/find_end/string/middle 2111672 664283 3.18 *<br>algorithm/find_end/string/none 2110423 1519596 1.39 *<br>algorithm/lex_cmp/schar[] 741021 176162 4.21 *<br>algorithm/lex_cmp/vector&lt;TestObject&gt; 2610494 2642183 0.99<br>algorithm/lex_cmp/vector&lt;uchar&gt; 697595 167866 4.16 *<br>algorithm/lower_bound/vector&lt;TestObject&gt; 62462233 58146664 1.07<br>algorithm/min_element/vector&lt;TestObject&gt; 4350385 2671227 1.63 *<br>algorithm/rand_shuffle/vector&lt;uint64_t&gt; 10868261 11300818 0.96<br>algorithm/reverse/list&lt;TestObject&gt; 483718 470024 1.03<br>algorithm/reverse/vector&lt;TestObject&gt; 476739 484322 0.98<br>algorithm/search/string&lt;char&gt; 2560387 1259496 2.03 *<br>algorithm/search_n/string&lt;char&gt; 2770991 458524 6.04 *<br>algorithm/unique/vector&lt;TestObject&gt; 4194520 4658910 0.90 *<br>algorithm/unique/vector&lt;uint32_t&gt; 538730 787924 0.68 *<br>algorithm/unique/vector&lt;uint64_t&gt; 3169829 2575636 1.23 *<br>algorithm/upper_bound/vector&lt;uint32_t&gt; 27495562 25321593 1.09<br><br>bitset&lt;1500&gt;/&gt;&gt;=/1 33464228 33469719 1.00<br>bitset&lt;1500&gt;/count 18736116 18814903 1.00<br>bitset&lt;1500&gt;/flip 19299309 18605438 1.04<br>bitset&lt;1500&gt;/reset 22200487 15262847 1.45 *<br>bitset&lt;1500&gt;/set() 14418193 17557319 0.82 *<br>bitset&lt;1500&gt;/set(i) 1599250 1599199 1.00<br>bitset&lt;1500&gt;/test 1599241 1599233 1.00<br><br>bitset&lt;15&gt;/&gt;&gt;=/1 2199222 2264442 0.97<br>bitset&lt;15&gt;/count 1399406 1399193 1.00<br>bitset&lt;15&gt;/flip 1266712 1199197 1.06<br>bitset&lt;15&gt;/reset 1399364 1399109 1.00<br>bitset&lt;15&gt;/set() 1199197 999201 1.20 *<br>bitset&lt;15&gt;/set(i) 1599258 1462952 1.09<br>bitset&lt;15&gt;/test 1599275 1599224 1.00<br><br>bitset&lt;35&gt;/&gt;&gt;=/1 2599266 1933376 1.34 *<br>bitset&lt;35&gt;/count 2599240 2592559 1.00<br>bitset&lt;35&gt;/flip 1693124 1199188 1.41 *<br>bitset&lt;35&gt;/reset 1399406 999201 1.40 *<br>bitset&lt;35&gt;/set() 1599403 1199205 1.33 *<br>bitset&lt;35&gt;/set(i) 1599241 1599190 1.00<br>bitset&lt;35&gt;/test 1599250 1599232 1.00<br><br>bitset&lt;75&gt;/&gt;&gt;=/1 4199332 4199213 1.00<br>bitset&lt;75&gt;/count 2999497 2199341 1.36 *<br>bitset&lt;75&gt;/flip 2399499 1830178 1.31 *<br>bitset&lt;75&gt;/reset 2199468 1199197 1.83 *<br>bitset&lt;75&gt;/set() 1999387 1199851 1.67 *<br>bitset&lt;75&gt;/set(i) 1599266 1599198 1.00<br>bitset&lt;75&gt;/test 1599241 1662651 0.96<br><br>deque&lt;ValuePair&gt;/erase 90444165 37113253 2.44 *<br>deque&lt;ValuePair&gt;/insert 93299349 36175167 2.58 *<br>deque&lt;ValuePair&gt;/iteration 2756414 2122076 1.30 *<br>deque&lt;ValuePair&gt;/operator[] 5117969 4632075 1.10<br>deque&lt;ValuePair&gt;/push_back 30300757 3060357 9.90 *<br>deque&lt;ValuePair&gt;/push_front 25498529 2808392 9.08 *<br>deque&lt;ValuePair&gt;/sort 142283047 111292464 1.28 *<br><br>hash_map&lt;string, uint32_t&gt;/clear 146769 389699 0.38 *<br>hash_map&lt;string, uint32_t&gt;/count 13059434 3460324 3.77 *<br>hash_map&lt;string, uint32_t&gt;/erase pos 184246 331925 0.56 *<br>hash_map&lt;string, uint32_t&gt;/erase range 382432 167237 2.29 *<br>hash_map&lt;string, uint32_t&gt;/erase val 6187898 3302114 1.87 *<br>hash_map&lt;string, uint32_t&gt;/find 11289369 3459024 3.26 *<br>hash_map&lt;string, uint32_t&gt;/find_as/char* 13559192 3662387 3.70 *<br>hash_map&lt;string, uint32_t&gt;/insert 17514012 14095176 1.24 *<br>hash_map&lt;string, uint32_t&gt;/iteration 801014 218450 3.67 *<br>hash_map&lt;string, uint32_t&gt;/operator[] 11457065 3690385 3.10 *<br><br>hash_map&lt;uint32_t, TestObject&gt;/clear 141865 265379 0.53 *<br>hash_map&lt;uint32_t, TestObject&gt;/count 1766045 703613 2.51 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase pos 172337 218458 0.79 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase range 537846 102340 5.26 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase val 2220132 1441787 1.54 *<br>hash_map&lt;uint32_t, TestObject&gt;/find 1612994 1043953 1.55 *<br>hash_map&lt;uint32_t, TestObject&gt;/insert 7141547 4348056 1.64 *<br>hash_map&lt;uint32_t, TestObject&gt;/iteration 199512 169328 1.18 *<br>hash_map&lt;uint32_t, TestObject&gt;/operator[] 1831733 1519707 1.21 *<br><br>heap (uint32_t[])/make_heap 3366247 1949093 1.73 *<br>heap (uint32_t[])/pop_heap 57280514 53779440 1.07<br>heap (uint32_t[])/push_heap 9700217 7582935 1.28 *<br>heap (uint32_t[])/sort_heap 47227751 46131948 1.02<br><br>heap (vector&lt;TestObject&gt;)/make_heap 11458442 11510819 1.00<br>heap (vector&lt;TestObject&gt;)/pop_heap 122897267 119061132 1.03<br>heap (vector&lt;TestObject&gt;)/push_heap 21688481 21176220 1.02<br>heap (vector&lt;TestObject&gt;)/sort_heap 90867380 88869523 1.02<br><br>list&lt;TestObject&gt;/ctor(it) 74591104 69845817 1.07<br>list&lt;TestObject&gt;/ctor(n) 6243998 5838582 1.07<br>list&lt;TestObject&gt;/erase 299509298 206013676 1.45 *<br>list&lt;TestObject&gt;/find 40927185 14514243 2.82 *<br>list&lt;TestObject&gt;/insert 71277251 47234534 1.51 *<br>list&lt;TestObject&gt;/push_back 73780527 44116725 1.67 *<br>list&lt;TestObject&gt;/remove 786197776 326434612 2.41 *<br>list&lt;TestObject&gt;/reverse 49283128 25029678 1.97 *<br>list&lt;TestObject&gt;/size/1 159741 139400 1.15 *<br>list&lt;TestObject&gt;/size/10 159324 346579 0.46 * EASTL intentionally implements list::size as O(n).<br>list&lt;TestObject&gt;/size/100 159188 97235419 0.00 * EASTL intentionally implements list::size as O(n).<br>list&lt;TestObject&gt;/splice 63548584 19322931 3.29 *<br><br>map&lt;TestObject, uint32_t&gt;/clear 167408 170501 0.98<br>map&lt;TestObject, uint32_t&gt;/count 10213685 4748346 2.15 *<br>map&lt;TestObject, uint32_t&gt;/equal_range 9515053 5677558 1.68 *<br>map&lt;TestObject, uint32_t&gt;/erase/key 6646260 4302300 1.54 *<br>map&lt;TestObject, uint32_t&gt;/erase/pos 297135 327938 0.91 MS uses a code bloating implementation of erase.<br>map&lt;TestObject, uint32_t&gt;/erase/range 148614 163702 0.91<br>map&lt;TestObject, uint32_t&gt;/find 5637531 4767055 1.18 *<br>map&lt;TestObject, uint32_t&gt;/insert 9591128 9030349 1.06<br>map&lt;TestObject, uint32_t&gt;/iteration 323595 325261 0.99<br>map&lt;TestObject, uint32_t&gt;/lower_bound 5398239 4784089 1.13 *<br>map&lt;TestObject, uint32_t&gt;/operator[] 5631250 5141166 1.10<br>map&lt;TestObject, uint32_t&gt;/upper_bound 5436336 4762431 1.14 *<br><br>set&lt;uint32_t&gt;/clear 155983 156026 1.00<br>set&lt;uint32_t&gt;/count 9635965 4392146 2.19 *<br>set&lt;uint32_t&gt;/equal_range 8504157 5247832 1.62 *<br>set&lt;uint32_t&gt;/erase range 140488 119408 1.18 *<br>set&lt;uint32_t&gt;/erase/pos 260678 286697 0.91 MS uses a code bloating implementation of erase.<br>set&lt;uint32_t&gt;/erase/val 6008225 4012825 1.50 *<br>set&lt;uint32_t&gt;/find 5145432 4381945 1.17 *<br>set&lt;uint32_t&gt;/insert 8087129 8697251 0.93<br>set&lt;uint32_t&gt;/iteration 271507 304538 0.89 *<br>set&lt;uint32_t&gt;/lower_bound 4666228 4404250 1.06<br>set&lt;uint32_t&gt;/upper_bound 4623600 4402974 1.05<br><br>sort/q_sort/TestObject[] 9596169 5578652 1.72 *<br>sort/q_sort/TestObject[]/sorted 602463 1016132 0.59 *<br>sort/q_sort/vector&lt;TestObject&gt; 9674828 5430199 1.78 *<br>sort/q_sort/vector&lt;TestObject&gt;/sorted 606908 1111647 0.55 *<br>sort/q_sort/vector&lt;ValuePair&gt; 6284194 3423452 1.84 *<br>sort/q_sort/vector&lt;ValuePair&gt;/sorted 711629 569364 1.25 *<br>sort/q_sort/vector&lt;uint32&gt; 5453379 2916146 1.87 *<br>sort/q_sort/vector&lt;uint32&gt;/sorted 537047 419144 1.28 *<br><br>string&lt;char16_t&gt;/compare 435083295 251985824 1.73 *<br>string&lt;char16_t&gt;/erase/pos,n 3454842 3451858 1.00<br>string&lt;char16_t&gt;/find/p,pos,n 401954723 165298157 2.43 *<br>string&lt;char16_t&gt;/find_first_not_of/p,pos,n 131452 65374 2.01 *<br>string&lt;char16_t&gt;/find_first_of/p,pos,n 11657444 4144515 2.81 *<br>string&lt;char16_t&gt;/find_last_of/p,pos,n 1604248 567571 2.83 *<br>string&lt;char16_t&gt;/insert/pos,p 3398734 3355460 1.01<br>string&lt;char16_t&gt;/iteration 218856504 218771844 1.00<br>string&lt;char16_t&gt;/operator[] 714161 240023 2.98 *<br>string&lt;char16_t&gt;/push_back 34968235 2444897 14.30 *<br>string&lt;char16_t&gt;/replace/pos,n,p,n 4226693 4198498 1.01<br>string&lt;char16_t&gt;/reserve 1901765 390805 4.87 *<br>string&lt;char16_t&gt;/rfind/p,pos,n 195483 150985 1.29 *<br>string&lt;char16_t&gt;/size 11169 11245 0.99<br>string&lt;char16_t&gt;/swap 1459280 419807 3.48 *<br><br>string&lt;char8_t&gt;/compare 63071275 77209580 0.82 *<br>string&lt;char8_t&gt;/erase/pos,n 2008652 1944494 1.03<br>string&lt;char8_t&gt;/find/p,pos,n 123201023 167536164 0.74 *<br>string&lt;char8_t&gt;/find_first_not_of/p,pos,n 93372 67864 1.38 *<br>string&lt;char8_t&gt;/find_first_of/p,pos,n 7542492 3375758 2.23 *<br>string&lt;char8_t&gt;/find_last_of/p,pos,n 933972 583576 1.60 *<br>string&lt;char8_t&gt;/insert/pos,p 1737213 1750847 0.99<br>string&lt;char8_t&gt;/iteration 893834 899130 0.99<br>string&lt;char8_t&gt;/operator[] 817879 313437 2.61 *<br>string&lt;char8_t&gt;/push_back 20857734 2004410 10.41 *<br>string&lt;char8_t&gt;/replace/pos,n,p,n 2578696 2607655 0.99<br>string&lt;char8_t&gt;/reserve 915127 85289 10.73 *<br>string&lt;char8_t&gt;/rfind/p,pos,n 196103 148894 1.32 *<br>string&lt;char8_t&gt;/size 11619 11220 1.04<br>string&lt;char8_t&gt;/swap 1461056 419874 3.48 *<br><br>vector&lt;uint64&gt;/erase 55235116 55284587 1.00<br>vector&lt;uint64&gt;/insert 55166046 55142755 1.00<br>vector&lt;uint64&gt;/iteration 553954 509719 1.09<br>vector&lt;uint64&gt;/operator[] 1284239 798516 1.61 *<br>vector&lt;uint64&gt;/push_back 5399549 3867959 1.40 *<br>vector&lt;uint64&gt;/sort 43636314 42619952 1.02<br></pre>
+
+
+
+
+
+</div>
+
+
+
+
+
+<h2>
+
+ <a name="Win32.VC71.STLPort.Debug"></a>Win32.VC71.STLPort.Debug<span style="font-weight: bold;"></span><span style="font-weight: bold;"></span></h2>
+<div style="margin-left: 40px;">
+<pre>EASTL version: 0.96.00<br>Platform: Windows on X86<br>Compiler: Microsoft Visual C++ compiler, version 1310<br>Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled.<br>Build: Debug. Inlining disabled. STL debug features disabled.<br><br>Values are times to complete tests; smaller values are better.<br>Alarm indicates a greater than 10% difference.<br><br>Test STLPort EASTL Ratio Alarm<br>----------------------------------------------------------------------------------------<br>algorithm/adj_find/vector&lt;TestObject&gt; 5661170 5689517 1.00<br>algorithm/copy/vector&lt;LargePOD&gt; 5573815 5124428 1.09<br>algorithm/copy/vector&lt;uint32_t&gt; 148273 125782 1.18 *<br>algorithm/copy_backward/vector&lt;LargePOD&gt; 5429791 4834510 1.12 *<br>algorithm/copy_backward/vector&lt;uint32_t&gt; 156765 163038 0.96<br>algorithm/count/vector&lt;uint64_t&gt; 2730922 2730072 1.00<br>algorithm/equal_range/vector&lt;uint64_t&gt; 639366489 452896251 1.41 *<br>algorithm/fill/bool[] 1299326 27361 47.49 *<br>algorithm/fill/char[]/'d' 27378 27361 1.00<br>algorithm/fill/vector&lt;char&gt;/'d' 34459 27361 1.26 *<br>algorithm/fill/vector&lt;char&gt;/0 1299224 27361 47.48 *<br>algorithm/fill/vector&lt;uint64_t&gt; 1400647 1400145 1.00<br>algorithm/fill/vector&lt;void*&gt; 1308779 1309085 1.00<br>algorithm/fill_n/bool[] 1299156 27352 47.50 *<br>algorithm/fill_n/char[] 1299258 27369 47.47 *<br>algorithm/fill_n/vector&lt;uint64_t&gt; 1451162 1313632 1.10<br>algorithm/find_end/string/end 13089999 2526412 5.18 *<br>algorithm/find_end/string/middle 12627412 20190101 0.63 *<br>algorithm/find_end/string/none 12704185 40728803 0.31 *<br>algorithm/lex_cmp/schar[] 1749844 195806 8.94 *<br>algorithm/lex_cmp/vector&lt;TestObject&gt; 5060968 4799882 1.05<br>algorithm/lex_cmp/vector&lt;uchar&gt; 1668354 189490 8.80 *<br>algorithm/lower_bound/vector&lt;TestObject&gt; 450240945 353437573 1.27 *<br>algorithm/min_element/vector&lt;TestObject&gt; 5861744 5326371 1.10<br>algorithm/rand_shuffle/vector&lt;uint64_t&gt; 40780449 45780090 0.89 *<br>algorithm/reverse/list&lt;TestObject&gt; 2657678 2130627 1.25 *<br>algorithm/reverse/vector&lt;TestObject&gt; 2666424 2124889 1.25 *<br>algorithm/search/string&lt;char&gt; 3110379 3613460 0.86 *<br>algorithm/search_n/string&lt;char&gt; 3061665 1521261 2.01 *<br>algorithm/unique/vector&lt;TestObject&gt; 12423684 9485439 1.31 *<br>algorithm/unique/vector&lt;uint32_t&gt; 3718699 1726596 2.15 *<br>algorithm/unique/vector&lt;uint64_t&gt; 6205110 4591631 1.35 *<br>algorithm/upper_bound/vector&lt;uint32_t&gt; 185391094 139336317 1.33 *<br><br>bitset&lt;1500&gt;/&gt;&gt;=/1 120666960 92449816 1.31 * STLPort is broken, neglects wraparound check.<br>bitset&lt;1500&gt;/count 201709793 52874726 3.81 *<br>bitset&lt;1500&gt;/flip 87360297 81737071 1.07<br>bitset&lt;1500&gt;/reset 23950178 77390323 0.31 *<br>bitset&lt;1500&gt;/set() 84608107 76912011 1.10<br>bitset&lt;1500&gt;/set(i) 18023620 12229604 1.47 *<br>bitset&lt;1500&gt;/test 18006553 13276396 1.36 *<br><br>bitset&lt;15&gt;/&gt;&gt;=/1 11935904 6012695 1.99 * STLPort is broken, neglects wraparound check.<br>bitset&lt;15&gt;/count 9368581 6022742 1.56 *<br>bitset&lt;15&gt;/flip 11600706 6533635 1.78 *<br>bitset&lt;15&gt;/reset 5830957 5874690 0.99<br>bitset&lt;15&gt;/set() 11695328 5701621 2.05 *<br>bitset&lt;15&gt;/set(i) 16363205 12570216 1.30 *<br>bitset&lt;15&gt;/test 16743172 13201452 1.27 *<br><br>bitset&lt;35&gt;/&gt;&gt;=/1 22950918 6774457 3.39 * STLPort is broken, neglects wraparound check.<br>bitset&lt;35&gt;/count 12655309 11736256 1.08<br>bitset&lt;35&gt;/flip 13738575 5800042 2.37 *<br>bitset&lt;35&gt;/reset 15561434 5800510 2.68 *<br>bitset&lt;35&gt;/set() 13564283 5600709 2.42 *<br>bitset&lt;35&gt;/set(i) 18519689 12199973 1.52 *<br>bitset&lt;35&gt;/test 18000569 13103566 1.37 *<br><br>bitset&lt;75&gt;/&gt;&gt;=/1 25579525 16669664 1.53 * STLPort is broken, neglects wraparound check.<br>bitset&lt;75&gt;/count 18740698 8480492 2.21 *<br>bitset&lt;75&gt;/flip 13555630 8300335 1.63 *<br>bitset&lt;75&gt;/reset 15200133 8200000 1.85 *<br>bitset&lt;75&gt;/set() 14408112 8001959 1.80 *<br>bitset&lt;75&gt;/set(i) 18137741 12374257 1.47 *<br>bitset&lt;75&gt;/test 18422135 13100038 1.41 *<br><br>deque&lt;ValuePair&gt;/erase 651933790 326443043 2.00 *<br>deque&lt;ValuePair&gt;/insert 659786183 333304660 1.98 *<br>deque&lt;ValuePair&gt;/iteration 23734592 16173706 1.47 *<br>deque&lt;ValuePair&gt;/operator[] 59126816 23911774 2.47 *<br>deque&lt;ValuePair&gt;/push_back 58056988 31859266 1.82 *<br>deque&lt;ValuePair&gt;/push_front 57780891 31743199 1.82 *<br>deque&lt;ValuePair&gt;/sort 818414195 596568113 1.37 *<br><br>hash_map&lt;string, uint32_t&gt;/clear 3422133 2204517 1.55 *<br>hash_map&lt;string, uint32_t&gt;/count 9869545 8624924 1.14 *<br>hash_map&lt;string, uint32_t&gt;/erase pos 3256350 2069299 1.57 *<br>hash_map&lt;string, uint32_t&gt;/erase range 3230203 1151392 2.81 *<br>hash_map&lt;string, uint32_t&gt;/erase val 16860362 15939778 1.06<br>hash_map&lt;string, uint32_t&gt;/find 10286971 9920910 1.04<br>hash_map&lt;string, uint32_t&gt;/find_as/char* 118136025 9458468 12.49 *<br>hash_map&lt;string, uint32_t&gt;/insert 188948336 174490082 1.08<br>hash_map&lt;string, uint32_t&gt;/iteration 4037049 2021036 2.00 *<br>hash_map&lt;string, uint32_t&gt;/operator[] 11472127 12887699 0.89 *<br><br>hash_map&lt;uint32_t, TestObject&gt;/clear 2522264 1331848 1.89 *<br>hash_map&lt;uint32_t, TestObject&gt;/count 3210739 2897063 1.11 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase pos 1862281 1304783 1.43 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase range 698079 579606 1.20 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase val 8806722 7041298 1.25 *<br>hash_map&lt;uint32_t, TestObject&gt;/find 3604875 4709645 0.77 *<br>hash_map&lt;uint32_t, TestObject&gt;/insert 40785711 40376342 1.01<br>hash_map&lt;uint32_t, TestObject&gt;/iteration 3064088 1508834 2.03 *<br>hash_map&lt;uint32_t, TestObject&gt;/operator[] 6053742 8176906 0.74 *<br><br>heap (uint32_t[])/make_heap 5799813 5738596 1.01<br>heap (uint32_t[])/pop_heap 113775168 102076134 1.11 *<br>heap (uint32_t[])/push_heap 21649151 16854845 1.28 *<br>heap (uint32_t[])/sort_heap 97535213 83290735 1.17 *<br><br>heap (vector&lt;TestObject&gt;)/make_heap 22215557 22277063 1.00<br>heap (vector&lt;TestObject&gt;)/pop_heap 275392171 277340039 0.99<br>heap (vector&lt;TestObject&gt;)/push_heap 51479442 47342577 1.09<br>heap (vector&lt;TestObject&gt;)/sort_heap 214474736 218497540 0.98<br><br>list&lt;TestObject&gt;/ctor(it) 767753795 753421427 1.02<br>list&lt;TestObject&gt;/ctor(n) 74185322 73386245 1.01<br>list&lt;TestObject&gt;/erase 1021003824 1033873589 0.99<br>list&lt;TestObject&gt;/find 77666072 74917622 1.04<br>list&lt;TestObject&gt;/insert 788071150 774188737 1.02<br>list&lt;TestObject&gt;/push_back 760490154 737327348 1.03<br>list&lt;TestObject&gt;/remove 1682511938 1434771006 1.17 *<br>list&lt;TestObject&gt;/reverse 87237327 80394623 1.09<br>list&lt;TestObject&gt;/size/1 3828111 599530 6.39 *<br>list&lt;TestObject&gt;/size/10 9600605 1329535 7.22 * EASTL intentionally implements list::size as O(n).<br>list&lt;TestObject&gt;/size/100 62952334 15022551 4.19 * EASTL intentionally implements list::size as O(n).<br>list&lt;TestObject&gt;/splice 96536412 60804817 1.59 *<br><br>map&lt;TestObject, uint32_t&gt;/clear 1142127 1099066 1.04<br>map&lt;TestObject, uint32_t&gt;/count 19659726 14647548 1.34 *<br>map&lt;TestObject, uint32_t&gt;/equal_range 36680687 18219086 2.01 *<br>map&lt;TestObject, uint32_t&gt;/erase/key 28892154 16037774 1.80 *<br>map&lt;TestObject, uint32_t&gt;/erase/pos 1209643 1185495 1.02<br>map&lt;TestObject, uint32_t&gt;/erase/range 715402 670539 1.07<br>map&lt;TestObject, uint32_t&gt;/find 21020992 13429575 1.57 *<br>map&lt;TestObject, uint32_t&gt;/insert 59530871 51120640 1.16 *<br>map&lt;TestObject, uint32_t&gt;/iteration 972825 1191946 0.82 *<br>map&lt;TestObject, uint32_t&gt;/lower_bound 18852651 12495034 1.51 *<br>map&lt;TestObject, uint32_t&gt;/operator[] 22889573 16676736 1.37 *<br>map&lt;TestObject, uint32_t&gt;/upper_bound 18603584 12406922 1.50 *<br><br>set&lt;uint32_t&gt;/clear 919555 882988 1.04<br>set&lt;uint32_t&gt;/count 17561110 12461084 1.41 *<br>set&lt;uint32_t&gt;/equal_range 31522488 15230282 2.07 *<br>set&lt;uint32_t&gt;/erase range 687582 564765 1.22 *<br>set&lt;uint32_t&gt;/erase/pos 1044352 1045355 1.00<br>set&lt;uint32_t&gt;/erase/val 25525304 12940774 1.97 *<br>set&lt;uint32_t&gt;/find 17140751 10704866 1.60 *<br>set&lt;uint32_t&gt;/insert 56035051 45555664 1.23 *<br>set&lt;uint32_t&gt;/iteration 682669 640831 1.07<br>set&lt;uint32_t&gt;/lower_bound 16339932 10475740 1.56 *<br>set&lt;uint32_t&gt;/upper_bound 17779424 10652599 1.67 *<br><br>sort/q_sort/TestObject[] 17000866 14823515 1.15 *<br>sort/q_sort/TestObject[]/sorted 6658559 3263328 2.04 *<br>sort/q_sort/vector&lt;TestObject&gt; 17476629 14953285 1.17 *<br>sort/q_sort/vector&lt;TestObject&gt;/sorted 6667034 3327435 2.00 *<br>sort/q_sort/vector&lt;ValuePair&gt; 15391357 10820848 1.42 *<br>sort/q_sort/vector&lt;ValuePair&gt;/sorted 6617122 3232949 2.05 *<br>sort/q_sort/vector&lt;uint32&gt; 8343906 6014846 1.39 *<br>sort/q_sort/vector&lt;uint32&gt;/sorted 3039430 1003127 3.03 *<br><br>string&lt;char16_t&gt;/compare 1489709846 532664000 2.80 *<br>string&lt;char16_t&gt;/erase/pos,n 3528690 3439864 1.03<br>string&lt;char16_t&gt;/find/p,pos,n 2521448321 443752189 5.68 *<br>string&lt;char16_t&gt;/find_first_not_of/p,pos,n 661206 137419 4.81 *<br>string&lt;char16_t&gt;/find_first_of/p,pos,n 54746434 8521335 6.42 *<br>string&lt;char16_t&gt;/find_last_of/p,pos,n 10607778 1212414 8.75 *<br>string&lt;char16_t&gt;/insert/pos,p 3445016 3360126 1.03<br>string&lt;char16_t&gt;/iteration 580955636 579452556 1.00<br>string&lt;char16_t&gt;/operator[] 2206353 1987809 1.11 *<br>string&lt;char16_t&gt;/push_back 22421368 6007808 3.73 *<br>string&lt;char16_t&gt;/replace/pos,n,p,n 5138454 4464786 1.15 *<br>string&lt;char16_t&gt;/reserve 4922413418 335622 100.00 *<br>string&lt;char16_t&gt;/rfind/p,pos,n 1440308 380578 3.78 *<br>string&lt;char16_t&gt;/size 25355 25398 1.00<br>string&lt;char16_t&gt;/swap 2122704 1490823 1.42 *<br><br>string&lt;char8_t&gt;/compare 77222134 77443134 1.00<br>string&lt;char8_t&gt;/erase/pos,n 1965344 1956521 1.00<br>string&lt;char8_t&gt;/find/p,pos,n 2468091951 474205522 5.20 *<br>string&lt;char8_t&gt;/find_first_not_of/p,pos,n 660960 130211 5.08 *<br>string&lt;char8_t&gt;/find_first_of/p,pos,n 55020899 9240171 5.95 *<br>string&lt;char8_t&gt;/find_last_of/p,pos,n 10576210 1239053 8.54 *<br>string&lt;char8_t&gt;/insert/pos,p 1822756 1750880 1.04<br>string&lt;char8_t&gt;/iteration 2617889 2540148 1.03<br>string&lt;char8_t&gt;/operator[] 2254794 2256443 1.00<br>string&lt;char8_t&gt;/push_back 12463022 5210321 2.39 *<br>string&lt;char8_t&gt;/replace/pos,n,p,n 3744862 2855260 1.31 *<br>string&lt;char8_t&gt;/reserve 1372046888 218815 100.00 *<br>string&lt;char8_t&gt;/rfind/p,pos,n 1446232 366902 3.94 *<br>string&lt;char8_t&gt;/size 26859 25431 1.06<br>string&lt;char8_t&gt;/swap 2123350 1490509 1.42 *<br><br>vector&lt;uint64&gt;/erase 55164013 56417449 0.98<br>vector&lt;uint64&gt;/insert 55872973 56432664 0.99<br>vector&lt;uint64&gt;/iteration 1329102 1324623 1.00<br>vector&lt;uint64&gt;/operator[] 5264738 3136746 1.68 *<br>vector&lt;uint64&gt;/push_back 14903245 13171175 1.13 *<br>vector&lt;uint64&gt;/sort 88429095 88542171 1.00<br></pre>
+
+
+
+
+
+</div>
+
+
+
+
+
+<h2>
+
+
+
+
+ <a name="Win32.VC71.STLPort.Release"></a>Win32.VC71.STLPort.Release<span style="font-weight: bold;"></span><span style="font-weight: bold;"></span></h2>
+<div style="margin-left: 40px;">
+<pre>EASTL version: 0.96.00<br>Platform: Windows on X86<br>Compiler: Microsoft Visual C++ compiler, version 1310<br>Allocator: PPMalloc::GeneralAllocator. Thread safety enabled.<br>Build: Full optimization. Inlining enabled.<br><br>Values are times to complete tests; smaller values are better.<br>Alarm indicates a greater than 10% difference.<br><br>Test STLPort EASTL Ratio Alarm<br>----------------------------------------------------------------------------------------<br>algorithm/adj_find/vector&lt;TestObject&gt; 2741046 2731441 1.00<br>algorithm/copy/vector&lt;LargePOD&gt; 6065923 5085142 1.19 *<br>algorithm/copy/vector&lt;uint32_t&gt; 158304 165555 0.96<br>algorithm/copy_backward/vector&lt;LargePOD&gt; 4710258 4896476 0.96<br>algorithm/copy_backward/vector&lt;uint32_t&gt; 146030 142630 1.02<br>algorithm/count/vector&lt;uint64_t&gt; 1395921 1406334 0.99<br>algorithm/equal_range/vector&lt;uint64_t&gt; 211692764 118969493 1.78 *<br>algorithm/fill/bool[] 366078 33737 10.85 *<br>algorithm/fill/char[]/'d' 33736 33771 1.00<br>algorithm/fill/vector&lt;char&gt;/'d' 28466 33720 0.84 *<br>algorithm/fill/vector&lt;char&gt;/0 366086 33728 10.85 *<br>algorithm/fill/vector&lt;uint64_t&gt; 466250 401591 1.16 *<br>algorithm/fill/vector&lt;void*&gt; 521603 693481 0.75 *<br>algorithm/fill_n/bool[] 599709 33762 17.76 *<br>algorithm/fill_n/char[] 599573 33711 17.79 *<br>algorithm/fill_n/vector&lt;uint64_t&gt; 434971 1374084 0.32 *<br>algorithm/find_end/string/end 1494742 85349 17.51 *<br>algorithm/find_end/string/middle 1480700 687208 2.15 *<br>algorithm/find_end/string/none 1540540 1546431 1.00<br>algorithm/lex_cmp/schar[] 921638 178797 5.15 *<br>algorithm/lex_cmp/vector&lt;TestObject&gt; 2623559 2643551 0.99<br>algorithm/lex_cmp/vector&lt;uchar&gt; 960899 183608 5.23 *<br>algorithm/lower_bound/vector&lt;TestObject&gt; 60630534 56531528 1.07<br>algorithm/min_element/vector&lt;TestObject&gt; 4209022 2768527 1.52 *<br>algorithm/rand_shuffle/vector&lt;uint64_t&gt; 13762010 15969052 0.86 *<br>algorithm/reverse/list&lt;TestObject&gt; 673387 731825 0.92<br>algorithm/reverse/vector&lt;TestObject&gt; 634576 754511 0.84 *<br>algorithm/search/string&lt;char&gt; 1262599 1387608 0.91<br>algorithm/search_n/string&lt;char&gt; 1166242 458592 2.54 *<br>algorithm/unique/vector&lt;TestObject&gt; 4912193 5336317 0.92<br>algorithm/unique/vector&lt;uint32_t&gt; 809387 809081 1.00<br>algorithm/unique/vector&lt;uint64_t&gt; 4371814 2414255 1.81 *<br>algorithm/upper_bound/vector&lt;uint32_t&gt; 31899081 29555596 1.08<br><br>bitset&lt;1500&gt;/&gt;&gt;=/1 63308136 40553560 1.56 * STLPort is broken, neglects wraparound check.<br>bitset&lt;1500&gt;/count 62523178 22799473 2.74 *<br>bitset&lt;1500&gt;/flip 20302845 19919232 1.02<br>bitset&lt;1500&gt;/reset 18892015 15403148 1.23 *<br>bitset&lt;1500&gt;/set() 15803302 17322192 0.91<br>bitset&lt;1500&gt;/set(i) 2799271 2999310 0.93<br>bitset&lt;1500&gt;/test 2999293 2799262 1.07<br><br>bitset&lt;15&gt;/&gt;&gt;=/1 1199239 3199256 0.37 * STLPort is broken, neglects wraparound check.<br>bitset&lt;15&gt;/count 3599461 2199231 1.64 *<br>bitset&lt;15&gt;/flip 1199231 1199188 1.00<br>bitset&lt;15&gt;/reset 1199188 1199180 1.00<br>bitset&lt;15&gt;/set() 1199214 1199180 1.00<br>bitset&lt;15&gt;/set(i) 2599257 1399262 1.86 *<br>bitset&lt;15&gt;/test 2599274 2599283 1.00<br><br>bitset&lt;35&gt;/&gt;&gt;=/1 6643974 4599239 1.44 * STLPort is broken, neglects wraparound check.<br>bitset&lt;35&gt;/count 5151331 5399438 0.95<br>bitset&lt;35&gt;/flip 1999404 1199273 1.67 *<br>bitset&lt;35&gt;/reset 9805285 1399313 7.01 *<br>bitset&lt;35&gt;/set() 2799279 1199248 2.33 *<br>bitset&lt;35&gt;/set(i) 2799246 1599241 1.75 *<br>bitset&lt;35&gt;/test 2999234 2999251 1.00<br><br>bitset&lt;75&gt;/&gt;&gt;=/1 7002045 6999333 1.00 STLPort is broken, neglects wraparound check.<br>bitset&lt;75&gt;/count 5999351 3002259 2.00 *<br>bitset&lt;75&gt;/flip 3599334 3599163 1.00<br>bitset&lt;75&gt;/reset 9799344 3399218 2.88 *<br>bitset&lt;75&gt;/set() 3599232 3599062 1.00<br>bitset&lt;75&gt;/set(i) 2799228 1599284 1.75 *<br>bitset&lt;75&gt;/test 2999250 2799339 1.07<br><br>deque&lt;ValuePair&gt;/erase 127108651 115258113 1.10<br>deque&lt;ValuePair&gt;/insert 137727889 116552332 1.18 *<br>deque&lt;ValuePair&gt;/iteration 7144182 6009899 1.19 *<br>deque&lt;ValuePair&gt;/operator[] 34241222 20535039 1.67 *<br>deque&lt;ValuePair&gt;/push_back 6585800 3932126 1.67 *<br>deque&lt;ValuePair&gt;/push_front 6805865 3993513 1.70 *<br>deque&lt;ValuePair&gt;/sort 395352323 348778188 1.13 *<br><br>hash_map&lt;string, uint32_t&gt;/clear 426640 447015 0.95<br>hash_map&lt;string, uint32_t&gt;/count 4359344 3883089 1.12 *<br>hash_map&lt;string, uint32_t&gt;/erase pos 584392 458142 1.28 *<br>hash_map&lt;string, uint32_t&gt;/erase range 221034 196078 1.13 *<br>hash_map&lt;string, uint32_t&gt;/erase val 3539867 3790813 0.93<br>hash_map&lt;string, uint32_t&gt;/find 3966831 3811910 1.04<br>hash_map&lt;string, uint32_t&gt;/find_as/char* 11591612 4243710 2.73 *<br>hash_map&lt;string, uint32_t&gt;/insert 16763887 16719194 1.00<br>hash_map&lt;string, uint32_t&gt;/iteration 909968 478609 1.90 *<br>hash_map&lt;string, uint32_t&gt;/operator[] 4360041 4108313 1.06<br><br>hash_map&lt;uint32_t, TestObject&gt;/clear 302634 283722 1.07<br>hash_map&lt;uint32_t, TestObject&gt;/count 916487 907426 1.01<br>hash_map&lt;uint32_t, TestObject&gt;/erase pos 388042 321385 1.21 *<br>hash_map&lt;uint32_t, TestObject&gt;/erase range 122680 116280 1.06<br>hash_map&lt;uint32_t, TestObject&gt;/erase val 1710931 1729529 0.99<br>hash_map&lt;uint32_t, TestObject&gt;/find 1089462 1346527 0.81 *<br>hash_map&lt;uint32_t, TestObject&gt;/insert 4560310 5072350 0.90 *<br>hash_map&lt;uint32_t, TestObject&gt;/iteration 960117 495354 1.94 *<br>hash_map&lt;uint32_t, TestObject&gt;/operator[] 1872830 1890595 0.99<br><br>heap (uint32_t[])/make_heap 3528418 3327257 1.06<br>heap (uint32_t[])/pop_heap 63243859 61011853 1.04<br>heap (uint32_t[])/push_heap 11602424 10045869 1.15 *<br>heap (uint32_t[])/sort_heap 52965362 48744729 1.09<br><br>heap (vector&lt;TestObject&gt;)/make_heap 13191456 13089711 1.01<br>heap (vector&lt;TestObject&gt;)/pop_heap 148555656 144787742 1.03<br>heap (vector&lt;TestObject&gt;)/push_heap 28696689 26618830 1.08<br>heap (vector&lt;TestObject&gt;)/sort_heap 112473989 114018643 0.99<br><br>list&lt;TestObject&gt;/ctor(it) 80186731 74006287 1.08<br>list&lt;TestObject&gt;/ctor(n) 6232311 6128007 1.02<br>list&lt;TestObject&gt;/erase 344556374 212877808 1.62 *<br>list&lt;TestObject&gt;/find 39859075 14591347 2.73 *<br>list&lt;TestObject&gt;/insert 86935153 56138233 1.55 *<br>list&lt;TestObject&gt;/push_back 79569180 46700641 1.70 *<br>list&lt;TestObject&gt;/remove 785786758 324201016 2.42 *<br>list&lt;TestObject&gt;/reverse 45248186 24852759 1.82 *<br>list&lt;TestObject&gt;/size/1 219844 219496 1.00<br>list&lt;TestObject&gt;/size/10 519563 519579 1.00 EASTL intentionally implements list::size as O(n).<br>list&lt;TestObject&gt;/size/100 4567194 101230266 0.05 * EASTL intentionally implements list::size as O(n).<br>list&lt;TestObject&gt;/splice 68321087 23601687 2.89 *<br><br>map&lt;TestObject, uint32_t&gt;/clear 168011 180540 0.93<br>map&lt;TestObject, uint32_t&gt;/count 4830439 5139287 0.94<br>map&lt;TestObject, uint32_t&gt;/equal_range 8700090 6158531 1.41 *<br>map&lt;TestObject, uint32_t&gt;/erase/key 6696776 4617038 1.45 *<br>map&lt;TestObject, uint32_t&gt;/erase/pos 309273 333183 0.93<br>map&lt;TestObject, uint32_t&gt;/erase/range 137419 136068 1.01<br>map&lt;TestObject, uint32_t&gt;/find 4773498 4931352 0.97<br>map&lt;TestObject, uint32_t&gt;/insert 9651877 9311699 1.04<br>map&lt;TestObject, uint32_t&gt;/iteration 372946 416364 0.90 *<br>map&lt;TestObject, uint32_t&gt;/lower_bound 4784234 4915797 0.97<br>map&lt;TestObject, uint32_t&gt;/operator[] 5040254 5183147 0.97<br>map&lt;TestObject, uint32_t&gt;/upper_bound 4724292 4915984 0.96<br><br>set&lt;uint32_t&gt;/clear 165300 173289 0.95<br>set&lt;uint32_t&gt;/count 4958654 4885086 1.02<br>set&lt;uint32_t&gt;/equal_range 8434134 5698681 1.48 *<br>set&lt;uint32_t&gt;/erase range 145554 133960 1.09<br>set&lt;uint32_t&gt;/erase/pos 299914 324760 0.92<br>set&lt;uint32_t&gt;/erase/val 6506155 4335034 1.50 *<br>set&lt;uint32_t&gt;/find 4866879 4556043 1.07<br>set&lt;uint32_t&gt;/insert 8340523 8957257 0.93<br>set&lt;uint32_t&gt;/iteration 294465 343442 0.86 *<br>set&lt;uint32_t&gt;/lower_bound 4548095 4756498 0.96<br>set&lt;uint32_t&gt;/upper_bound 4559196 4521498 1.01<br><br>sort/q_sort/TestObject[] 7316766 7013894 1.04<br>sort/q_sort/TestObject[]/sorted 1668439 1332885 1.25 *<br>sort/q_sort/vector&lt;TestObject&gt; 7331530 7017260 1.04<br>sort/q_sort/vector&lt;TestObject&gt;/sorted 1601629 1247120 1.28 *<br>sort/q_sort/vector&lt;ValuePair&gt; 7071643 7067869 1.00<br>sort/q_sort/vector&lt;ValuePair&gt;/sorted 2136390 1703799 1.25 *<br>sort/q_sort/vector&lt;uint32&gt; 3292891 2943627 1.12 *<br>sort/q_sort/vector&lt;uint32&gt;/sorted 653693 473612 1.38 *<br><br>string&lt;char16_t&gt;/compare 356579259 432760228 0.82 *<br>string&lt;char16_t&gt;/erase/pos,n 3430422 3428645 1.00<br>string&lt;char16_t&gt;/find/p,pos,n 229263402 225830975 1.02<br>string&lt;char16_t&gt;/find_first_not_of/p,pos,n 187391 81404 2.30 *<br>string&lt;char16_t&gt;/find_first_of/p,pos,n 4411831 4413532 1.00<br>string&lt;char16_t&gt;/find_last_of/p,pos,n 731655 726155 1.01<br>string&lt;char16_t&gt;/insert/pos,p 3408628 3319726 1.03<br>string&lt;char16_t&gt;/iteration 309993861 310333547 1.00<br>string&lt;char16_t&gt;/operator[] 580839 579904 1.00<br>string&lt;char16_t&gt;/push_back 3983338 2975553 1.34 *<br>string&lt;char16_t&gt;/replace/pos,n,p,n 4361095 4211504 1.04<br>string&lt;char16_t&gt;/reserve 935141729 247010 100.00 *<br>string&lt;char16_t&gt;/rfind/p,pos,n 248956 223397 1.11 *<br>string&lt;char16_t&gt;/size 13311 13107 1.02<br>string&lt;char16_t&gt;/swap 519129 579445 0.90 *<br><br>string&lt;char8_t&gt;/compare 76695559 76828015 1.00<br>string&lt;char8_t&gt;/erase/pos,n 1951566 1947282 1.00<br>string&lt;char8_t&gt;/find/p,pos,n 185878944 185605039 1.00<br>string&lt;char8_t&gt;/find_first_not_of/p,pos,n 196877 81600 2.41 *<br>string&lt;char8_t&gt;/find_first_of/p,pos,n 4147685 4145356 1.00<br>string&lt;char8_t&gt;/find_last_of/p,pos,n 605897 598222 1.01<br>string&lt;char8_t&gt;/insert/pos,p 1781592 1768264 1.01<br>string&lt;char8_t&gt;/iteration 921502 921272 1.00<br>string&lt;char8_t&gt;/operator[] 361250 359873 1.00<br>string&lt;char8_t&gt;/push_back 3363288 2530493 1.33 *<br>string&lt;char8_t&gt;/replace/pos,n,p,n 2682600 2633130 1.02<br>string&lt;char8_t&gt;/reserve 672517501 78387 100.00 *<br>string&lt;char8_t&gt;/rfind/p,pos,n 226202 200013 1.13 *<br>string&lt;char8_t&gt;/size 11280 11109 1.02<br>string&lt;char8_t&gt;/swap 519393 559759 0.93<br><br>vector&lt;uint64&gt;/erase 55184856 55192217 1.00<br>vector&lt;uint64&gt;/insert 56764267 55682726 1.02<br>vector&lt;uint64&gt;/iteration 423122 424039 1.00<br>vector&lt;uint64&gt;/operator[] 1189397 860991 1.38 *<br>vector&lt;uint64&gt;/push_back 5626609 4027317 1.40 *<br>vector&lt;uint64&gt;/sort 49227036 49231362 1.00<br></pre>
+
+
+
+
+
+</div>
+
+
+
+
+
+<h2>
+
+
+
+
+
+
+
+
+<br>
+
+
+
+<hr style="width: 100%; height: 2px;">
+End of document<br>
+
+
+
+<br>
+
+
+
+<br>
+
+
+
+<br>
+
+
+
+<br>
+
+
+
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTL Best Practices.html b/EASTL/doc/html/EASTL Best Practices.html
new file mode 100644
index 0000000..bc0792e
--- /dev/null
+++ b/EASTL/doc/html/EASTL Best Practices.html
@@ -0,0 +1,1001 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL Best Practices</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta name="author" content="Paul Pedriana">
+ <meta name="description" content="Best practices for EASTL usage">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+ <style type="text/css">
+<!--
+.style1 {font-size: 12pt}
+-->
+ </style>
+</head>
+<body>
+<h1>EASTL Best Practices</h1>
+<p>In this document we discuss best practices for using EASTL. The primary emphasis is on performance with a secondary
+ emphasis on correctness and maintainability. Some best practices apply only to some situations, and these will be
+ pointed out as we go along. In order to be easily digestible, we present these practices as a list of items in the tone
+ of the Effective C++ series of books.</p>
+<h2>Summary</h2>
+<p>The descriptions here are intentionally terse; this is to make them easier to visually scan.</p>
+<table style="text-align: left; width: 100%;" border="0" cellpadding="1" cellspacing="1">
+<tbody>
+<tr>
+<td style="width: 28px;">1</td>
+<td><a href="#Best.1">Consider intrusive containers.</a></td>
+</tr>
+<tr>
+<td>2</td>
+<td><a href="#Best.2">Consider fixed-size containers.</a></td>
+</tr>
+<tr>
+<td>3</td>
+<td><a href="#Best.3">Consider custom allocators.</a></td>
+</tr>
+<tr>
+<td>4</td>
+<td><a href="#Best.4">Consider hash tables instead of maps.</a></td>
+</tr>
+<tr>
+<td>5</td>
+<td><a href="#Best.5">Consider a vector_map (a.k.a. sorted vector) for unchanging data.</a></td>
+</tr>
+<tr>
+<td>6</td>
+<td><a href="#Best.6">Consider slist instead of list.</a></td>
+</tr>
+<tr>
+<td>7</td>
+<td><a href="#Best.7">Avoid redundant end() and size() in loops.</a></td>
+</tr>
+<tr>
+<td>8</td>
+<td><a href="#Best.8">Iterate containers instead of using operator[].</a></td>
+</tr>
+<tr>
+<td>9</td>
+<td><a href="#Best.9">Learn to use the string class appropriately.</a></td>
+</tr>
+<tr>
+<td>10</td>
+<td><a href="#Best.10">Cache list size if you want size() to be O(1).</a></td>
+</tr>
+<tr>
+<td>11</td>
+<td><a href="#Best.11">Use empty() instead of size() when possible.</a></td>
+</tr>
+<tr>
+<td>12</td>
+<td><a href="#Best.12">Know your container efficiencies.</a></td>
+</tr>
+<tr>
+<td>13</td>
+<td><a href="#Best.13">Use vector::reserve.</a></td>
+</tr>
+<tr>
+<td>14</td>
+<td><a href="#Best.14">Use&nbsp;vector::set_capacity to trim memory usage.</a></td>
+</tr>
+<tr>
+<td>15</td>
+<td><a href="#Best.15">Use swap() instead of a manually implemented version.</a></td>
+</tr>
+<tr>
+<td>16</td>
+<td><a href="#Best.16">Consider storing pointers instead of objects.</a></td>
+</tr>
+<tr>
+<td>17</td>
+<td><a href="#Best.17">Consider smart pointers instead of raw pointers.</a></td>
+</tr>
+<tr>
+<td>18</td>
+<td><a href="#Best.18">Use iterator pre-increment instead of post-increment.</a></td>
+</tr>
+<tr>
+<td>19</td>
+<td><a href="#Best.19">Make temporary references so the code can be traced/debugged.</a></td>
+</tr>
+<tr>
+<td>20</td>
+<td><a href="#Best.20">Consider bitvector or bitset instead of vector&lt;bool&gt;.</a></td>
+</tr>
+<tr>
+<td>21</td>
+<td><a href="#Best.21">Vectors can be treated as contiguous memory.</a></td>
+</tr>
+<tr>
+<td>22</td>
+<td><a href="#Best.22">Search&nbsp;hash_map&lt;string&gt; via find_as() instead of find().</a></td>
+</tr>
+<tr>
+<td>23</td>
+<td><a href="#Best.23">Take advantage of type_traits (e.g. <small style=
+"font-family: Courier New;">EASTL_DECLARE_TRIVIAL_RELOCATE</small>).</a></td>
+</tr>
+<tr>
+<td>24</td>
+<td><a href="#Best.24">Name containers to track memory usage.</a></td>
+</tr>
+<tr>
+<td>25</td>
+<td><a href="#Best.25">Learn the algorithms.</a></td>
+</tr>
+<tr>
+<td>26</td>
+<td><a href="#Best.26">Pass and return containers by reference instead of value.</a></td>
+</tr>
+<tr>
+<td>27</td>
+<td><a href="#Best.27">Consider using reset_lose_memory() for fast container teardown.</a></td>
+</tr>
+<tr>
+<td>28</td>
+<td><a href="#Best.28">Consider using fixed_substring instead of copying strings.</a></td>
+</tr>
+<tr>
+ <td>29</td>
+ <td><a href="#Best.29">Consider using vector::push_back(void).</a></td>
+</tr>
+</tbody>
+</table>
+<h2>Detail</h2>
+<p class="faq-question"><a name="Best.1"></a>1
+ Consider intrusive containers.
+</p>
+<p class="faq-answer">Intrusive containers (such as intrusive_list) differ from regular containers (such as list) in that they use the stored objects to manage the linked list instead of using nodes allocated from a memory heap. The result is better usage of memory. Additionally intrusive_list objects can be removed from their list without knowing what list they belong to. To make an intrusive_list of Widgets, you have Widget inherit from intrusive_list_node or simply have mpPrev/mpNext member variables.</p>
+<p class="faq-answer">To create an intrusive_list container, you can use the following code:</p>
+<p class="code-example">class Widget : public intrusive_list_node<br>
+{ };<br>
+<br>
+intrusive_list&lt;Widget&gt; widgetList;<br>
+widgetList.push_back(someWidget);</p>
+<p></p>
+<p class="faq-question"><a name="Best.2"></a>2
+ Consider fixed-size containers.
+</p>
+<p class="faq-answer">Fixed-size containers (such as fixed_list) are variations of regular containers (such as list) in that they allocate from a fixed block of local memory instead of allocating from a generic heap. The result is better usage of memory due to reduced fragmentation, better cache behavior, and faster allocation/deallocation. The presence of fixed-size containers negate the most common complaint that people have about STL: that it fragments the heap or &quot;allocates all over the place.&quot;</p>
+<p class="faq-answer">EASTL fixed containers include:</p>
+<ul>
+ <li>fixed_list</li>
+ <li>fixed_slist</li>
+ <li>fixed_vector</li>
+ <li>fixed_string</li>
+ <li>fixed_map</li>
+ <li>fixed_multimap</li>
+ <li>fixed_set</li>
+ <li>fixed_multiset</li>
+ <li>fixed_hash_map</li>
+ <li>fixed_hash_multimap</li>
+ <li>fixed_hash_set</li>
+ <li>fixed_hash_multiset</li>
+</ul>
+<p class="faq-answer">To create a fixed_set, you can use the following code:</p>
+<p class="code-example">fixed_set&lt;int, 25&gt; intSet; // Create a set capable of holding 25 elements.<br>
+intSet.push_back(37);</p>
+<p></p>
+<p class="faq-question"><a name="Best.3"></a>3
+ Consider custom allocators.
+</p>
+<p class="faq-answer">While EASTL provides fixed-size containers in order to control container memory usage, EASTL lets you assign a custom allocator to any container. This lets you define your own memory pool. EASTL has a more flexible and powerful mechanism of doing this that standard STL, as EASTL understands object alignment requirements, allows for debug naming, allows for sharing allocators across containers, and allows dynamic allocator assignment.</p>
+<p class="faq-answer">To create a list container that uses your custom allocator and uses block naming, you can use the following code:</p>
+<p class="code-example">list&lt;int&gt; intList(pSomeAllocator, &quot;graphics/intList&quot;);<br>
+intList.push_back(37);</p>
+<p class="faq-question"><a name="Best.4"></a>4
+Consider hash tables instead of maps.</p>
+<p class="faq-answer">Hash containers (such as hash_map) provide the same interface as associative containers (such as map) but have faster lookup and use less memory. The primary disadvantage relative to associative containers is that hash containers are not sorted.</p>
+<p class="faq-answer">To make a hash_map (dictionary) of integers to strings, you can use the following code:</p>
+<p class="code-example">hash_map&lt;int, const char*&gt; stringTable;<br>
+stringTable[37] = &quot;hello&quot;;</p>
+<p class="faq-question"><a name="Best.5"></a>5
+ Consider a vector_map (a.k.a. sorted vector) for unchanging data.
+</p>
+<p class="faq-answer">You can improve speed, memory usage, and cache behavior by using a vector_map instead of a map (or vector_set instead of set, etc.). The primary disadvantage of vector_map is that insertions and removal of elements is O(n) instead of O(1). However, if your associative container is not going to be changing much or at all, you can benefit from using a vector_map. Consider calling reserve on the vector_map in order to set the desired capacity up front.</p>
+<p class="faq-answer">To make a vector_set, you can use the following code:</p>
+<p class="code-example">vector_set&lt;int&gt; intSet(16); // Create a vector_set with an initial capacity of 16.<br>
+intSet.insert(37);</p>
+<p class="faq-answer">Note that you can use containers other than vector to implement vector_set. Here's how you do it with deque:</p>
+<p class="code-example">vector_set&lt;int, less&lt;int&gt;, EASTLAllocatorType, deque&lt;int&gt; &gt; intSet;<br>
+intSet.insert(37);</p>
+<p class="faq-question"><a name="Best.6"></a>6
+ Consider slist instead of list.
+</p>
+<p class="faq-answer">An slist is a singly-linked list; it is much like a list except that it can only be traversed in a forward direction and not a backward direction. The benefit is that each node is 4 bytes instead of 8 bytes. This is a small improvement, but if you don't need reverse iteration then it can be an improvement. There's also intrusive_slist as an option.</p>
+<p class="faq-answer">To make an slist, you can use the following code:</p>
+<p class="code-example">slist&lt;int&gt; intSlist;<br>
+intSlist.push_front(37);</p>
+<p class="faq-question"><a name="Best.7"></a>7
+Avoid redundant end() and size() in loops.</p>
+<p class="faq-answer">Instead of writing code like this:<br>
+</p>
+<div class="code-example" style="margin-left: 40px; font-family: Courier New;"><small>for(deque&lt;int&gt;::iterator it = d.begin(); <span style="color: rgb(204, 0, 0);">it != d.end()</span>; ++it)<br>
+&nbsp;&nbsp;&nbsp; ...</small></div>
+<span class="faq-answer">write code like this:<br>
+</span>
+<div class="code-example" style="margin-left: 40px; font-family: Courier New;"><small>for(deque&lt;int&gt;::iterator it = d.begin(), itEnd = d.end(); <span style="color: rgb(51, 204, 0);">it != itEnd</span>; ++it)<br>
+&nbsp;&nbsp;&nbsp; ...</small></div>
+<span class="faq-answer">The latter avoids a function call and return of an object (which in deque's case happens to be more than just a pointer). The above only works when the container is unchanged or for containers that have a constant end value. By "constant end value" we mean containers which can be modified but end always remains the same.</span><br>
+<table style="text-align: left; width: 600px; margin-left: 40px;" border="1" cellpadding="2" cellspacing="2">
+ <tbody>
+ <tr>
+ <td style="text-align: center;">Constant begin</td>
+ <td style="text-align: center;">Non-constant begin</td>
+ <td style="text-align: center;">Constant end</td>
+ <td style="text-align: center;">Non-constant end</td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;">array<sup>1</sup></td>
+ <td style="vertical-align: top;">string<br>
+ vector<br>
+ deque<br>
+ intrusive_list<br>
+ intrusive_slist<br>
+ vector_map<br>
+ vector_multimap<br>
+ vector_set<br>
+ vector_multiset<br>
+ bit_vector<br>
+ hash_map<br>
+ hash_multimap<br>
+ hash_set<br>
+ hash_multiset<br>
+ intrusive_hash_map<br>
+ intrusive_hash_multimap<br>
+ intrusive_hash_set<br>
+ intrusive_hash_multiset</td>
+ <td style="vertical-align: top;">array<br>
+ list<br>
+ slist<br>
+ intrusive_list<br>
+ intrusive_slist<br>
+ map<br>
+ multimap<br>
+ set<br>
+ multiset<br>
+ hash_map<sup>2</sup><br>
+ hash_multimap<sup>2</sup><br>
+ hash_set<sup>2</sup><br>
+ hash_multiset<sup>2</sup><br>
+ intrusive_hash_map<br>
+ intrusive_hash_multimap<br>
+ intrusive_hash_set<br>
+ intrusive_hash_multiset</td>
+ <td style="vertical-align: top;">string<br>
+ vector<br>
+ deque<br>
+ vector_map<br>
+ vector_multimap<br>
+ vector_set<br>
+ vector_multiset<br>
+ bit_vector<br></td>
+ </tr>
+ </tbody>
+</table>
+<div style="margin-left: 40px;"><sup>1</sup> Arrays can be neither resized nor reallocated.<br>
+ <sup>2</sup> Constant end if the hashtable can't/won't re-hash. Non-constant if it can re-hash.</div>
+<p class="faq-question"> <a name="Best.8"></a>8
+Iterate containers instead of using operator[].
+</p>
+<p class="faq-answer">It's faster to iterate random access containers via iterators than via operator[], though operator[] usage may look simpler.</p>
+<p class="faq-answer">Instead of doing this:</p>
+<p class="code-example">for(unsigned i = 0, iEnd = intVector.size(); i != iEnd; ++i)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;intVector[i] = 37;</p>
+<p class="faq-answer">you can execute more efficiently by doing this:</p>
+<p class="code-example">for(vector&lt;int&gt;::iterator it = intVector.begin(), itEnd = intVector.end(); it != itEnd; ++it)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;*it = 37;</p>
+<p class="faq-question"> <a name="Best.9"></a>9
+Learn to use the string class appropriately.</p>
+<p class="faq-answer">Oddly enough, the most mis-used STL container is easily the string class. The tales of string abuse could rival the 1001 Arabian Nights. Most of the abuses involve doing things in a harder way than need be. In examining the historical mis-uses of string, it is clear that many of the problems stem from the user thinking in terms of C-style string operations instead of object-oriented strings. This explains why statements such as&nbsp;<small><span style="font-family: Courier New;">strlen(s.c_str())</span></small> are so common, whereas the user could just use <small><span style="font-family: Courier New;">s.length()</span></small> instead and be both clearer and more efficient.<br>
+<br>
+Here we provide a table of actual collected examples of things done and how they could have been done instead.</p>
+<table style="text-align: left; width: 90%; margin-left: 40px;" border="1" cellpadding="2" cellspacing="2">
+ <tbody>
+ <tr>
+ <td style="font-weight: bold;">What was written</td>
+ <td style="font-weight: bold;">What could have been written</td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ s = s.Left(i) + '+' + s.Right(s.length() - i - 1);<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>s[i] = '+';</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ string s(""); // This is the most commonly found misuse.<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>string s;</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ s = "";<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>s.clear();</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ s.c_str()[0] = 'u';<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>s[0] = 'u';</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ len = strlen(s.c_str());<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>len = s.length();</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ s = string("u");<br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>s = "u";</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ puts(s + string("u"));<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>puts(s + "u");</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ string s(" ");<br>
+ puts(s.c_str());<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>puts(" ");</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ s.sprintf("u");<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>s = "u";</small></td>
+ </tr>
+ <tr>
+ <td class="style1" style="font-family: Courier New;"><small><br>
+ char array[32];<br>
+ sprintf(array, "%d", 10);<br>
+ s = string(array);<br>
+ <br>
+ </small></td>
+ <td class="style1" style="font-family: Courier New;"><small>s.sprintf("%d", 10);</small></td>
+ </tr>
+ </tbody>
+</table>
+<p class="faq-answer"><br>
+The chances are that if you want to do something with a string, there is a very basic way to do it. You don't want your code to appear in a future version of the above table.</p>
+<p class="faq-question"> <a name="Best.10"></a>10
+Cache list size if you want list::size() to be O(1).</p>
+<p class="faq-answer">EASTL's list, slist, intrusive_list, and intrusive_slist containers have a size() implementation which is O(n). That is, these containers don't keep a count (cache) of the current list size and when you call the size() function they iterate the list. This is by design and the reasoning behind it has been deeply debated and considered (and is discussed in the FAQ and the list header file). In summary, list doesn't cache its size because the only function that would benefit is the size function while many others would be negatively impacted and the memory footprint would be negatively impacted, yet list::size is not a very frequently called function in well-designed code. At the same time, nothing prevents the user from caching the size himself, though admittedly it adds some tedium and risk to the code writing process.<br>
+ <br>
+Here's an example of caching the list size manually:<br>
+</p>
+<div class="code-example" style="margin-left: 40px;"><small><span style="font-family: Courier New;">list&lt;int&gt; intList;<br>
+ size_t &nbsp; &nbsp;n = 0;<br>
+ <br>
+ intList.push_back(37);<br>
+ ++n;<br>
+ intList.pop_front();<br>
+ --n;</span></small></div>
+<p class="faq-question"> <a name="Best.11"></a>11
+Use empty() instead of size() when possible.
+</p>
+<p class="faq-answer">All conventional containers have both an empty function and a size function. For all containers empty() executes with O(1) (constant time) efficiency. However, this is not so for size(), as some containers need to calculate the size and others need to do pointer subtraction (which may involve integer division) to find the size.</p>
+<p class="faq-question"><a name="Best.12"></a>12
+Know your container efficiencies.</p>
+<p class="faq-answer">The above two practices lead us to this practice, which is a generalization of the above.
+ We present a table of basic information for the conventional EASTL containers. The values are described at the
+ bottom.</p>
+<table style="width: 90%; margin-left: 40px;" border="1" cellpadding="1" cellspacing="1">
+ <tbody>
+ <tr>
+ <td style="width: 15%; vertical-align: top; height: 13px; font-weight: bold;"><p>Container</p></td>
+ <td style="text-align: center;">empty() efficiency</td>
+ <td style="text-align: center; font-weight: bold;">size() efficiency</td>
+ <td style="text-align: center; font-weight: bold;">operator[] efficiency</td>
+ <td style="font-weight: bold; text-align: center;" height="13" valign="top" width="16%"><p>insert() efficiency</p></td>
+ <td style="font-weight: bold; text-align: center;" height="13" valign="top" width="16%"><p>erase() efficiency</p></td>
+ <td style="font-weight: bold; text-align: center;" height="13" valign="top" width="7%"><p>find() efficiency</p></td>
+ <td style="font-weight: bold; text-align: center;" height="13" valign="top" width="10%"><p>sort efficiency</p></td>
+ </tr>
+ <tr>
+ <td>slist</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">O(n)</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">O(1)</td>
+ <td style="text-align: center;">O(1)</td>
+ <td style="text-align: center;">O(n)</td>
+ <td style="text-align: center;">O(n+)</td>
+ </tr>
+ <tr>
+ <td height="13" valign="top" width="15%"><p>list</p></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">n</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;" height="13" valign="top" width="16%"><p>1</p></td>
+ <td style="text-align: center;" height="13" valign="top" width="16%"><p>1</p></td>
+ <td style="text-align: center;" height="13" valign="top" width="7%"><p>n</p></td>
+ <td style="text-align: center;" height="13" valign="top" width="10%"><p>n log(n)</p></td>
+ </tr>
+ <tr>
+ <td>intrusive_slist</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">n</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">n+</td>
+ </tr>
+ <tr>
+ <td>intrusive_list</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">n</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">n log(n)</td>
+ </tr>
+ <tr>
+ <td>array</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">n</td>
+ <td style="text-align: center;">n log(n)</td>
+ </tr>
+ <tr>
+ <td>vector</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1<sup>a</sup></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">n</td>
+ <td style="text-align: center;">n log(n)</td>
+ </tr>
+ <tr>
+ <td>vector_set</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1<sup>a</sup></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">1</td>
+ </tr>
+ <tr>
+ <td>vector_multiset</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1<sup>a</sup></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">1</td>
+ </tr>
+ <tr>
+ <td>vector_map</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1<sup>a</sup></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">1</td>
+ </tr>
+ <tr>
+ <td>vector_multimap</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1<sup>a</sup></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">1</td>
+ </tr>
+ <tr>
+ <td>deque</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1<sup>a</sup></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1&nbsp;at begin or end,<br>
+ else n / 2</td>
+ <td style="text-align: center;">1&nbsp;at begin or end,<br>
+ else n / 2</td>
+ <td style="text-align: center;">n</td>
+ <td style="text-align: center;">n log(n)</td>
+ </tr>
+ <tr>
+ <td>bit_vector</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1<sup>a</sup></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">n</td>
+ <td style="text-align: center;">n log(n)</td>
+ </tr>
+ <tr>
+ <td>string, cow_string</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1<sup>a</sup></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">1&nbsp;at end, else n</td>
+ <td style="text-align: center;">n</td>
+ <td style="text-align: center;">n log(n)</td>
+ </tr>
+ <tr>
+ <td>set</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">1</td>
+ </tr>
+ <tr>
+ <td>multiset</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">1</td>
+ </tr>
+ <tr>
+ <td>map</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">1</td>
+ </tr>
+ <tr>
+ <td>multimap</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">log(n)</td>
+ <td style="text-align: center;">1</td>
+ </tr>
+ <tr>
+ <td>hash_set</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ </tr>
+ <tr>
+ <td>hash_multiset</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1<br></td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ </tr>
+ <tr>
+ <td>hash_map</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ </tr>
+ <tr>
+ <td>hash_multimap</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ </tr>
+ <tr>
+ <td>intrusive_hash_set</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ </tr>
+ <tr>
+ <td>intrusive_hash_multiset</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ </tr>
+ <tr>
+ <td>intrusive_hash_map</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ </tr>
+ <tr>
+ <td>intrusive_hash_multimap</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">1</td>
+ <td style="text-align: center;">-</td>
+ </tr>
+ </tbody>
+</table>
+<p class="faq-answer"><br>
+ Notes:
+</p>
+<ul>
+ <li>- means that the operation does not exist.</li>
+ <li>1 means amortized constant time. Also known as O(1)</li>
+ <li>n means time proportional to the container size. Also known as O(n)</li>
+ <li>log(n) means time proportional to the natural logarithm of the container size. Also known as O(log(n))</li>
+ <li>n log(n) means time proportional to log(n) times the size of the container. Also known as O(n log(n))</li>
+ <li>n+ means that the time is at least n, and possibly higher.</li>
+ <li>Inserting at the end of a vector may cause the vector to be resized; resizing a vector is O(n). However, the amortized time complexity for vector insertions at the end is constant.</li>
+ <li>Sort assumes the usage of the best possible sort for a large container of random data. Some sort algorithms (e.g. quick_sort) require random access iterators and so the sorting of some containers requires a different sort algorithm. We do not include bucket or radix sorts, as they are always O(n).</li>
+ <li><sup>a</sup> vector, deque, string size is O(1) but involves pointer subtraction and thus&nbsp;integer division and so is not as efficient as containers that store the size directly.</li>
+</ul>
+<p class="faq-question"><a name="Best.13"></a>13
+Use vector::reserve.</p>
+<p class="faq-answer">You can prevent vectors (and strings) from reallocating as you add items by specifying up front how many items you will be requiring. You can do this in the constructor or by calling the reserve function at any time. The capacity function returns the amount of space which is currently reserved.<br>
+ <br>
+Here's how you could specify reserved capacity in a vector:<br>
+</p>
+<div class="code-example" style="margin-left: 40px; font-family: Courier New;"><small>vector&lt;Widget&gt; v(37); &nbsp; // Reserve space to hold up to 37 items.<br>
+&nbsp; &nbsp; or<br>
+vector&lt;Widget&gt; v; &nbsp; &nbsp; &nbsp; // This empty construction causes to memory to be allocated or reserved.<br>
+ v.reserve(37);<br>
+</small></div>
+<span class="faq-answer">The EASTL vector (and string) implementation looks like this:</span>
+<span class="code-example"><small>template &lt;typename T&gt;<br>
+ class vector {<br>
+&nbsp;&nbsp;&nbsp; T* mpBegin; &nbsp; &nbsp; // Beginning of used element memory.<br>
+&nbsp; &nbsp; T* mpEnd; &nbsp; &nbsp; &nbsp; // End of used element memory.<br>
+&nbsp; &nbsp; T* mpCapacity; &nbsp;// End of storage capacity. Is &gt;= mpEnd<br>
+ </small> <small>}</small></span><small> </small><span class="faq-answer">
+Another approach to being efficient with vector memory usage is to use fixed_vector.</span>
+ <p class="faq-question"><a name="Best.14"></a>14
+Use vector::set_capacity to trim memory usage.</p>
+<p class="faq-answer">A commonly asked question about vectors and strings is, "How do I reduce the capacity of a vector?" The conventional solution for std STL is to use the somewhat non-obvious trick of using <small><span style=
+"font-family: Courier New;">vector&lt;Widget&gt;(v).swap(v)</span></small>. EASTL provides the same functionality via a member function called set_capacity() which is present in both the vector and string classes.&nbsp;<br>
+ <br>
+An example of reducing a vector is the following:</p>
+<span class="code-example"><small>vector&lt;Widget&gt; v;<br>
+...<br>
+</small> <small>v.set_capacity();</small></span><small> </small><span class="faq-answer">
+An example of resizing to zero and completely freeing the memory of a vector is the following:<br>
+</span>
+<div class="code-example" style="margin-left: 40px; font-family: Courier New;"><small>vector&lt;Widget&gt; v;<br>
+ ...<br>
+ </small> <small>v.set_capacity(0);</small></div>
+<p class="faq-question"><a name="Best.15"></a>15 Use swap() instead of a manually implemented version.</p>
+<p class="faq-answer">The generic swap algorithm provides a basic version for any kind of object. However, each EASTL container provides a specialization of swap which is optimized for that container. For example, the list container implements swap by simply swapping the internal member pointers and not by moving individual elements.</p>
+<p class="faq-question"> <a name="Best.16"></a>16
+Consider storing pointers instead of objects.</p>
+<p class="faq-answer">There are times when storing pointers to objects is more efficient or useful than storing objects directly in containers. It can be more efficient to store pointers when the objects are big and the container may need to construct, copy, and destruct objects during sorting or resizing. Moving pointers is usually faster than moving objects. It can be useful to store pointers instead of objects when somebody else owns the objects or the objects are in another container. It might be useful for a Widget to be in a list and in a hash table at the same time.</p>
+<p class="faq-question"><a name="Best.17"></a>17
+ Consider smart pointers instead of raw pointers.
+</p>
+<p class="faq-answer">If you take the above recommendation and store objects as pointers instead of as objects, you may want to consider storing them as smart pointers instead of as regular pointers. This is particularly useful for when you want to delete the object when it is removed from the container. Smart pointers will automatically delete the pointed-to object when the smart pointer is destroyed. Otherwise, you will have to be careful about how you work with the list so that you don't generate memory leaks. Smart pointers implement a shared reference count on the stored pointer, as so any operation you do on a smart pointer container will do the right thing. Any pointer can be stored in a smart pointer, and custom new/delete mechanisms can work with smart pointers. The primary smart pointer is shared_ptr.</p>
+<p class="faq-answer">Here is an example of creating and using a shared_ptr:</p>
+<p class="code-example">typedef shared_ptr&lt;Widget&gt; WPtr;<br>
+ list&lt;WPtr&gt; wList;<br>
+ <br>
+ wList.push_back(WPtr(new Widget)); // The user may&nbsp;have operator new/delete overrides.<br>
+wList.pop_back();&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Implicitly deletes the Widget.</p>
+<p class="faq-answer">Here is an example of creating and using a shared_ptr that uses a custom allocation and deallocation mechanism:</p>
+<p class="code-example">typedef shared_ptr&lt;Widget, EASTLAllocatorType, WidgetDelete&gt; WPtr; // WidgetDelete is a custom destroyer.<br>
+ list&lt;WPtr&gt; wList;<br>
+ <br>
+ wList.push_back(WPtr(WidgetCreate(Widget))); // WidgetCreate is a custom allocator.<br>
+wList.pop_back(); &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;// Implicitly calls WidgetDelete.</p>
+<p class="faq-question"><a name="Best.18"></a>18
+ Use iterator pre-increment instead of post-increment.
+</p>
+<p class="faq-answer">Pre-increment (e.g. ++x) of iterators is better than post-increment (x++) when the latter is not specifically needed. It is common to find code that uses post-incrementing when it could instead use pre-incrementing; presumably this is due to post-increment looking a little better visually. The problem is that the latter constructs a temporary object before doing the increment. With built-in types such as pointers and integers, the compiler will recognize that the object is a trivial built-in type and that the temporary is not needed, but the compiler cannot do this for other types, even if the compiler sees that the temporary is not used; this is because the constructor may have important side effects and the compiler would be broken if it didn't construct the temporary object.</p>
+<p class="faq-answer">EASTL iterators are usually not trivial types and so it's best not to hope the compiler will do the best thing. Thus you should always play it safe an use pre-increment of iterators whenever post-increment is not required.</p>
+<p class="faq-answer">Here is an example of using iterator pre-increment; for loops like this should always use pre-increment:</p>
+<p class="code-example">for(set&lt;int&gt;::iterator it(intSet.begin()), itEnd(intSet.end()); it != itEnd; ++it)<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;*it = 37;</p>
+<p class="faq-question"> <a name="Best.19"></a>19
+ Make temporary references so the code can be traced/debugged.
+</p>
+<p class="faq-answer">Users want to be able to inspect or modify variables which are referenced by iterators. While EASTL containers and iterators are designed to make this easier than other STL implementations, it makes things very easy if the code explicitly declares a reference to the iterated element. In addition to making the variable easier to debug, it also makes code easier to read and makes the debug (and possibly release) version of the application run more efficiently.</p>
+<p class="faq-answer">Instead of doing this:</p>
+<p class="code-example">for(list&lt;Widget&gt;::iterator it = wl.begin(), itEnd = wl.end(); it != itEnd; ++it) {<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;(*it).x = 37;<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;(*it).y = 38;<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;(*it).z = 39;<br>
+ }</p>
+<p class="faq-answer">Consider doing this:</p>
+<p class="code-example">for(list&lt;Widget&gt;::iterator it = wl.begin(), itEnd = wl.end(); it != itEnd; ++it) {<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;Widget&amp; w = *it; // The user can easily inspect or modify w here.<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;w.x = 37;<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;w.y = 38;<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;w.z = 39;<br>
+ }</p>
+<p class="faq-question"><a name="Best.20"></a>20
+ Consider bitvector or bitset instead of vector&lt;bool&gt;. </p>
+<p class="faq-answer">In EASTL, a vector of bool is exactly that. It intentionally does not attempt to make a specialization which implements a packed bit array. The bitvector class is specifically designed for this purpose. There are arguments either way, but if vector&lt;bool&gt; were allowed to be something other than an array of bool, it would go against user expectations and prevent users from making a true array of bool. There's a mechanism for specifically getting the bit packing, and it is bitvector.</p>
+<p class="faq-answer">Additionally there is bitset, which is not a conventional iterateable container but instead acts like bit flags. bitset may better suit your needs than bitvector if you need to do flag/bit operations instead of array operations. bitset does have an operator[], though.</p>
+<p class="faq-question"> <a name="Best.21"></a>21
+Vectors can be treated as contiguous memory.</p>
+<p class="faq-answer">EASTL vectors (and strings) guarantee that elements are present in a linear contiguous array. This means that you can use a vector as you would a C-style array by using the vector data() member function or by using &amp;v[0].</p>
+<p class="faq-answer">To use a vector as a pointer to an array, you can use the following code:</p>
+<p class="code-example">struct Widget {<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;uint32_t x;<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;uint32_t y;<br>
+ };<br>
+ <br>
+ vector&lt;Widget&gt; v;<br>
+ <br>
+ quick_sort((uint64_t*)v.data(), (uint64_t*)(v.data() + v.size()));</p>
+<p class="faq-question"><a name="Best.22"></a>22
+Search&nbsp;hash_map&lt;string&gt; via find_as() instead of find(). </p>
+<p class="faq-answer">EASTL hash tables offer a bonus function called find_as when lets you search a hash table by something other than the container type. This is particularly useful for hash tables of string objects that you want to search for by string literals (e.g. &quot;hello&quot;) or char pointers. If you search for a string via the find function, your string literal will necessarily be converted to a temporary string object, which is inefficient.</p>
+<p class="faq-answer">To use find_as, you can use the following code:</p>
+<p class="code-example">hash_map&lt;string, int&gt; hashMap;<br>
+ hash_map&lt;string, int&gt;::iterator it = hashMap.find_as(&quot;hello&quot;); // Using default hash and compare.</p>
+<p class="faq-question"> <a name="Best.23"></a>23
+Take advantage of type_traits (e.g. <small style=
+"font-family: Courier New;">EASTL_DECLARE_TRIVIAL_RELOCATE</small>).</p>
+<p class="faq-answer">EASTL includes a fairly serious type traits library that is on par with the one found in Boost but offers some additional performance-enhancing help as well. The type_traits library provides information about class <span style="font-style: italic;">types</span>, as opposed to class instances. For example, the is_integral type trait tells if a type is one of int, short, long, char, uint64_t, etc.<br>
+ <br>
+There are three primary uses of type traits:</p>
+<ul>
+ <li>Allowing for optimized operations on some data types.</li>
+ <li>Allowing for different logic pathways based on data types.</li>
+ <li>Allowing for compile-type assertions about data type expectations.</li>
+</ul>
+<span class="faq-answer">Most of the type traits are automatically detected and implemented by the compiler. However, EASTL&nbsp;allows for the user to explicitly give the compiler hints about type traits that the compiler cannot know, via the EASTL_DECLARE declarations. If the user has a class that is relocatable (i.e. can safely use memcpy to copy values), the user can use the EASTL_DECLARE_TRIVIAL_RELOCATE declaration to tell the compiler that the class can be copied via memcpy. This will automatically significantly speed up some containers and algorithms that use that class.<br>
+<br>
+Here is an example of using type traits to tell if a value is a floating point value or not:<br>
+</span>
+<div class="code-example" style="margin-left: 40px; font-family: Courier New;"><small>template &lt;typename T&gt;<br>
+ DoSomething(T t) {<br>
+&nbsp; &nbsp; assert(is_floating_point&lt;T&gt;::value);<br>
+ }</small></div>
+<span class="faq-answer">Here is an example of declaring a class as relocatable and using it in a vector.<br>
+</span>
+<div class="code-example" style="margin-left: 40px; font-family: Courier New;"><small>EASTL_DECLARE_TRIVIAL_RELOCATE(Widget); // Usually you put this at the Widget class declaration.<br>
+ vector&lt;Widget&gt; wVector;<br>
+ wVector.erase(wVector.begin()); &nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; // This operation will be optimized via using memcpy.</small></div>
+<span class="faq-answer">The following is a full list of the currently recognized type traits. Most of these are implemented as of this writing, but if there is one that is missing, feel free to contact the maintainer of this library and request that it be completed.</span>
+<ul>
+ <li>is_void</li>
+ <li>is_integral</li>
+ <li>is_floating_point</li>
+ <li>is_arithmetic</li>
+ <li>is_fundamental</li>
+ <li>is_const</li>
+ <li>is_volatile</li>
+ <li>is_abstract</li>
+ <li>is_signed</li>
+ <li>is_unsigned</li>
+ <li>is_array</li>
+ <li>is_pointer</li>
+ <li>is_reference</li>
+ <li>is_member_object_pointer</li>
+ <li>is_member_function_pointer</li>
+ <li>is_member_pointer</li>
+ <li>is_enum</li>
+ <li>is_union</li>
+ <li>is_class</li>
+ <li>is_polymorphic</li>
+ <li>is_function</li>
+ <li>is_object</li>
+ <li>is_scalar</li>
+ <li>is_compound</li>
+ <li>is_same</li>
+ <li>is_convertible</li>
+ <li>is_base_of</li>
+ <li>is_empty</li>
+ <li>is_pod</li>
+ <li>is_aligned</li>
+ <li>has_trivial_constructor</li>
+ <li>has_trivial_copy</li>
+ <li>has_trivial_assign</li>
+ <li>has_trivial_destructor</li>
+ <li>has_trivial_relocate<sup>1</sup></li>
+ <li>has_nothrow_constructor</li>
+ <li>has_nothrow_copy</li>
+ <li>has_nothrow_assign</li>
+ <li>has_virtual_destructor</li>
+ <li>alignment_of</li>
+ <li>rank</li>
+ <li>extent</li>
+</ul>
+<span class="faq-answer"><sup>1</sup> has_trivial_relocate is not found in Boost nor the C++ standard update proposal. However, it is very useful in allowing for the generation of optimized object moving operations. It is similar to the is_pod type trait, but goes further and allows non-pod classes to be categorized as relocatable. Such categorization is something that no compiler can do, as only the user can know if it is such. Thus <small style=
+"font-family: Courier New;">EASTL_DECLARE_TRIVIAL_RELOCATE</small>&nbsp; is provided to allow the user to give the compiler a hint.</span>
+<p class="faq-question"> <a name="Best.24"></a>24
+Name containers to track memory usage.
+</p>
+<p class="faq-answer">All EASTL containers which allocate memory have a built-in function called set_name and have a constructor argument that lets you specify the container name. This name is used in memory tracking and allows for the categorization and measurement of memory usage. You merely need to supply a name for your container to use and it does the rest.</p>
+<p class="faq-answer">Here is an example of creating a list and naming it &quot;collision list&quot;:</p>
+<p class="faq-answer"><span class="code-example">list&lt;CollisionData&gt; collisionList(allocator(&quot;collision list&quot;));</span>or<br>
+ <span class="code-example">list&lt;CollisionData&gt; collisionList;<br>
+collisionList.get_allocator().set_name(&quot;collision list&quot;);</span></p>
+<p class="faq-answer">Note that EASTL containers do not copy the name contents but merely copy the name pointer. This is done for simplicity and efficiency. A user can get around this limitation by creating a persistently present string table. Additionally, the user can get around this by declaring static but non-const strings and modifying them at runtime.</p>
+<p class="faq-question"><a name="Best.25"></a>25
+Learn the algorithms.</p>
+<p><span class="faq-answer">EASTL algorithms provide a variety of optimized implementations of fundamental algorithms. Many of the EASTL algorithms are the same as the STL algorithm set, though EASTL adds additional algorithms and additional optimizations not found in STL implementations such as Microsoft's. The copy algorithm, for example, will memcpy data types that have the has_trivial_relocate type trait instead of doing an element-by-element copy.<br>
+ <br>
+ The classifications we use here are not exactly the same as found in the C++ standard; they have been modified to be a little more intuitive. Not all the functions listed here may be yet available in EASTL as you read this. If you want some function then send a request to the maintainer. Detailed documentation for each algorithm is found in algorithm.h or the otherwise corresponding header file for the algorithm.<br>
+ <br>
+ <span style="font-weight: bold;">Search</span></span></p>
+<ul>
+ <li>find, find_if</li>
+ <li>find_end</li>
+ <li>find_first_of</li>
+ <li>adjacent_find</li>
+ <li>binary_search</li>
+ <li>search,&nbsp;search_n</li>
+ <li>lower_bound</li>
+ <li>upper_bound</li>
+ <li>equal_range</li>
+</ul>
+<p class="faq-answer" style="font-weight: bold;">Sort</p>
+<ul>
+ <li>is_sorted</li>
+ <li>quick_sort</li>
+ <li>insertion_sort</li>
+ <li>shell_sort</li>
+ <li>heap_sort</li>
+ <li>merge_sort,&nbsp;merge_sort_buffer</li>
+ <li>merge</li>
+ <li>inplace_merge</li>
+ <li>partial_sort</li>
+ <li>stable_sort</li>
+ <li>partial_sort_copy</li>
+ <li>&lt;other sort functions found in the EASTL bonus directories&gt;</li>
+</ul>
+<p class="faq-answer" style="font-weight: bold;">Modifying</p>
+<ul>
+ <li>fill, fill_n</li>
+ <li>generate,&nbsp;generate_n</li>
+ <li>random_shuffle</li>
+ <li>swap</li>
+ <li>iter_swap</li>
+ <li>swap_ranges</li>
+ <li>remove,&nbsp;remove_if</li>
+ <li>remove_copy,&nbsp;remove_copy_if</li>
+ <li>replace,&nbsp;replace_if</li>
+ <li>replace_copy,&nbsp;replace_copy_if</li>
+ <li>reverse</li>
+ <li>reverse_copy</li>
+ <li>rotate</li>
+ <li>rotate_copy</li>
+ <li>partition</li>
+ <li>stable_partition</li>
+ <li>transform</li>
+ <li>next_permutation</li>
+ <li>prev_permutation</li>
+ <li>unique</li>
+ <li>unique_copy</li>
+</ul>
+<p class="faq-answer" style="font-weight: bold;">Non-Modifying</p>
+<ul>
+ <li>for_each</li>
+ <li>copy</li>
+ <li>copy_backward</li>
+ <li>count,&nbsp;count_if</li>
+ <li>equal</li>
+ <li>mismatch</li>
+ <li>min</li>
+ <li>max</li>
+ <li>min_element</li>
+ <li>max_element</li>
+ <li>lexicographical_compare</li>
+ <li>nth_element</li>
+</ul>
+<p class="faq-answer" style="font-weight: bold;">Heap</p>
+<ul>
+ <li>is_heap</li>
+ <li>make_heap</li>
+ <li>push_heap</li>
+ <li>pop_heap</li>
+ <li>change_heap</li>
+ <li>sort_heap</li>
+ <li>remove_heap</li>
+</ul>
+<p class="faq-answer" style="font-weight: bold;">Set</p>
+<ul>
+ <li>includes</li>
+ <li>set_difference</li>
+ <li>set_symmetric_difference</li>
+ <li>set_intersection</li>
+ <li>set_union</li>
+</ul>
+<p class="faq-question"> <a name="Best.26"></a>26
+Pass and return containers by reference instead of value.</p>
+<p class="faq-answer">If you aren't paying attention you might accidentally write code like this:</p>
+<p class="code-example">void DoSomething(list&lt;Widget&gt; widgetList) {<br>
+ &nbsp;&nbsp;&nbsp;&nbsp;...<br>
+}</p>
+<p class="faq-answer">The problem with the above is that widgetList is passed by value and not by reference. Thus the a copy of the container is made and passed instead of a reference of the container being passed. This may seem obvious to some but this happens periodically and the compiler gives no warning and the code will often execute properly, but inefficiently. Of course there are some occasions where you really do want to pass values instead of references.</p>
+<p class="faq-question"><a name="Best.27"></a>27
+Consider using reset_lose_memory() for fast container teardown.</p>
+<p class="faq-answer">EASTL containers have a reset function which unilaterally resets the container to a newly constructed state. The contents of the container are forgotten; no destructors are called and no memory is freed. This is a risky but power function for the purpose of implementing very fast temporary containers. There are numerous cases in high performance programming when you want to create a temporary container out of a scratch buffer area, use the container, and then just &quot;vaporize&quot; it, as it would be waste of time to go through the trouble of clearing the container and destroying and freeing the objects. Such functionality is often used with hash tables or maps and with a stack allocator (a.k.a. linear allocator).</p>
+<p class="faq-answer">Here's an example of usage of the reset function and a PPMalloc-like StackAllocator:</p>
+<p class="code-example">pStackAllocator-&gt;push_bookmark();<br>
+ hash_set&lt;Widget, less&lt;Widget&gt;, StackAllocator&gt; wSet(pStackAllocator);<br>
+&lt;use wSet&gt;<br>
+ wSet.reset_lose_memory();<br>
+ pStackAllocator-&gt;pop_bookmark();</p>
+<p></p>
+<p class="faq-question"> <a name="Best.28"></a>28
+Consider using fixed_substring instead of copying strings.
+</p>
+<p class="faq-answer">EASTL provides a fixed_substring class which uses a reference to a character segment instead of allocating its own string memory. This can be a more efficient way to work with strings under some circumstances.</p>
+<p class="faq-answer">Here's an example of usage of fixed_substring:</p>
+<p class="code-example">basic_string&lt;char&gt;&nbsp;str(&quot;hello world&quot;);<br>
+ fixed_substring&lt;char&gt; sub(str, 6, 5);&nbsp;// sub == &quot;world&quot;</p>
+<p class="faq-answer">fixed_substring can refer to any character array and not just one that derives from a string object.</p>
+<p class="faq-question"><a name="Best.29" id="Best.29"></a>29
+ Consider using vector::push_back(void).</p>
+<p class="faq-answer">EASTL provides an alternative way to insert elements into containers that avoids copy construction and/or the creation of temporaries. Consider the following code:</p>
+<p class="code-example">vector&lt;Widget&gt; widgetArray;<br>
+ widgetArray.push_back(Widget());</p>
+<p class="faq-answer">The standard vector push_back function requires you to supply an object to copy from. This incurs the cost of the creation of a temporary and for some types of classes or situations this cost may be undesirable. It additionally requires that your contained class support copy-construction whereas you may not be able to support copy construction. As an alternative, EASTL provides a push_back(void) function which requires nothing to copy from but instead constructs the object in place in the container. So you can do this:</p>
+<p class="code-example">vector&lt;Widget&gt; widgetArray;<br>
+ widgetArray.push_back();<br>
+widgetArray.back().x = 0; // Example of how to reference the new object. </p>
+<p class="faq-answer">Other containers with such copy-less functions include:</p>
+<p class="code-example">vector::push_back()<br>
+ deque::push_back()<br>
+ deque::push_front()<br>
+ list::push_back()<br>
+ list::push_front()<br>
+ slist::push_front()<br>
+ map::insert(const key_type&amp; key) <br>
+ multimap::insert(const key_type&amp; key) <br>
+ hash_map::insert(const key_type&amp; key) <br>
+ hash_multimap::insert(const key_type&amp; key) </p>
+<p class="faq-answer">Note that the map functions above allow you to insert a default value specified by key alone and not a value_type like with the other map insert functions.</p>
+<hr style="width: 100%; height: 2px;">
+<p>End of document<br>
+<br>
+<br>
+<br>
+<br></p>
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTL Design.html b/EASTL/doc/html/EASTL Design.html
new file mode 100644
index 0000000..479dacc
--- /dev/null
+++ b/EASTL/doc/html/EASTL Design.html
@@ -0,0 +1,424 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL Design</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta name="author" content="Paul Pedriana">
+ <meta name="description" content="Discusses various design aspects of EASTL.">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+</head>
+<body>
+<h1>EASTL Design</h1>
+<h2> Introduction</h2>
+<p>EASTL (EA Standard Template Library) is designed to be a template library which encompasses and extends the
+ functionality of standard C++ STL while improving it in various ways useful to game development. Much of EASTL's design
+ is identical to standard STL, as the large majority of the STL is well-designed for many uses. The primary areas where
+EASTL deviates from standard STL implementations are essentially the following:</p>
+<ul>
+<li>EASTL has a simplified and more flexible custom allocation scheme.</li>
+<li>EASTL has significantly easier to read code.</li>
+<li>EASTL has extension containers and algorithms.</li>
+<li>EASTL has optimizations designed for game development.</li>
+</ul>
+<p>Of the above items, the only one which is an incompatible difference with STL is the case of memory allocation. The
+ method for defining a custom allocator for EASTL is slightly different than that of standard STL, though they are 90%
+ similar. The 10% difference, however, is what makes EASTL generally easier and more powerful to work with than standard
+STL. Containers without custom allocators act identically between EASTL and standard STL.</p>
+<h2>Motivations</h2>
+<p>Our motifications for making EASTL drive the design of EASTL. As identified in the EASTL RFC (Request for Comment), the
+ primary reasons for implementing a custom version of the STL are:
+</p>
+<ul>
+<li><span class="458151900-03082005"><font><font>Some STL implementations (especially Microsoft STL) have inferior
+performance characteristics that make them unsuitable for game development. EASTL is faster than all existing STL
+implementations.</font></font></span></li>
+<li>The STL is sometimes hard to debug, as most STL implementations use cryptic variable names and unusual data
+structures.</li>
+<li>STL allocators are sometimes painful to work with, as they have many requirements and cannot be modified once bound
+to a container.</li>
+<li>The STL includes excess functionality that can lead to larger code than desirable. It's not very easy to tell
+programmers they shouldn't use that functionality.</li>
+<li>The STL is implemented with very deep function calls. This results is unacceptable performance in non-optimized
+builds and sometimes in optimized builds as well.</li>
+<li>The STL doesn't support alignment of contained objects.</li>
+<li>STL containers won't let you insert an entry into a container without supplying an entry to copy from. This can be
+inefficient.</li>
+<li>Useful STL extensions (e.g. slist, hash_map, shared_ptr) found in existing STL implementations such as STLPort are
+not portable because they don't exist in other versions of STL or aren't consistent between STL versions.<br></li>
+<li>The STL lacks useful extensions that game programmers find useful (e.g. intrusive_list) but which could be best
+optimized in a portable STL environment.</li>
+<li>The STL has specifications that limit our ability to use it efficiently. For example, STL vectors are not
+guaranteed to use contiguous memory and so cannot be safely used as an array.</li>
+<li>The STL puts an emphasis on correctness before performance, whereas sometimes you can get significant performance
+gains by making things less academcially pure.</li>
+<li>STL containers have private implementations that don't allow you to work with their data in a portable way, yet
+sometimes this is an important thing to be able to do (e.g. node pools).</li>
+<li>All existing versions of STL allocate memory in empty versions of at least some of their containers. This is not
+ideal and prevents optimizations such as container memory resets that can greatly increase performance in some
+situations.</li>
+<li>The STL is slow to compile, as most modern STL implementations are very large.<br></li>
+<li>There are legal issues that make it hard for us to freely use portable STL implementations such as STLPort.</li>
+<li>We have no say in the design and implementation of the STL and so are unable to change it to work for our
+needs.</li>
+</ul>
+<h2>Prime Directives</h2>
+<p>The implementation of EASTL is guided foremost by the
+following directives which are listed in order of importance.</p>
+<ol>
+<li>Efficiency (speed and memory usage)</li>
+<li>Correctness</li>
+<li>Portability</li>
+<li>Readability</li>
+</ol>
+<p>Note that unlike commercial STL implementations which must put correctness above all, we put a higher value on
+ efficiency. As a result, some functionality may have some usage limitation that is not present in other similar systems
+but which allows for more efficient operation, especially on the platforms of significance to us.</p>
+<p>Portability is significant, but not critical. Yes, EASTL must compile and run on all platforms that we will ship games
+ for. But we don't take that to mean under all compilers that could be conceivably used for such platforms. For example,
+ Microsoft VC6 can be used to compile Windows programs, but VC6's C++ support is too weak for EASTL and so you simply
+cannot use EASTL under VC6.</p>
+<p>Readability is something that EASTL achieves better than many other templated libraries, particularly Microsoft STL and
+ STLPort. We make every attempt to make EASTL code clean and sensible. Sometimes our need to provide optimizations
+ (particularly related to type_traits and iterator types) results in less simple code, but efficiency happens to be our
+prime directive and so it overrides all other considerations.</p>
+<h2> Thread Safety</h2>
+<p>It's not simple enough to simply say that EASTL is thread-safe or thread-unsafe. However, we can say that with respect
+to thread safety that EASTL does the right thing.</p>
+<p>Individual EASTL containers are not thread-safe. That is,&nbsp;access to an instance of a container from multiple
+ threads at the same time is unsafe if any of those accesses are modifying operations. A given container can be read
+ from multiple threads simultaneously as well as any other standalone data structure. If a user wants to be able to have
+ modifying access an instance of a container from multiple threads, it is up to the user to ensure that proper thread
+synchronization occurs. This usually means using a mutex.</p>
+<p>EASTL classes other than containers are the same as containers with respect to thread safety. EASTL functions (e.g.
+ algorithms) are inherently thread-safe as they have no instance data and operate entirely on the stack. As of this
+writing, no EASTL function allocates memory and thus doesn't bring thread safety issues via that means.</p>
+<p>The user may well need to be concerned about thread safety with respect to memory allocation. If the user modifies
+ containers from multiple threads, then allocators are going to be accessed from multiple threads. If an allocator is
+ shared across multiple container instances (of the same type of container or not), then mutexes (as discussed above)
+ the user uses to protect access to indivudual instances will not suffice to provide thread safety for allocators used
+ across multiple instances. The conventional solution here is to use a mutex within the allocator if it is exected to be
+used by multiple threads.</p>
+<p>EASTL&nbsp;uses neither static nor global variables and thus there are no inter-instance dependencies that would make
+thread safety difficult for the user to implement.</p>
+<h2> Container Design</h2>
+<p>All EASTL containers follow a set of consistent conventions. Here we define the prototypical container which has the
+ minimal functionality that all (non-adapter) containers must have. Some containers (e.g. stack) are explicitly adapter
+ containers and thus wrap or inherit the properties of the wrapped container in a way that is implementation
+ specific.<br>
+</p>
+<div class="code-example" style="margin-left: 40px;"><small><span style="font-family: Courier New;">template &lt;class T, class Allocator =
+EASTLAllocator&gt;<br>
+class container<br>
+{<br>
+public:<br>
+&nbsp; &nbsp; typedef container&lt;T, Allocator&gt; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;this_type;<br>
+&nbsp;&nbsp;&nbsp; typedef
+T&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp; &nbsp;
+&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; value_type;<br>
+&nbsp;&nbsp;&nbsp; typedef T*&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;
+&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;&nbsp; pointer;<br>
+&nbsp;&nbsp;&nbsp; typedef const T*&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;
+&nbsp; &nbsp;const_pointer;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;typedef
+T&amp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;reference;<br>
+
+&nbsp;&nbsp;&nbsp;&nbsp;typedef const
+T&amp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;const_reference;<br>
+
+&nbsp;&nbsp;&nbsp;&nbsp;typedef
+ptrdiff_t&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;difference_type;<br>
+
+&nbsp;&nbsp;&nbsp;&nbsp;typedef
+impl_defined&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;size_type;<br>
+
+&nbsp;&nbsp;&nbsp; typedef impl-defined&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;
+&nbsp; &nbsp; iterator;<br>
+&nbsp;&nbsp;&nbsp; typedef impl-defined&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;
+&nbsp; &nbsp; const_iterator;<br>
+&nbsp;&nbsp;&nbsp; typedef reverse_iterator&lt;iterator&gt; &nbsp; &nbsp; &nbsp; &nbsp; reverse_iterator;<br>
+&nbsp;&nbsp;&nbsp; typedef reverse_iterator&lt;const_iterator&gt; &nbsp; reverse_const_iterator;<br>
+&nbsp;&nbsp;&nbsp; typedef Allocator&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp;
+&nbsp; &nbsp; &nbsp; &nbsp; allocator_type;<br>
+<br>
+public:<br>
+&nbsp;&nbsp;&nbsp; container(</span></small><small><span style="font-family: Courier New;">const</span></small>
+<small><span style="font-family: Courier New;">allocator_type&amp; allocator = allocator_type());<br>
+&nbsp;&nbsp;&nbsp; container(const</span></small> <small><span style=
+"font-family: Courier New;">this_type</span></small><small><span style="font-family: Courier New;">&amp;
+x</span></small><small><span style="font-family: Courier New;">);<br>
+<br>
+&nbsp;&nbsp;&nbsp;&nbsp;</span></small><small><span style=
+"font-family: Courier New;">this_type</span></small><small><span style="font-family: Courier New;">&amp;
+operator=(</span></small><small><span style="font-family: Courier New;">this_type</span></small><small><span style=
+"font-family: Courier New;">&amp; x);<br>
+&nbsp; &nbsp; void swap(</span></small><small><span style=
+"font-family: Courier New;">this_type</span></small><small><span style="font-family: Courier New;">&amp; x);<br>
+&nbsp;&nbsp;&nbsp; void reset();<br>
+<br>
+&nbsp;&nbsp;&nbsp; allocator_type&amp; get_allocator();<br>
+&nbsp;&nbsp;&nbsp; void &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;set_allocator(allocator_type&amp; allocator);<br>
+<br>
+&nbsp;&nbsp;&nbsp;&nbsp;iterator&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; begin();<br>
+&nbsp;&nbsp;&nbsp;&nbsp;const_iterator begin() const;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;iterator&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; end();<br>
+&nbsp;&nbsp;&nbsp;&nbsp;const_iterator end() const;<br>
+<br>
+&nbsp;&nbsp;&nbsp; bool validate() const;<br></span></small> <small><span style=
+"font-family: Courier New;">&nbsp;&nbsp;&nbsp; int&nbsp; validate_iterator(const_iterator i)
+const;<br></span></small><br>
+<small><span style="font-family: Courier New;">protected:<br>
+&nbsp;&nbsp;&nbsp; allocator_type mAllocator;<br>
+};<br>
+<br>
+template &lt;class T,</span></small> <small><span style="font-family: Courier New;">class
+Allocator</span></small><small><span style="font-family: Courier New;">&gt;<br>
+bool operator==(const container&lt;T, Allocator&gt;&amp; a, const container&lt;T,</span></small> <small><span style=
+"font-family: Courier New;">Allocator</span></small><small><span style="font-family: Courier New;">&gt;&amp; b);<br>
+<br>
+template &lt;class T,</span></small> <small><span style="font-family: Courier New;">class
+Allocator</span></small><small><span style="font-family: Courier New;">&gt;<br>
+bool operator!=(const container&lt;T,</span></small> <small><span style=
+"font-family: Courier New;">Allocator</span></small><small><span style="font-family: Courier New;">&gt;&amp; a, const
+container&lt;T,</span></small> <small><span style=
+"font-family: Courier New;">Allocator</span></small><small><span style="font-family: Courier New;">&gt;&amp;
+b);</span></small></div>
+<br>
+Notes:
+<ul>
+<li>Swapped containers do not swap their allocators.</li>
+<li>Newly constructed empty containers do no memory allocation. Some STL and other container libraries allocate an
+initial node from the class memory allocator. EASTL containers by design never do this. If a container needs an initial
+node, that node should be made part of the container itself or be a static empty node object.</li>
+<li>Empty containers (new or otherwise) contain no constructed objects, including those that might be in an 'end' node.
+Similarly, no user object (e.g. of type T) should be constructed unless required by the design and unless documented in
+the cotainer/algorithm contract.&nbsp;</li>
+<li>The reset function is a special extension function which unilaterally resets the container to an empty state
+without freeing the memory of the contained objects. This is useful for very quickly tearing down a container built
+into scratch memory. No memory is allocated by reset, and the container has no allocatedmemory after the reset is
+executed.</li>
+<li>The validate and validate_iterator functions provide explicit container and iterator validation. EASTL provides an option to do implicit automatic iterator and container validation, but full validation (which can be potentially extensive) has too much of a performance cost to execute implicitly, even in a debug build. So EASTL provides these explicit functions which can be called by the user at the appropriate time and in optimized builds as well as debug builds. </li>
+</ul>
+<h2>Allocator Design</h2>
+<p>The most significant difference between EASTL and standard C++ STL is that standard STL containers are templated on an
+ allocator class with the interface defined in std::allocator. std::allocator is defined in the C++ standard as
+ this:<br>
+</p>
+<div class="code-example" style="margin-left: 40px;"><small><span style="font-family: Courier New;">// Standard C++ allocator<br>
+ <br>
+ template &lt;class T&gt;<br>
+class allocator</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">{</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">public:</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; typedef size_t &nbsp; &nbsp;size_type;</span><br style=
+"font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; typedef ptrdiff_t difference_type;</span><br style=
+"font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; typedef T* &nbsp; &nbsp; &nbsp;&nbsp; pointer;</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; typedef const T* &nbsp;const_pointer;</span><br style=
+"font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; typedef T&amp; &nbsp; &nbsp; &nbsp;
+&nbsp;reference;</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; typedef const
+T&amp;&nbsp;&nbsp;const_reference;</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; typedef T &nbsp; &nbsp; &nbsp; &nbsp; value_type;</span><br style="font-family: Courier New;">
+<br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; template &lt;class U&gt;<br>
+&nbsp; &nbsp;&nbsp;struct rebind { typedef allocator&lt;U&gt; other; };</span><br style="font-family: Courier New;">
+<br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; allocator() throw();</span><br style=
+"font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; allocator(const allocator&amp;) throw();</span><br style=
+"font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; template &lt;class U&gt;<br>
+&nbsp; &nbsp; allocator(const allocator&lt;U&gt;&amp;) throw();<br>
+<br style="font-family: Courier New;">
+</span> <span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp;~allocator()
+throw();<br>
+<br style="font-family: Courier New;">
+</span> <span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; pointer &nbsp;
+&nbsp; &nbsp; address(reference x) const;</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; const_pointer address(const_reference x)
+const;</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; pointer &nbsp; &nbsp; &nbsp; allocate(size_type, typename
+allocator&lt;void&gt;::const_pointer hint = 0);</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; void &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;deallocate(pointer p,
+size_type n);</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; size_type &nbsp; &nbsp; max_size() const
+throw();</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; void &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;construct(pointer p,
+const T&amp; val);</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; void &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;destroy(pointer
+p);</span><br style="font-family: Courier New;">
+<span style="font-family: Courier New;">};</span></small></div>
+<p> Each STL container needs to have an allocator templated on container type T associated with it. The problem with this
+is that allocators for containers are defined at the class level and not the instance level. This makes it painful to
+define custom allocators for containers and adds to code bloat. Also, it turns out that the containers don't actually
+use allocator&lt;T&gt; but instead use allocator&lt;T&gt;::rebind&lt;U&gt;::other. Lastly, you cannot access this
+allocator after the container is constructed. There are some good academic reasons why the C++ standard works this way,
+but it results in a lot of unnecessary pain and makes concepts like memory tracking much harder to implement.</p>
+<p>What EASTL does is use a more familiar memory allocation pattern whereby there is only one allocator class interface
+ and it is used by all containers. Additionally EASTL containers let you access their allocators and query them, name
+them, change them, etc.</p>
+<p>EASTL has chosen to make allocators not be copied between containers during container swap and assign operations. This
+ means that if container A swaps its contents with container B, both containers retain their original allocators.
+ Similarly, assigning container A to container B causes container B to retain its original allocator. Containers that
+ are equivalent should report so via operator==; EASTL will do a smart swap if allocators are equal, and a brute-force
+ swap otherwise.<br>
+</p>
+<div class="code-example" style="margin-left: 40px;"><small><span style="font-family: Courier New;">// EASTL allocator<br>
+<br>
+class allocator<br>
+{<br>
+public:<br>
+&nbsp;&nbsp;&nbsp; allocator(const char* pName = NULL);<br>
+<br>
+&nbsp;&nbsp;&nbsp; void* allocate(size_t n, int flags = 0);<br>
+&nbsp;&nbsp;&nbsp; void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);<br>
+&nbsp;&nbsp;&nbsp; void&nbsp; deallocate(void* p, size_t n);<br>
+<br>
+&nbsp;&nbsp;&nbsp; const char* get_name() const;<br>
+&nbsp;&nbsp;&nbsp; void&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; set_name(const char* pName);<br>
+};<br>
+<br>
+allocator* GetDefaultAllocator();</span></small></div>
+<h2>Fixed Size Container Design</h2>
+<p>EASTL supplies a set of&nbsp;fixed-size containers that the user can use, though the user can also implement their own
+ versions. So in addition to class list there is class fixed_list. The fixed_list class implements a linked list via a
+ fixed-size pool of contiguous memory which has no space overhead (unlike with a regular heap), doesn't cause
+fragmentation, and allocates very quickly.</p>
+<p>EASTL implements fixed containers via subclasses of regular containers which set the regular container's allocator to
+ point to themselves. Thus the implementation for fixed_list is very tiny and consists of little more
+ than&nbsp;constructor and allocator functions. This design has some advantages but has one small disadvantage. The
+ primary advantages are primarily that code bloat is reduced and that the implementation is simple and the user can
+ easily extend it. The primary disadvantage is that the parent list class ends up with a pointer to itself and thus has
+ 4 bytes that could arguably be saved if system was designed differently. That different design would be to make the
+ list class have a policy template parameter which specifies that it is a fixed pool container. EASTL chose not to
+ follow the policy design because it would complicate the implementation, make it harder for the user to extend the
+ container, and would potentially waste more memory due to code bloat than it would save due to the 4 byte savings it
+achieves in container instances.</p>
+<h2>Algorithm Design</h2>
+<p>EASTL algorithms very much follow the philosophy of standard C++ algorithms, as this philosophy is sound and efficient.
+ One of the primary aspects of algorithms is that they work on iterators and not containers. You will note for example
+ that the find algorithm takes a first and last iterator as arguments and not a container. This has two primary
+ benefits: it allows the user to specify a subrange of the container to search within and it allows the user to apply
+the find algorithm to sequences that aren't containers (e.g. a C array).</p>
+<p>EASTL algorithms are optimized at least as well as the best STL algorithms found in commercial libraries and are
+ significantly optimized over the algorithms that come with the first-party STLs that come with compilers. Most significantly, EASTL algorithms take advantage of type traits of contained classes and
+ take advantage of iterator types to optimize code generation. For example, if you resize an array of integers (or other "pod" type), EASTL will detect that this can be done with a memcpy instead of a slow object-by-object move as would
+Micrsoft STL.</p>
+<p>The optimizations found in EASTL algorithms and the supporting code in EASTL type traits consistts of some fairly
+ tricky advanced C++ and while it is fairly easy to read, it requires a C++ expert (language lawyer, really) to
+ implement confidently. The result of this is that it takes more effort to develop and maintain EASTL than it would to
+maintain a simpler library. However, the performance advantages have been deemed worth the tradeoff.</p>
+<h2>Smart Pointer Design</h2>
+<p>EASTL implements the following smart pointer types:</p>
+<ul>
+<li>shared_ptr</li>
+<li>shared_array</li>
+<li>weak_ptr</li>
+<li>instrusive_ptr</li>
+<li>scoped_ptr</li>
+<li>scoped_array</li>
+<li>linked_ptr</li>
+<li>linked_array</li>
+</ul>
+All but linked_ptr/linked_array are well-known smart pointers from the Boost library. The behaviour of these smart
+pointers is very similar to those from Boost with two exceptions:
+<ul>
+<li>EASTL smart pointers allow you to assign an allocator to them.</li>
+<li>EASTL shared_ptr implements deletion via a templated parameter instead of a dynamically allocated&nbsp;virtual
+member object interface.</li>
+</ul>
+<p>With respect to assigning an allocator, this gives EASTL more control over memory allocation and tracking, as Boost
+smart pointers unilaterally use global operator new to allocate memory from the global heap.</p>
+<p>With respect to shared_ptr deletion, EASTL's current design of using a templated parameter is questionable, but does
+ have some reason. The advantage is that EASTL avoids a heap allocation, avoids virtual function calls, and avoids
+ templated class proliferation. The disadvantage is that EASTL shared_ptr containers which hold void pointers can't call
+ the destructors of their contained objects unless the user manually specifies a custom deleter template parameter. This
+ is case whereby EASTL is more efficient but less safe. We can revisit this topic in the future if it becomes an
+ issue.</p>
+<h2>list::size is O(n)</h2>
+<p>As of this writing, EASTL has three linked list classes: list, slist, and intrusive_list. In each of these classes, the
+ size of the list is not cached in a member size variable. The result of this is that getting the size of a list is not
+ a fast operation, as it requires traversing the list and counting the nodes. We could make the list::size function be
+ fast by having a member mSize variable which tracks the size as we insert and delete items. There are reasons for
+ having such functionality and reasons for not having such functionality. We currently choose to not have a member mSize
+ variable as it would add four bytes to the class, add a tiny amount of processing to functions such as insert and
+ erase, and would only serve to improve the size function, but no others. In the case of intrusive_list, it would do
+ additional harm. The alternative&nbsp;argument is that the C++ standard states that std::list should be an O(1)
+ operation (i.e. have a member size variable), that many C++ standard library list&nbsp;implementations do so, that the
+ size is but an integer which is quick to update, and that many users expect to have a fast size function. In the final
+ analysis, we are developing a library for game development and performance is paramount, so we choose to not cache the
+list size. The user can always implement a size cache himself.</p>
+<h2>basic_string doesn't use copy-on-write</h2>
+<p>The primary benefit of CoW is that it allows for the sharing of string data between two string objects. Thus if you say
+ this:</p>
+<p class="code-example"> string a("hello");<br style=
+"font-family: Courier New;">
+ string b(a);</p>
+<p>the "hello" will be shared between a and b. If you then say this:</p>
+<p class="code-example"> a = "world";</p>
+<p>then <span style="font-family: Courier New;">a</span> will release its reference to "hello" and leave b with the only
+ reference to it. Normally this functionality is accomplished via reference counting and with atomic operations or
+mutexes.</p>
+<p> The C++ standard does not say anything about basic_string and CoW. However, for a basic_string implementation to be
+ standards-conforming, a number of issues arise which dictate some things about how one would have to implement a CoW
+ string. The discussion of these issues will not be rehashed here, as you can read the references below for better
+ detail than can be provided in the&nbsp;space we have here. However, we can say that the C++ standard is sensible
+ and&nbsp;that anything we try to do here to allow for an efficient CoW implementation would result in a generally
+unacceptable string interface.</p>
+<p>The disadvantages of CoW strings are:</p>
+<ul>
+<li>A reference count needs to exist with the string, which increases string memory usage.</li>
+<li>With thread safety, atomic operations and mutex locks are expensive, especially&nbsp;on weaker memory systems such
+as console gaming platforms.</li>
+<li>All non-const string accessor functions need to do a sharing check the the first such check needs to detach the
+string. Similarly, all string assignments need to do a sharing check as well. If you access the string before doing an
+assignment, the assignment doesn't result in a shared string, because the string has already been detached.</li>
+<li>String sharing doesn't happen the large majority of the time. In some cases,&nbsp;the total sum of the reference
+count memory can exceed any memory savings gained by the strings that share representations.&nbsp;</li>
+</ul>
+<p>The addition of a cow_string class is under consideration for EASTL. There are conceivably some systems which have
+ string usage patterns which would benefit from CoW sharing. Such functionality is best saved for a separate
+string&nbsp;implementation so that the other string uses aren't penalized.</p>
+<p>This is a good starting HTML reference on the topic:</p>
+<blockquote>
+ <p>
+ <a href=
+"http://www.gotw.ca/publications/optimizations.htm">http://www.gotw.ca/publications/optimizations.htm</a></p>
+</blockquote>
+<p>Here is a well-known Usenet discussion on the topic:</p>
+<blockquote>
+ <p><a href=
+"http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d">http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d</a></p>
+</blockquote>
+<hr style="width: 100%; height: 2px;">
+End of document<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTL FAQ.html b/EASTL/doc/html/EASTL FAQ.html
new file mode 100644
index 0000000..04b1578
--- /dev/null
+++ b/EASTL/doc/html/EASTL FAQ.html
@@ -0,0 +1,2385 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL FAQ</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta name="author" content="Paul Pedriana">
+ <meta name="description" content="Frequently asked questions about EASTL.">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+<style type="text/css">
+<!--
+.style1 {font-family: "Courier New"}
+.style2 {color: #339900}
+.style3 {color: #FF0000}
+.style4 {color: #999999}
+.style5 {font-size: 10pt}
+-->
+</style>
+</head>
+<body>
+<h1>EASTL FAQ</h1>
+<p>We provide a FAQ (frequently asked questions) list here for a number of commonly asked questions about EASTL and STL in
+general. Feel free to suggest new FAQ additions based on your own experience.</p>
+<h2>Information</h2>
+<table style="width: 100%;" border="0" cellpadding="0" cellspacing="0" cols="2">
+<tbody>
+<tr>
+<td>1</td>
+<td><a href="#Info.1">What is EASTL?</a></td>
+</tr>
+<tr>
+<td>2</td>
+<td><a href="#Info.2">What uses&nbsp;are EASTL suitable for?</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">3<br></td>
+<td style="vertical-align: top; text-align: left;"><a href="#Info.3">How does EASTL differ from standard C++
+STL?</a></td>
+</tr>
+<tr>
+<td>4</td>
+<td><a href="#Info.4">Is EASTL thread-safe?</a></td>
+</tr>
+<tr>
+<td>5</td>
+<td><a href="#Info.5">What platforms/compilers does EASTL support?</a></td>
+</tr>
+<tr>
+<td>6</td>
+<td><a href="#Info.6">Why is there EASTL when there is the STL?</a></td>
+</tr>
+<tr>
+<td>7</td>
+<td><a href="#Info.7">Can I mix EASTL with standard C++ STL?</a></td>
+</tr>
+<tr>
+<td>8</td>
+<td><a href="#Info.8">Where can I learn more about STL and EASTL?</a></td>
+</tr>
+<tr>
+<td>9</td>
+<td><a href="#Info.9">What is the legal status of EASTL?</a></td>
+</tr>
+<tr>
+<td>10</td>
+<td><a href="#Info.10">Does EASTL deal with compiler exception handling settings?</a></td>
+</tr>
+<tr>
+<td>11</td>
+<td><a href="#Info.11">What C++ language features does EASTL use (e.g. virtual functions)?</a></td>
+</tr>
+<tr>
+<td>12</td>
+<td><a href="#Info.12">What compiler warning levels does EASTL support?</a></td>
+</tr>
+<tr>
+<td>13</td>
+<td><a href="#Info.13">Is EASTL compatible with Lint?</a></td>
+</tr>
+<tr>
+<td>14</td>
+<td><a href="#Info.14">What compiler settings do I need to compile EASTL?</a></td>
+</tr>
+<tr>
+<td>15</td>
+<td><a href="#Info.15">How hard is it to incorporate EASTL into my project?</a></td>
+</tr>
+<tr>
+<td>16</td>
+<td><a href="#Info.16">Should I use EASTL instead of std STL or instead of my custom&nbsp;library?</a></td>
+</tr>
+<tr>
+<td>17</td>
+<td><a href="#Info.17">I think I've found a bug. What do I do?</a></td>
+</tr>
+<tr>
+<td>18</td>
+<td><a href="#Info.18">Can EASTL be used by third party EA developers?</a></td>
+</tr>
+</tbody>
+</table>
+<h2> Performance
+</h2>
+<table style="width: 100%;" border="0" cellpadding="0" cellspacing="0" cols="2">
+<tbody>
+<tr>
+<td style="width: 28px;">1</td>
+<td><a href="#Perf.1">How efficient is EASTL compared to standard C++ STL implementations?</a></td>
+</tr>
+<tr>
+<td>2</td>
+<td><a href="#Perf.2">How efficient is EASTL in general?</a></td>
+</tr>
+<tr>
+<td>3</td>
+<td><a href="#Perf.3">Strings don't appear to use the "copy-on-write" optimization. Why not?</a></td>
+</tr>
+<tr>
+<td>4</td>
+<td><a href="#Perf.4">Does EASTL cause code bloat, given that it uses templates?</a></td>
+</tr>
+<tr>
+<td>5</td>
+<td><a href="#Perf.5">Don't STL and EASTL containers fragment memory?</a></td>
+</tr>
+<tr>
+<td>6</td>
+<td><a href="#Perf.6">I don't see container optimizations for equivalent scalar types such as pointer types.
+Why?</a></td>
+</tr>
+<tr>
+<td>7</td>
+<td><a href="#Perf.7">I've seen some STL's provide a default quick "node allocator" as the default allocator. Why
+doesn't EASTL do this?</a></td>
+</tr>
+<tr>
+<td>8</td>
+<td><a href="#Perf.8">Templates sometimes seem to take a long time to compile. Why do I do about that?</a></td>
+</tr>
+<tr>
+<td>9</td>
+<td><a href="#Cont.8">How do I assign a custom allocator to an EASTL container?</a></td>
+</tr>
+<tr>
+<td>10</td>
+<td><a href="#Perf.10">How well does EASTL inline?</a></td>
+</tr>
+<tr>
+<td>11</td>
+<td><a href="#Perf.11">How do I control function inlining?</a></td>
+</tr>
+<tr>
+<td>12</td>
+<td><a href="#Perf.12">C++ / EASTL seems to bloat my .obj files much more than C does.</a></td>
+</tr>
+<tr>
+<td>13</td>
+<td><a href="#Perf.13">What are the best compiler settings for EASTL?</a></td>
+</tr>
+</tbody>
+</table>
+<h2>Problems</h2>
+<table style="width: 100%;" border="0" cellpadding="0" cellspacing="0" cols="2">
+<tbody>
+<tr>
+<td style="width: 28px;">1</td>
+<td><a href="#Prob.1">I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?</a></td>
+</tr>
+<tr>
+<td>2</td>
+ <td><a href="#Prob.2">I am getting compiler warnings (e.g. C4244, C4242 or C4267) that make no sense. Why?</a></td>
+</tr>
+<tr>
+<td>3</td>
+<td><a href="#Prob.3">I am getting compiler warning C4530, which complains about exception handling and "unwind
+semantics." What gives?</a></td>
+</tr>
+<tr>
+<td>4</td>
+<td><a href="#Prob.4">Why are tree-based containers hard to read with a debugger?</a></td>
+</tr>
+<tr>
+<td>5</td>
+<td><a href="#Prob.5">The EASTL source code is sometimes rather complicated looking. Why is that?</a></td>
+</tr>
+<tr>
+<td>6</td>
+<td><a href="#Prob.6">When I get compilation errors, they are very long and complicated looking. What do I do?</a></td>
+</tr>
+<tr>
+<td>7</td>
+<td><a href="#Prob.7">Templates sometimes seem to take a long time to compile. Why do I do about that?</a></td>
+</tr>
+<tr>
+<td>8</td>
+<td><a href="#Prob.8">I get the compiler error: <small>"template instantiation depth exceeds maximum of 17.&nbsp;use
+-ftemplate-depth-NN to increase the maximum"</small></a></td>
+</tr>
+<tr>
+<td>9</td>
+<td><a href="#Prob.9">I'm getting errors about min and max while compiling.</a></td>
+</tr>
+<tr>
+<td>10</td>
+<td><a href="#Prob.10">C++ / EASTL seems to bloat my .obj files much more than C does.</a></td>
+</tr>
+<tr>
+<td>11</td>
+<td><a href="#Prob.11">I'm getting compiler errors regarding operator new being&nbsp;previously defined.</a></td>
+</tr>
+<tr>
+<td>12</td>
+<td><a href="#Prob.12">I'm getting errors related to wchar_t string &nbsp;functions such as wcslen.</a></td>
+</tr>
+<tr>
+<td>13</td>
+<td><a href="#Prob.13">I'm getting compiler warning C4619: there is no warning number Cxxxx (e.g. C4217).</a></td>
+</tr>
+<tr>
+<td>14</td>
+<td><a href="#Prob.14">My stack-based fixed_vector is not respecting the object alignment requirements.</a></td>
+</tr>
+<tr>
+ <td>15</td>
+ <td><a href="#Prob.15">I am getting compiler errors when using GCC under XCode (Macintosh/iphone).</a></td>
+</tr>
+<tr>
+ <td>16</td>
+ <td><a href="#Prob.16">I am getting linker errors about Vsnprintf8 or Vsnprintf16.</a></td>
+</tr>
+<tr>
+ <td>17</td>
+ <td><a href="#Prob.17">I am getting compiler errors about UINT64_C or UINT32_C</a>. </td>
+</tr>
+<tr>
+ <td>18</td>
+ <td><a href="#Prob.18">I am getting a crash with a global EASTL container. </a></td>
+</tr>
+<tr>
+ <td>19</td>
+ <td><a href="#Prob.19">Why doesn't EASTL support passing NULL to functions with pointer arguments? </a></td>
+</tr>
+</tbody>
+</table>
+<h2>Debug</h2>
+<table style="width: 100%;" border="0" cellpadding="0" cellspacing="0" cols="2">
+<tbody>
+<tr>
+<td style="width: 28px;">1</td>
+<td><a href="#Debug.1">How do I get VC++ mouse-overs to view templated data?</a></td>
+</tr>
+<tr>
+<td>2</td>
+<td><a href="#Debug.2">How do I view containers if the visualizer/tooltip support is not present?</a></td>
+</tr>
+<tr>
+<td>3</td>
+<td><a href="#Debug.3">The EASTL source code is sometimes rather complicated looking. Why is that?</a></td>
+</tr>
+<tr>
+<td>4</td>
+<td><a href="#Debug.4">When I get compilation errors, they are very long and complicated looking. What do I
+do?</a></td>
+</tr>
+<tr>
+<td>5</td>
+<td><a href="#Debug.5">How do I measure hash table balancing?</a></td>
+</tr>
+</tbody>
+</table>
+<h2>Containers</h2>
+<table style="width: 100%;" border="0" cellpadding="0" cellspacing="0" cols="2">
+<tbody>
+<tr>
+<td style="width: 28px;">1</td>
+<td><a href="#Cont.1">Why do some containers have "fixed" versions (e.g. fixed_list) but others(e.g. deque) don't have
+fixed versions?</a></td>
+</tr>
+<tr>
+<td>2</td>
+<td><a href="#Cont.2">Can I mix EASTL with standard C++ STL?</a></td>
+</tr>
+<tr>
+<td>3</td>
+<td><a href="#Cont.3">Why are there so many containers?</a></td>
+</tr>
+<tr>
+<td>4</td>
+<td><a href="#Cont.4">Don't STL and EASTL containers fragment memory?</a></td>
+</tr>
+<tr>
+<td>5</td>
+<td><a href="#Cont.5">I don't see container optimizations for equivalent scalar types such as pointer types.
+Why?</a></td>
+</tr>
+<tr>
+<td>6</td>
+<td><a href="#Cont.6">What about alternative container and algorithm implementations (e.g. treaps, skip lists, avl
+trees)?</a></td>
+</tr>
+<tr>
+<td>7</td>
+<td><a href="#Cont.7">Why are containers hard to read with a debugger?</a></td>
+</tr>
+<tr>
+<td>8</td>
+<td><a href="#Cont.8">How do I assign a custom allocator to an EASTL container?</a></td>
+</tr>
+<tr>
+<td>9</td>
+<td><a href="#Cont.9">How do I set the VC++ debugger to display EASTL container data with tooltips?</a></td>
+</tr>
+<tr>
+<td>10</td>
+<td><a href="#Cont.10">How do I use a memory pool with a container?</a></td>
+</tr>
+<tr>
+<td>11</td>
+<td><a href="#Cont.11">How do I write a comparison (operator&lt;()) for a struct that contains two or more
+members?</a></td>
+</tr>
+<tr>
+<td>12</td>
+<td><a href="#Cont.12">Why doesn't container X have member function Y?</a></td>
+</tr>
+<tr>
+<td>13</td>
+<td><a href="#Cont.13">How do I search a hash_map of strings via a char pointer efficiently? If I use map.find("hello")
+it creates a temporary string, which is inefficient.</a></td>
+</tr>
+<tr>
+<td>14</td>
+<td><a href="#Cont.14">Why are set and hash_set iterators const (i.e. const_iterator)?</a></td>
+</tr>
+<tr>
+<td>15</td>
+<td><a href="#Cont.15">How do I prevent my hash container from re-hashing?</a></td>
+</tr>
+<tr>
+<td>16</td>
+<td><a href="#Cont.16">Which uses less memory, a map or a hash_map?</a></td>
+</tr>
+<tr>
+<td>17</td>
+<td><a href="#Cont.17">How do I write a custom hash function?</a></td>
+</tr>
+<tr>
+<td>18</td>
+<td><a href="#Cont.18">How do I write a custom compare function for a map or set?</a></td>
+</tr>
+<tr>
+<td>19</td>
+<td><a href="#Cont.19">How do I force my vector or string capacity down to the size of the container?</a></td>
+</tr>
+<tr>
+<td>20</td>
+<td><a href="#Cont.20">How do I iterate a container while (selectively) removing items from it?</a></td>
+</tr>
+<tr>
+<td>21</td>
+<td><a href="#Cont.21">How do I store a pointer in a container?</a></td>
+</tr>
+<tr>
+<td>22</td>
+<td><a href="#Cont.22">How do I make a union of two containers? difference? intersection?</a></td>
+</tr>
+<tr>
+<td>23</td>
+<td><a href="#Cont.23">How do I override the default global allocator?</a></td>
+</tr>
+<tr>
+<td>24</td>
+<td><a href="#Cont.24">How do I do trick X with the string class?</a></td>
+</tr>
+<tr>
+<td>25</td>
+<td><a href="#Cont.25">How do EASTL smart pointers compare to Boost smart pointers?</a></td>
+</tr>
+<tr>
+<td>26</td>
+<td><a href="#Cont.26">How do your forward-declare an EASTL container?</a></td>
+</tr>
+<tr>
+ <td>27</td>
+ <td><a href="#Cont.27">How do I make two containers share a memory pool?</a></td>
+</tr>
+<tr>
+ <td>28</td>
+ <td><a href="#Cont.28">Can I use a std (STL) allocator with EASTL?</a></td>
+</tr>
+<tr>
+ <td>29 </td>
+ <td><a href="#Cont.29">What are the requirements of classes stored in containers? </a></td>
+</tr>
+</tbody>
+</table>
+<h2>Algorithms</h2>
+<table style="width: 100%;" border="0" cellpadding="0" cellspacing="0" cols="2">
+<tbody>
+<tr>
+<td style="width: 28px;">1</td>
+<td><a href="#Algo.1">I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?</a></td>
+</tr>
+<tr>
+<td>2</td>
+<td><a href="#Algo.2">How do I write a comparison (operator&lt;()) for a struct that contains two or more
+members?</a></td>
+</tr>
+<tr>
+<td>3</td>
+<td><a href="#Algo.3">How do I sort something in reverse order?</a></td>
+</tr>
+<tr>
+<td>4</td>
+<td><a href="#Algo.4">I'm getting errors about min and max while compiling.</a></td>
+</tr>
+<tr>
+<td>5</td>
+<td><a href="#Algo.5">Why don't algorithms take a container as an argument instead of iterators? A container would be
+more convenient.</a></td>
+</tr>
+<tr>
+<td>6</td>
+<td><a href="#Algo.6">Given a container of pointers, how do I find an element by value (instead of by
+pointer)?</a></td>
+</tr>
+<tr>
+<td>7</td>
+<td><a href="#Algo.7">When do stored objects need to support <small><span style="font-family: Courier New;">opertor
+&lt;</span></small> vs. when do they need to support <small><span style="font-family: Courier New;">operator
+==</span></small>?</a></td>
+</tr>
+<tr>
+ <td>8</td>
+ <td><a href="#Algo.8">How do I sort via pointers or array indexes instead of objects directly?</a></td>
+</tr>
+</tbody>
+</table>
+<h2>Iterators</h2>
+<table style="width: 100%;" border="0" cellpadding="0" cellspacing="0" cols="2">
+<tbody>
+<tr>
+<td style="width: 28px;">1</td>
+<td><a href="#Iter.1">What's the difference between iterator, const iterator, and const_iterator?</a></td>
+</tr>
+<tr>
+<td>2</td>
+<td><a href="#Iter.2">How do I tell from an iterator what type of thing it is iterating?</a></td>
+</tr>
+<tr>
+<td>3</td>
+<td><a href="#Iter.3">How do I iterate a container while (selectively) removing items from it?</a></td>
+</tr>
+<tr>
+<td>4</td>
+<td><a href="#Iter.4">What is an insert_iterator?</a></td>
+</tr>
+</tbody>
+</table>
+<h2><br>
+ Information
+</h2>
+<p class="faq-question"><a name="Info.1"></a>Info.1
+What is EASTL?</p>
+<p class="faq-answer">EASTL refers to &quot;EA Standard Template Library.&quot; It is a C++ template library that is analogous to the template facilities of the C++ standard library, which are often referred to as the STL. EASTL consists of the following systems: </p>
+<ul>
+ <li>Containers</li>
+ <li>Iterators</li>
+ <li>Algorithms</li>
+ <li>Utilities</li>
+ <li>Smart pointers</li>
+ <li>Type traits</li>
+</ul>
+<p class="faq-answer">EASTL provides extensions and optimizations over the equivalents in standard C++ STL.</p>
+<p class="faq-answer">EASTL is a professional-level implementation which outperforms commercial implementations (where functionality overlaps) and is significantly easier to read and debug.</p>
+<p class="faq-question"> <a name="Info.2"></a>Info.2
+What uses are EASTL suitable for?</p>
+<p class="faq-answer">EASTL is suitable for any place where templated containers and algorithms would be appropriate. Thus any C++ tools could use it and many C++ game runtimes could use it, especially 2005+ generation game platforms. EASTL has optimizations that make it more suited to the CPUs and memory systems found on console platforms. Additionally, EASTL has some type-traits and iterator-traits-derived template optimizations that make it generally more efficient than home-brew templated containers.</p>
+<p class="faq-question"><a name="Info.3"></a>Info.3
+How does EASTL differ from standard C++ STL?</p>
+<p class="faq-answer">There are three kinds of ways that EASTL differs from standard STL: </p>
+<ol>
+ <li>EASTL equivalents to STL sometimes differ.</li>
+ <li>EASTL implementations sometimes differ from STL implementations of the same thing.</li>
+ <li>EASTL has functionality that doesn't exist in STL.</li>
+</ol>
+<p class="faq-answer">With respect to item #1, the changes are such that they benefit game development and not the type that could silently hurt you if you were more familiar with STL interfaces.</p>
+<p class="faq-answer">With respect to item #2, where EASTL implementations differ from STL implementations it is almost always due to improvements being made in the EASTL versions or tradeoffs being made which are considered better for game development.</p>
+<p class="faq-answer">With respect to item #3, there are a number of facilities that EASTL has that STL doesn't have, such as intrusive_list and slist containers, smart pointers, and type traits. All of these are facilities that assist in making more efficient game code and data.</p>
+<p class="faq-answer">Ways in which EASTL is better than standard STL: </p>
+<ul>
+ <li>Has higher performance in release builds, sometimes dramatically so.</li>
+ <li>Has significantly higher performance in debug builds, due to less call overhead.</li>
+ <li>Has extended per-container functionality, particularly for game development.</li>
+ <li>Has additional containers that are useful for high performance game development.</li>
+ <li>Is easier to read, trace, and debug.</li>
+ <li>Memory allocation is much simpler and more controllable.</li>
+ <li>Has higher portability, as there is a single implementation for all platforms.</li>
+ <li>Has support of&nbsp;object alignment, whereas such functionality is not natively supported by STL.</li>
+ <li>We have control over it, so we can modify it as we like.</li>
+ <li>Has stricter standards for container design and behavior, particularly as this benefits game development.</li>
+</ul>
+<p class="faq-answer">Ways in which EASTL is worse than standard STL: </p>
+<ul>
+ <li>Standard STL implementations are currently very reliable and weather-worn, whereas EASTL is less tested.</li>
+ <li>Standard STL is automatically available with just about every C++ compiler vendor's library.</li>
+ <li>Standard STL is supported by the compiler vendor and somewhat by the Internet community.</li>
+</ul>
+<p style="font-weight: bold;">EASTL coverage of std STL</p>
+<ul style="margin-top: 0in;" type="disc">
+ <li>list</li>
+ <li>vector</li>
+ <li>deque</li>
+ <li>string</li>
+ <li>set</li>
+ <li>multiset</li>
+ <li>map</li>
+ <li>multimap</li>
+ <li>bitset</li>
+ <li>queue</li>
+ <li>stack</li>
+ <li>priority_queue</li>
+ <li>memory</li>
+ <li>numeric</li>
+ <li>algorithm (all but inplace_merge, prev_permutation, next_permutation, nth_element, includes, unique_copy)</li>
+ <li>utility</li>
+ <li>functional</li>
+ <li>iterator</li>
+ <li>string_view</li>
+ <li>variant</li>
+ <li>any</li>
+ <li>optional</li>
+</ul>
+<p>EASTL additions/amendments to std STL</p>
+<ul style="margin-top: 0in;" type="disc">
+ <li>allocators work in a simpler way.</li>
+ <li>exception handling can be disabled.</li>
+ <li>all containers expose/declare their node size, so you can make a node allocator for them.</li>
+ <li>all containers have reset_lose_memory(), which unilaterally forgets their contents.</li>
+ <li>all containers have validate() and validate_iterator() functions.</li>
+ <li>all containers understand and respect object alignment requirements.</li>
+ <li>all containers guarantee no memory allocation upon being newly created as empty.</li>
+ <li>all containers and their iterators can be viewed in a debugger (no other STL does this, believe it or not).</li>
+ <li>linear containers guarantee linear memory.</li>
+ <li>vector has push_back(void).</li>
+ <li>vector has a data() function.</li>
+ <li>vector&lt;bool&gt; is actually a vector of type bool.</li>
+ <li>vector and string have set_capacity().</li>
+ <li>string has sprintf(), append_sprintf(), trim(), compare_i(), make_lower(), make_upper().</li>
+ <li>deque allows you to specify the subarray size.</li>
+ <li>list has a push_back(void) and push_back(void) function.</li>
+ <li>hash_map, hash_set, etc. have find_as().</li>
+</ul>
+<p><span style="font-weight: bold;">EASTL coverage of TR1</span> <font face="Arial" size="2"><span style=
+"font-size: 10pt; font-family: Arial;">(tr1 refers to proposed additions for the next C++ standard library, ~2008)</span></font></p>
+<ul style="margin-top: 0in;" type="disc">
+ <li>array</li>
+ <li>type_traits (there are about 30 of these)</li>
+ <li>unordered_set (EASTL calls it hash_set)</li>
+ <li>unordered_multiset</li>
+ <li>unordered_map</li>
+ <li>unordered_multimap</li>
+ <li>shared_ptr, shared_array, weak_ptr, scoped_ptr, scoped_array, intrusive_ptr</li>
+</ul>
+<p><span style="font-weight: bold;">EASTL additional functionality</span> <font face="Arial" size="1"><span style=
+"font-size: 9pt; font-family: Arial;">(not found elsewhere)</span></font></p>
+<ul style="margin-top: 0in;" type="disc">
+ <li>fixed_list</li>
+ <li>fixed_slist</li>
+ <li>fixed_vector</li>
+ <li>fixed_string</li>
+ <li>fixed_substring</li>
+ <li>fixed_set</li>
+ <li>fixed_multiset</li>
+ <li>fixed_map</li>
+ <li>fixed_multimap</li>
+ <li>fixed_hash_set</li>
+ <li>fixed_hash_multiset</li>
+ <li>fixed_hash_map</li>
+ <li>fixed_hash_multimap</li>
+ <li>fixed_function</li>
+ <li>vector_set</li>
+ <li>vector_multiset</li>
+ <li>vector_map</li>
+ <li>vector_multimap</li>
+ <li>intrusive_list</li>
+ <li>intrusive_slist</li>
+ <li>intrusive_sdlist</li>
+ <li>intrusive_hash_set</li>
+ <li>intrusive_hash_multiset</li>
+ <li>intrusive_hash_map</li>
+ <li>intrusive_hash_multimap</li>
+ <li>slist (STLPort's STL has this)</li>
+ <li>heap</li>
+ <li>linked_ptr, linked_array</li>
+ <li>sparse_matrix (this is not complete as of this writing)</li>
+ <li>ring_buffer</li>
+ <li>compressed_pair</li>
+ <li>call_traits</li>
+ <li>binary_search_i, change_heap, find_first_not_of, find_last_of, find_last_not_of, identical</li>
+ <li>comb_sort, bubble_sort, selection_sort, shaker_sort, bucket_sort</li>
+ <li>equal_to_2, not_equal_to_2, str_equal_to, str_equal_to_i<br>
+ </li>
+</ul>
+<p class="faq-question"> <a name="Info.4"></a>Info.4
+Is EASTL thread-safe?
+</p>
+<p class="faq-answer">It's not simple enough to simply say that EASTL is thread-safe or thread-unsafe. However, we can say that with respect to thread safety that EASTL does the right thing.</p>
+<p class="faq-answer">Individual EASTL containers are not thread-safe. That is,&nbsp;access to an instance of a container from multiple threads at the same time is unsafe if any of those accesses are modifying operations. A given container can be read from multiple threads simultaneously as well as any other standalone data structure. If a user wants to be able to have modifying access an instance of a container from multiple threads, it is up to the user to ensure that proper thread synchronization occurs. This usually means using a mutex.</p>
+<p class="faq-answer">EASTL classes other than containers are the same as containers with respect to thread safety. EASTL functions (e.g. algorithms) are inherently thread-safe as they have no instance data and operate entirely on the stack. As of this writing, no EASTL function allocates memory and thus doesn't bring thread safety issues via that means.</p>
+<p class="faq-answer">The user may well need to be concerned about thread safety with respect to memory allocation. If the user modifies containers from multiple threads, then allocators are going to be accessed from multiple threads. If an allocator is shared across multiple container instances (of the same type of container or not), then mutexes (as discussed above) the user uses to protect access to individual instances will not suffice to provide thread safety for allocators used across multiple instances. The conventional solution here is to use a mutex within the allocator if it is expected to be used by multiple threads.</p>
+<p class="faq-answer">EASTL&nbsp;uses neither static nor global variables and thus there are no inter-instance dependencies that would make thread safety difficult for the user to implement.</p>
+<p class="faq-question"><a name="Info.5"></a>Info.5
+What platforms/compilers does EASTL support?</p>
+<p class="faq-answer">EASTL's support depends entirely on the compiler and not on the platform. EASTL works on any C++ compiler that completely conforms the C++ language standard. Additionally, EASTL is 32 bit and 64 bit compatible. Since EASTL does not use the C or C++ standard library <small><span style=
+"font-family: Arial Narrow;">(with a couple small exceptions)</span></small>, it doesn't matter what kind of libraries are provided (or not provided) by the compiler vendor. However, given that we need to work with some compilers that aren't 100% conforming to the language standard, it will be useful to make a list here of these that are supported and those that are not:</p>
+<blockquote>
+ <table border="1">
+ <tr>
+ <th scope="col">Compiler</th>
+ <th scope="col">Status</th>
+ <th scope="col">Notes</th>
+ </tr>
+ <tr>
+ <td>GCC 3.x+</td>
+ <td>Not Supported</td>
+ <td>Not officially supported due to migration to Clang.</td>
+ </tr>
+ <tr>
+ <td>MSVC 12.0+</td>
+ <td>Supported</td>
+ <td>This compiler is used by the Windows based platforms</td>
+ </tr>
+ <tr>
+ <td>Clang 4.0+</td>
+ <td>Supported</td>
+ <td>This compiler is used by the Linux based platforms</td>
+ </tr>
+ </table>
+</blockquote>
+<p class="faq-question"><a name="Info.6"></a>Info.6
+Why is there EASTL when there is the STL?</p>
+<p class="faq-answer">The STL is largely a fine library for general purpose C++. However, we can improve upon it for our uses and gain other advantages as well. The primary motivations for the existence of EASTL are the following:</p>
+<ul>
+ <li class="458151900-03082005"><font><font>Some STL implementations (especially Microsoft STL) have inferior performance characteristics that make them unsuitable for game development. EASTL is faster than all existing STL implementations.</font></font></li>
+ <li>The STL is sometimes hard to debug, as most STL implementations use cryptic variable names and unusual data structures.</li>
+ <li>STL allocators are sometimes painful to work with, as they have many requirements and cannot be modified once bound to a container.</li>
+ <li>The STL includes excess functionality that can lead to larger code than desirable. It's not very easy to tell programmers they shouldn't use that functionality.</li>
+ <li>The STL is implemented with very deep function calls. This results is unacceptable performance in non-optimized builds and sometimes in optimized builds as well.</li>
+ <li>The STL doesn't support alignment of contained objects.</li>
+ <li>STL containers won't let you insert an entry into a container without supplying an entry to copy from. This can be inefficient.</li>
+ <li>Useful STL extensions (e.g. slist, hash_map, shared_ptr) found in existing STL implementations such as STLPort are not portable because they don't exist in other versions of STL or aren't consistent between STL versions.<br>
+ </li>
+ <li>The STL lacks useful extensions that game programmers find useful (e.g. intrusive_list) but which could be best optimized in a portable STL environment.</li>
+ <li>The STL puts an emphasis on correctness before performance, whereas sometimes you can get significant performance gains by making things less academically pure.</li>
+ <li>STL containers have private implementations that don't allow you to work with their data in a portable way, yet sometimes this is an important thing to be able to do (e.g. node pools).</li>
+ <li>All existing versions of STL allocate memory in empty versions of at least some of their containers. This is not ideal and prevents optimizations such as container memory resets that can greatly increase performance in some situations.</li>
+ <li>The STL is slow to compile, as most modern STL implementations are very large.<br>
+ </li>
+ <li>There are legal issues that make it hard for us to freely use portable STL implementations such as STLPort.</li>
+ <li>We have no say in the design and implementation of the STL and so are unable to change it to work for our needs.</li>
+</ul>
+<p class="faq-answer">Note that there isn't actually anything in the C++ standard called "STL." STL is a term that merely refers to the templated portion of the C++ standard library.</p>
+<p class="faq-question"><a name="Info.7"></a>Info.7
+Can I mix EASTL with standard C++ STL?</p>
+<p class="faq-answer">This is possible to some degree, though the extent depends on the implementation of C++ STL. One of things that makes interoperability is something called iterator categories. Containers and algorithms recognize iterator types via their category and STL iterator categories are not recognized by EASTL and vice versa.<br>
+<br>
+Things that you definitely can do: </p>
+<ul>
+ <li>#include both EASTL and standard STL headers from the same .cpp file.</li>
+ <li>Use EASTL containers to hold STL containers.</li>
+ <li>Construct an STL reverse_iterator from an EASTL&nbsp;iterator.</li>
+ <li>Construct an EASTL reverse_iterator from an&nbsp;STL&nbsp;iterator.</li>
+</ul>
+<p class="faq-answer">Things that you probably will be able to do, though a given std STL implementation may prevent it:
+</p>
+<ul>
+ <li>Use STL containers in EASTL algorithms.</li>
+ <li>Use EASTL containers in STL algorithms.</li>
+ <li>Construct or assign to an STL container via iterators into an EASTL container.</li>
+ <li>Construct or assign to an EASTL container via iterators into an&nbsp;STL container.</li>
+</ul>
+<p class="faq-answer">Things that you would be able to do if the given std STL implementation is bug-free:
+</p>
+<ul>
+ <li>Use STL containers to hold EASTL containers. Unfortunately, VC7.x STL has a confirmed bug that prevents this. Similarly, STLPort versions prior to v5 have a similar but.</li>
+</ul>
+<p class="faq-answer">Things that you definitely can't do:</p>
+<ul>
+ <li>Use an STL allocator directly with an EASTL container (though you can use one indirectly).</li>
+ <li>Use an EASTL allocator directly with an STL container (though you can use one indirectly).</li>
+</ul>
+<p class="faq-question"> <a name="Info.8"></a>Info.8
+Where can I learn more about STL and EASTL?
+</p>
+<p class="faq-answer">EASTL is close enough in philosophy and functionality to standard C++ STL that most of what you read about STL applies to EASTL. This is particularly useful with respect to container specifications. It would take a lot of work to document EASTL containers and algorithms in fine detail, whereas most standard STL documentation applies as-is to EASTL. We won't cover the differences here, as that's found in another FAQ entry.</p>
+<p class="faq-answer">That being said, we provide a list of sources for STL documentation that may be useful to you, especially if you are less familiar with the concepts of STL and template programming in general.</p>
+<ul>
+ <li>The SGI STL web site. Includes a good STL reference.</li>
+ <li>CodeProject STL introduction.</li>
+ <li>Scott Meyers Effective STL book.</li>
+ <li>The Microsoft online STL documentation. Microsoft links go bad every couple months, so try searching for STL at the Microsoft MSDN site.</li>
+ <li>The Dinkumware online STL documentation.&nbsp;</li>
+ <li>The C++ standard, which is fairly readable. You can buy an electronic version for about $18 and in the meantime you can make do with draft revisions of it off the Internet by searching for &quot;c++ draft standard&quot;.</li>
+ <li>STL performance tips, by Pete Isensee</li>
+ <li>STL algorithms vs. hand-written loops, by Scott Meyers.</li>
+ <li>cppreference.com</li>
+ <li>isocpp.org<li>
+</ul>
+<p class="faq-question"><a name="Info.9"></a>Info.9
+What is the legal status of EASTL?</p>
+<p class="faq-answer">EASTL is usable for all uses within Electronic Arts, both for internal usage and for shipping products for all platforms. Any&nbsp;externally derived code would be explicitly stated as such and approved by the legal department if such code ever gets introduced. As of EASTL v1.0, the red_black_tree.cpp file contains two functions derived from the original HP STL and have received EA legal approval for usage in any product.</p>
+<p class="faq-question"><a name="Info.10"></a>Info.10
+Does EASTL deal with compiler exception handling settings?</p>
+<p class="faq-answer">EASTL has automatic knowledge of the compiler's enabling/disabling of exceptions. If your compiler is set to disable exceptions, EASTL automatically detects so and executes without them. Also, you can force-enable or force-disable that setting to override the automatic behavior by #defining EASTL_EXCEPTIONS_ENABLED to 0 or 1. See EASTL's config.h for more information.</p>
+<p class="faq-question"> <a name="Info.11"></a>Info.11
+ What C++ language features does EASTL use (e.g. virtual
+functions)?</p>
+<p class="faq-answer">EASTL uses the following C++ language features: </p>
+<ul>
+ <li>Template functions, classes, member functions.</li>
+ <li>Multiple inheritance.</li>
+ <li>Namespaces.</li>
+ <li>Operator overloading.</li>
+</ul>
+<p class="faq-answer">EASTL does not use the following C++ language features:
+</p>
+<ul>
+ <li>Virtual functions / interfaces.</li>
+ <li>RTTI (dynamic_cast).</li>
+ <li>Global and static variables. There are a couple class static const variables, but they act much like enums.</li>
+ <li>Volatile declarations</li>
+ <li>Template export.</li>
+ <li>Virtual inheritance.</li>
+</ul>
+<p class="faq-answer">EASTL may use the following C++ language features:
+</p>
+<ul>
+ <li>Try/catch. This is an option that the user can enable and it defaults to whatever the compiler is set to use.</li>
+ <li>Floating point math. Hash containers have one floating point calculation, but otherwise floating point is not used.</li>
+</ul>
+<p class="faq-answer">Notes:
+</p>
+<ul>
+ <li>EASTL uses rather little of the standard C or C++ library and uses none of the C++ template library (STL) and iostream library. The memcpy family of functions is one example EASTL C++ library usage.</li>
+ <li>EASTL never uses global new / delete / malloc / free. All allocations are done via user-specified allocators, though a default allocator definition is available.</li>
+</ul>
+<p class="faq-question"><a name="Info.12"></a>Info.12
+ What compiler warning levels does EASTL support?
+</p>
+<p class="faq-answer">For VC++ EASTL should compile without warnings on level 4, and should compile without warnings for &quot;warnings disabled by default&quot; except C4242, C4514, C4710, C4786, and C4820. These latter warnings are somewhat draconian and most EA projects have little choice but to leave them disabled.</p>
+<p class="faq-answer">For GCC, EASTL should compile without warnings with -Wall. Extensive testing beyond that hasn't been done.</p>
+<p class="faq-answer">However, due to the nature of templated code generation and due to the way compilers compile templates, unforeseen warnings may occur in user code that may or may not be addressable by modifying EASTL.</p>
+<p class="faq-question"><a name="Info.13"></a>Info.13
+ Is EASTL compatible with Lint?
+</p>
+<p class="faq-answer">As of EASTL 1.0, minimal lint testing has occurred. Testing with the November 2005 release of Lint (8.00t) demonstrated bugs in Lint that made its analysis not very useful. For example, Lint seems to get confused about the C++ typename keyword and spews many errors with code that uses it. We will work with the makers of Lint to get this resolved so that Lint can provide useful information about EASTL.</p>
+<p class="faq-question"><a name="Info.14"></a>Info.14
+ What compiler settings do I need to compile EASTL?
+</p>
+<p class="faq-answer">EASTL consists mostly of header files with templated C++ code, but there are also a few .cpp files that need to be compiled and linked in order to use some of the modules. EASTL will compile in just about any environment. As mentioned elsewhere in this FAQ, EASTL can be compiled at the highest warning level of most compilers, transparently deals with compiler exception handling settings, is savvy to most or all compilation language options (e.g. wchar_t is built-in or not, for loop variables are local or not), and has almost no platform-specific or compiler-specific code. For the most part, you can just drop it in and it will work. The primary thing that needs to be in place is that EASTL .cpp files need to be compiled with the same struct padding/alignment settings as other code in the project. This of course is the same for just about any C++ source code library.</p>
+<p class="faq-answer">See the Performance section of this FAQ for a discussion of the optimal compiler settings for EASTL performance.</p>
+<p class="faq-question"><a name="Info.15"></a>Info.15
+How hard is it to incorporate EASTL into my project?</p>
+<p>It's probably trivial.<br>
+ <br>
+EASTL has only one dependency: EABase. And EASTL auto-configures itself for most compiler environments and for the most typical configuration choices. Since it is fairly highly warning-free, you won't likely need to modify your compiler warning settings, even if they're pretty strict. EASTL has a few .cpp files which need to be compiled if you want to use the modules associated with those files. You can just compile those files with your regular compiler settings. Alternatively, you can use one of the EASTL project files.<br>
+<br>
+In its default configuration, the only thing you need to provide to make EASTL work is to define implementations of the following operator new functions:</p>
+<pre class="code-example">#include &lt;new&gt;<br>
+void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);</pre>
+The flags and debugFlags arguments correspond to PPMalloc/RenderWare GeneralAllocator/GeneralAllocatorDebug Malloc equivalents.<br>
+<p class="faq-question"><a name="Info.16"></a>Info.16
+Should I use EASTL instead of std STL or instead of my custom&nbsp;library?</p>
+<p class="faq-answer">There are reasons you may want to use EASTL; there are reasons you may not want to use it. Ditto for std STL or any other library. Here we present a list of reasons (+ and -) for why you might want to use one or another. However, it should be noted that while EASTL contains functionality found in std STL, it has another ~40% of functionality not found in std STL, so EASTL and std STL (and whatever other template library you may have) are not mutually exclusive.<br>
+<br>
+<span style="font-weight: bold;">EASTL</span><br>
+</p>
+<div class="faq-answer" style="margin-left: 40px;"><span style="font-family: Courier New,Courier,monospace;">+</span> Has higher performance than any commercial STL, especially on console platforms.<br>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> Has extended functionality tailored for game development.<br>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> Is highly configurable, and we own it so it can be amended at will. Std STL is owned by a third party committee.<br>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> Is much easier to read and debug than other similar libraries, especially std STL.<br>
+ <br>
+ <span style="font-family: Courier New,Courier,monospace;">-</span> Is highly unit tested, but does not have the same level as std STL.<br>
+ <span style="font-family: Courier New,Courier,monospace;">-</span> Is more complicated than many users' lite template libraries, and may put off some beginners.<br>
+<span style="font-family: Courier New,Courier,monospace;">-</span> EASTL &nbsp;</div>
+<p> <span class="faq-answer" style="font-weight: bold;">Std STL</span>
+</p>
+<div class="faq-answer" style="margin-left: 40px;"><span style="font-family: Courier New,Courier,monospace;">+</span> Is highly portable; your STL code will likely compile and run anywhere.<br>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> Works without the need to&nbsp;install or download any package to use it. It just works.<br>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> Is highly reliable and supported by the compiler vendor. You can have confidence in it.<br>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> Some std STL versions (e.g. STLPort, VC8 STL) have better runtime debug checking than EASTL.<br>
+ <br>
+ <span style="font-family: Courier New,Courier,monospace;">-</span> Has (sometimes greatly) variable implementations, behavior, and performance between implementations.<br>
+ <span style="font-family: Courier New,Courier,monospace;">-</span> Is usually hard to read and debug.<br>
+ <span style="font-family: Courier New,Courier,monospace;">-</span> Doesn't support some of the needs of game development, such as aligned allocations, named allocations, intrusive containers, etc.<br>
+<span style="font-family: Courier New,Courier,monospace;">-</span> Is not as efficient as EASTL, especially on console platforms.</div>
+<p> <span class="faq-answer" style="font-weight: bold;">Your own library</span>
+</p>
+<div class="faq-answer" style="margin-left: 40px;">(please forgive us for implying there may be weaknesses in your libraries)<br>
+
+ <p></p>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> You have control over it and can make it work however you want.<br>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> You can fix bugs in it on the spot and have the fix in your codebase immediately.<br>
+ <span style="font-family: Courier New,Courier,monospace;">+</span> Your own library can be highly integrated into your application code or development environment.<br>
+ <br>
+ <span style="font-family: Courier New,Courier,monospace;">-</span> Many custom libraries don't have the same level of testing as libraries such as std STL or EASTL.<br>
+ <span style="font-family: Courier New,Courier,monospace;">-</span> Many custom libraries don't have the same breadth or depth as std STL or especially EASTL.<br>
+<span style="font-family: Courier New,Courier,monospace;">-</span> Many custom libraries don't have the level of performance tuning that std STL or especially EASTL has.</div>
+<p class="faq-question"><a name="Info.17"></a>Info.17
+I think I've found a bug. What do I do?</p>
+<p class="faq-answer"><span style="font-weight: bold;">Verify that you indeed have a bug</span><br>
+There are various levels of bugs that can occur, which include the following: </p>
+<ol>
+ <li>Compiler warnings generated by EASTL.</li>
+ <li>Compiler errors generated by EASTL (failure to compile well-formed code).</li>
+ <li>Runtime misbehavior by EASTL (function does the wrong thing).</li>
+ <li>Runtime crash or data corruption by EASTL.</li>
+ <li>Mismatch between EASTL documentation and behavior.</li>
+ <li>Mismatch between EASTL behavior and user's expectations (mis-design).</li>
+</ol>
+<p class="faq-answer">Any of the above items can be the fault of EASTL. However, the first four can also be the fault of the user. Your primary goal in verifying a potential bug is to determine if it is an EASTL bug or a user bug. Template errors can sometimes be hard to diagnose. It's probably best if you first show the problem to somebody you know to make sure you are not missing something obvious. Creating a reproducible case may be useful in helping convince yourself, but as is mentioned below, this is not required in order to report the bug.<br>
+ <br>
+ <span style="font-weight: bold;">Report the bug</span><br>
+The first place to try is the standard EA centralized tech support site. As of this writing (10/2005), that tech site is <a href="http://eatech/">http://eatech/</a>. Due to the frequent technology churn that seems to occur within Electronic Arts, the bug reporting system in place when you read this may not be the one that was in place when this FAQ entry was written. If the tech site route fails, consider directly contacting the maintainer of the EASTL package.<br>
+<br>
+In reporting a bug, it is nice if there is a simple reproducible case that can be presented. However, such a case requires time to create, and so you are welcome to initially simply state what you think the bug is without producing a simple reproducible case. It may be that this is a known bug or it may be possible to diagnose the bug without a reproducible case. If more information is needed then the step of trying to produce a reproducible case may be necessary.</p>
+<p class="faq-question"><a name="Info.18"></a>Info.18
+ Can EASTL be used by third party EA developers?</p>
+<p class="faq-answer">EASTL and other core technologies authored by EA (and not licensed from other companies) can be used in source and binary form by designated 3rd parties. The primary case where there is an issue is if the library contains platform specific code for a platform that the 3rd party is not licensed for. In that case the platform-specific code would need to be removed. This doesn&rsquo;t apply to EASTL, nor many of the other core tech packages. </p>
+<h2><span style="font-weight: bold;">Performance</span>
+</h2>
+
+<p class="faq-question"><a name="Perf.1"></a>Perf.1 How efficient is EASTL compared to standard C++ STL implementations?</p>
+<p class="faq-answer">With respect to the functionality that is equivalent between EASTL and standard STL, the short answer to this is that EASTL is as at least as efficient as other STL implementations and in a number of aspects is more so. EASTL has functionality such as intrusive_list and linked_ptr that don't exist in standard STL but are explicitly present to provide significant optimizations over standard STL.</p>
+<p class="faq-answer">The medium length answer is that EASTL is significantly more efficient than Dinkumware STL, and Microsoft Windows STL. EASTL is generally more efficient than Metrowerks STL, but Metrowerks has a few tricks up its sleeve which EASTL doesn't currently implement. EASTL is roughly equal in efficiency to STLPort and GCC 3.x+ STL, though EASTL has some optimizations that these do not.</p>
+<p class="faq-answer">The long answer requires a breakdown of the functionality between various versions of the STL.</p>
+
+<p class="faq-question"><a name="Perf.2"></a>Perf.2 How efficient is EASTL in general?</p>
+<p class="faq-answer">This question is related to the question, &quot;How efficient are templates?&quot; If you understand the effects of templates then you can more or less see the answer for EASTL. Templates are more efficient than the alternative when they are used appropriately, but can be less efficient than the alternative when used under circumstances that don't call for them. The strength of templates is that the compiler sees all the code and data types at compile time and can often reduce statements to smaller and faster code than with conventional non-templated code. The weakness of templates is that the sometimes produce more code and can result in what is often called &quot;code bloat&quot;. However, it's important to note that unused template functions result in no generated nor linked code, so if you have a templated class with 100 functions but you only use one, only that one function will be compiled.</p>
+<p class="faq-answer">EASTL is a rather efficient implementation of a template library and pulls many tricks of the trade in terms of squeezing optimal performance out of the compiler. The only way to beat it is to write custom code for the data types you are working with, and even then people are sometimes surprised to find that their hand-implemented algorithm works no better or even worse than the EASTL equivalent. But certainly there are ways to beat templates, especially if you resort to assembly language programming and some kinds of other non-generic tricks.</p>
+
+<p class="faq-question"> <a name="Perf.3"></a>Perf.3 Strings don't appear to use the "copy-on-write" (CoW) optimization. Why not?</p>
+<p class="faq-answer">
+<span style="font-weight: bold;">Short answer</span><br>
+CoW provides a benefit for a small percentage of uses but provides a disadvantage for the large majority of uses.<br>
+<br>
+<span style="font-weight: bold;">Long answer</span><br>
+The primary benefit of CoW is that it allows for the sharing of string data between two string objects. Thus if you say this:
+<pre class="code-example">string a("hello");
+string b(a);</pre>
+the "hello" will be shared between a and b. If you then say this:
+<pre class="code-example">a = "world";</pre>
+then <span style="font-family: Courier New;">a</span> will release its reference to "hello" and
+leave b with the only reference to it. Normally this functionality is accomplished via reference
+counting and with atomic operations or mutexes.</p>
+
+<p class="faq-answer">The C++ standard does not say anything about basic_string and CoW.
+However, for a basic_string implementation to be standards-conforming, a number of issues arise
+which dictate some things about how one would have to implement a CoW string. The discussion of
+these issues will not be rehashed here, as you can read the references below for better detail
+than can be provided in the space we have here. However, we can say that the C++ standard
+is sensible and that anything we try to do here to allow for an efficient CoW implementation
+would result in a generally unacceptable string interface.</p>
+<p class="faq-answer">The disadvantages of CoW strings are:</p>
+<ul>
+ <li>A reference count needs to exist with the string, which increases string memory usage.</li>
+ <li>With thread safety, atomic operations and mutex locks are expensive, especially on weaker memory systems such as console gaming platforms.</li>
+ <li>All non-const string accessor functions need to do a sharing check and the first such check needs to detach the string. Similarly, all string assignments need to do a sharing check as well. If you access the string before doing an assignment, the assignment doesn't result in a shared string, because the string has already been detached.</li>
+ <li>String sharing doesn't happen the large majority of the time. In some cases, the total sum of the reference count memory can exceed any memory savings gained by the strings that share representations.</li>
+</ul>
+<p class="faq-answer">The addition of a cow_string class is under consideration for EASTL. There are conceivably some systems which have string usage patterns which would benefit from CoW sharing. Such functionality is best saved for a separate string implementation so that the other string uses aren't penalized.</p>
+<p class="faq-answer">References</p>
+<p class="faq-answer">This is a good starting HTML reference on the topic:<br>
+&nbsp; &nbsp; <a href="http://www.gotw.ca/publications/optimizations.htm">http://www.gotw.ca/publications/optimizations.htm</a></p>
+<p class="faq-answer">Here is a well-known Usenet discussion on the topic:<br>
+&nbsp; &nbsp; <a href="http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d">http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d</a></p>
+
+<p class="faq-question"><a name="Perf.4"></a>Perf.4 Does EASTL cause code bloat, given that it uses templates?</p>
+<p class="faq-answer"> The reason that templated functions and classes might cause an increase in code size
+because each template instantiation theoretically creates a unique piece of code. For example, when you compile this
+code:</p>
+<pre class="code-example">template &lt;typename T&gt;
+const T min(const T a, const T b)
+&nbsp;&nbsp;&nbsp; { return b &lt; a ? b : a; }
+
+int &nbsp; &nbsp;i = min&lt;int&gt;(3, 4);
+double d = min&lt;double&gt;(3.0, 4.0);</pre>
+<p class="faq-answer">the compiler treats it as if you wrote this:</p>
+<pre class="code-example">int min(const int a, const int b)
+&nbsp;&nbsp;&nbsp; { return b &lt; a ? b : a; }<br>
+double min(const double a, const double b)
+&nbsp;&nbsp;&nbsp; { return b &lt; a ? b : a; }</pre>
+<p class="faq-answer">Imagine this same effect happening with containers such as list and map and you can see how it is that templates can cause code proliferation.</p>
+<p class="faq-answer">A couple things offset the possibility of code proliferation: inlining and folding. In practice the above 'min' function would be converted to&nbsp;inlined functions by the compiler which occupy only a few CPU instructions. In many of the simplest cases the inlined version actually occupies less code than the code required to push parameters on the stack and execute a function call. And they will execute much faster as well.</p>
+<p class="faq-answer">Code folding (a.k.a. &quot;COMDAT folding&quot;, &quot;duplicate stripping&quot;, &quot;ICF&quot; / &quot;identical code folding&quot;) is a compiler optimization whereby the compiler realizes that two independent functions have compiled to the same code and thus can be reduced to a single function. The Microsoft VC++ compiler (Since VS2005), and GCC (v 4.5+) can do these kinds of optimizations on all platforms. This can result, for example, in all templated containers of pointers (e.g. vector&lt;char*&gt;, vector&lt;Widget*&gt;, etc.) to be linked as a single implementation. This folding occurs at a function level and so individual member functions can be folded while other member functions are not. A side effect of this optimization is that you aren't likely to gain much much declaring containers of void* instead of the pointer type actually contained.</p>
+<p class="faq-answer">The above two features reduce the extent of code proliferation, but certainly don't eliminate it. What you need to think about is how much code might be generated vs. what your alternatives are. Containers like vector can often inline completely away, whereas more complicated containers such as map can only partially be inlined. In the case of map, if you need such a container for your Widgets, what alternatives do you have that would be more efficient than instantiating a map? This is up to you to answer.</p>
+<p class="faq-answer">It's important to note that C++ compilers will throw away any templated functions that aren't used, including unused member functions of templated classes. However, some argue that by having many functions available to the user that users will choose to use that larger function set rather than stick with a more restricted set.</p>
+<p class="faq-answer">Also, don't be confused by syntax bloat vs. code bloat. In looking at templated libraries such as EASTL you will notice that there is sometimes a lot of text in the definition of a template implementation. But the actual underlying code is what you need to be concerned about.</p>
+<p class="faq-answer">There is a good Usenet discussion on this topic at: <small><a href=
+"http://groups.google.com/group/comp.lang.c++.moderated/browse_frm/thread/2b00649a935997f5">http://groups.google.com/group/comp.lang.c++.moderated/browse_frm/thread/2b00649a935997f5</a></small></p>
+<p class="faq-question"><a name="Perf.5"></a>Perf.5
+Don't STL and EASTL containers fragment memory?</p>
+<p class="faq-answer">They only fragment memory if you use them in a way that does so. This is no different from any other type of container used in a dynamic way. There are various solutions to this problem, and EASTL provides additional help as well:</p>
+<ul>
+ <li>For vectors, use the reserve function (or the equivalent constructor) to set aside a block of memory for the container. The container will not reallocate memory unless you try grow beyond the capacity you reserve.</li>
+ <li>EASTL has "fixed" variations of containers which allow you to specify a fixed block of memory which the container uses for its memory. The container will not allocate any memory with these types of containers and all memory will be cache-friendly due to its locality.</li>
+ <li>You can assign custom allocators to containers instead of using the default global allocator. You would typically use an allocator that has its own private pool of memory.</li>
+ <li>Where possible, add all a container's elements to it at once up front instead of adding them over time. This avoids memory fragmentation and increase cache coherency.</li>
+</ul>
+
+<p class="faq-question"><a name="Perf.6"></a>Perf.6 I don't see container optimizations for equivalent scalar types such as pointer types. Why?</p>
+<p class="faq-answer">Metrowerks (and no other, as of this writing) STL has some container specializations for type
+T* which maps them to type void*. The idea is that a user who declares a list of Widget* and a list of Gadget*
+will generate only one container: a list of void*. As a result, code generation will be smaller. Often this is
+done only in optimized builds, as such containers are harder to view in debug builds due to type information being lost.<br>
+<br>
+The addition of this optimization is under consideration for EASTL, though it might be noted that optimizing
+compilers such as VC++ are already capable of recognizing duplicate generated code and folding it automatically
+as part of link-time code generation (LTCG) (a.k.a. "whole program optimization"). This has been verified
+with VC++, as the following code and resulting disassembly demonstrate:</p>
+<pre class="code-example">eastl::list&lt;int*&gt;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; intPtrList;
+eastl::list&lt;TestObject*&gt; toPtrList;
+
+eastl_size_t n1 = intPtrList.size();
+eastl_size_t n2 = toPtrList.size();
+
+0042D288&nbsp; lea&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; edx,[esp+14h]
+0042D28C&nbsp; <span style="color: rgb(51, 51, 255);">call&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; eastl::list&lt;TestObject&gt;::size (414180h)</span>
+0042D291&nbsp; push&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; eax&nbsp;
+0042D292&nbsp; lea&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; edx,[esp+24h]
+0042D296&nbsp; <span style="color: rgb(51, 51, 255);">call&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; eastl::list&lt;TestObject&gt;::size (414180h)</span></pre>
+<p class="faq-answer">Note that in the above case the compiler folded the two implementations of size() into a single implementation.</p>
+
+<p class="faq-question"><a name="Perf.7"></a>Perf.7
+I've seen some STL's provide a default quick "node allocator" as the default allocator. Why doesn't EASTL do this?</p>
+<p class="faq-answer"><span style="font-weight: bold;">Short answer<br>
+</span>This is a bad, misguided idea.</p>
+<p class="faq-answer"><span style="font-weight: bold;">Long answer</span><br>
+These node allocators implement a heap for all of STL with buckets for various sizes of allocations and implemented fixed-size pools for each of these buckets. These pools are attractive at first because they do well in STL comparison benchmarks, especially when thread safety is disabled. Such benchmarks make it impossible to truly compare STL implementations because you have two different allocators in use and in some cases allocator performance can dominate the benchmark. However, the real problem with these node allocators is that they badly fragment and waste memory. The technical discussion of this topic is outside the scope of this FAQ, but you can learn more about it by researching memory management on the Internet. Unfortunately, the people who implement STL libraries are generally not experts on the topic of memory management. A better approach, especially for game development, is for the user to decide when fixed-size pools are appropriate and use them via custom allocator assignment to containers.</p>
+<p class="faq-question"><a name="Perf.8"></a>Perf.8 Templates sometimes seem to take a long time to compile. Why do I do about that?
+</p>
+<p class="faq-answer">C++ compilers are generally slower than C compilers, and C++ templates are generally slower to compile than regular C++ code. EASTL has some extra functionality (such as type_traits and algorithm specializations) that is not found in most other template libraries and significantly improves performance and usefulness but adds to the amount of code that needs to be compiled. Ironically, we have a case where more source code generates faster and smaller object code.</p>
+<p class="faq-answer">The best solution to the problem is to use pre-compiled headers, which are available on all modern ~2002+) compilers, such as VC6.0+, GCC 3.2+, and Metrowerks 7.0+. In terms of platforms this means all 2002+ platforms.</p>
+<p class="faq-answer">Some users have been speeding up build times by creating project files that put all the source code in one large .cpp file. This has an effect similar to pre-compiled headers. It can go even faster than pre-compiled headers but has downsides in the way of convenience and portability.</p>
+<p class="faq-question"><a name="Perf.10"></a>Perf.10
+How well does EASTL inline?</p>
+<p class="faq-answer">EASTL is written in such as way as to be easier to inline than typical templated libraries such as STL. How is this so? It is so because EASTL reduces the inlining depth of many functions, particularly the simple ones. In doing so it makes the implementation less "academic" but entirely correct. An example of this is the vector operator[] function, which is implemented like so with Microsoft STL:</p>
+<pre class="code-example">reference operator[](size_type n) {
+&nbsp;&nbsp;&nbsp;return *(begin() + n);
+}</pre>
+<span class="faq-answer">EASTL implements the function directly, like so:</span>
+<pre class="code-example">reference operator[](size_type n) {
+&nbsp;&nbsp;&nbsp;&nbsp;return *(mpBegin + n);
+}</pre>
+<span class="faq-answer">Both implementations are correct, but the EASTL implementation will run faster in debug builds, be easier to debug, and will be more likely to be inlined when the usage of this function is within a hierarchy of other functions being inlined. It is not so simple to say that the Microsoft version will always inline in an optimized build, as it could be part of a chain and cause the max depth to be exceeded.<br>
+<br>
+That being said, EASTL appears to inline fairly well under most circumstances, including with GCC, which is the poorest of the compilers in its ability to inline well.</span>
+<p class="faq-question"><a name="Perf.11"></a>Perf.11
+How do I control function inlining?</p>
+<p class="faq-answer">Inlining is an important topic for templated code, as such code often relies on the compiler being able to do good function inlining for maximum performance. GCC,&nbsp;VC++, and Metrowerks are discussed here. We discuss compilation-level inlining and function-level inlining here, though the latter is likely to be of more use to the user of EASTL, as it can externally control how EASTL is inlined. A related topic is GCC's template expansion depth, discussed <a href=
+"file:///f:/Projects/SharedProjects/Core/EASTL/doc/EASTL%20FAQ.html#29">elsewhere</a> in this FAQ. We provide descriptions of inlining options here but don't currently have any advice on how to best use these with EASTL.</p>
+<p class="faq-answer">Compilation-Level Inlining -- VC++</p>
+<p class="faq-answer">VC++ has some basic functionality to control inlining, and the compiler is pretty good at doing aggressive inlining when optimizing on for all platforms.</p>
+<blockquote>
+ <p class="faq-answer"><small><span style="font-family: Courier New;"> #pragma inline_depth( [0... 255] )</span></small></p>
+ <p class="faq-answer">Controls the number of times inline expansion can occur by controlling the number of times that a series of function calls can be expanded (from 0 to 255 times). This pragma controls the inlining of functions marked inline and or inlined automatically under the /Ob2 option. The inline_depth pragma controls the number of times a series of function calls can be expanded. For example, if the inline depth is 4, and if A calls B and B then calls C, all three calls will be expanded inline. However, if the closest inline expansion is 2, only A and B are expanded, and C remains as a function call.</p>
+ <p class="faq-answer"><small><span style="font-family: Courier New;">#pragma inline_recursion( [{on | off}] )</span></small></p>
+ <p class="faq-answer">Controls the inline expansion of direct or mutually recursive function calls. Use this pragma to control functions marked as inline and or functions that the compiler automatically expands under the /Ob2 option. Use of this pragma requires an /Ob compiler option setting of either 1 or 2. The default state for inline_recursion is off. The inline_recursion pragma controls how recursive functions are expanded. If inline_recursion is off, and if an inline function calls itself (either directly or indirectly), the function is expanded only once. If inline_recursion is on, the function is expanded multiple times until it reaches the value set by inline_depth, the default value of 8, or a capacity limit.</p>
+</blockquote>
+<p class="faq-answer">Compilation-Level Inlining -- GCC</p>
+<p class="faq-answer">GCC has a large set of options to control function inlining. Some options are available only&nbsp; in GCC 3.0 and later and thus not present on older platforms.</p>
+<blockquote>
+ <table style="text-align: left; width: 100%;" border="1" cellpadding="2" cellspacing="2">
+ <tbody>
+ <tr>
+ <td>-fno-default-inline</td>
+ <td>Do not make member functions inline by default merely because they are defined inside the class scope (C++ only). Otherwise, when you specify -O, member functions defined inside class scope are compiled inline by default; i.e., you don't need to add `inline' in front of the member function name.</td>
+ </tr>
+ <tr>
+ <td>-fno-inline</td>
+ <td>Don't pay attention to the inline keyword. Normally this option is used to keep the compiler from expanding any functions inline. Note that if you are not optimizing, no functions can be expanded inline.</td>
+ </tr>
+ <tr>
+ <td>-finline-functions</td>
+ <td>Integrate all simple functions into their callers. The compiler heuristically decides which functions are simple enough to be worth integrating in this way. If all calls to a given function are integrated, and the function is declared static, then the function is normally not output as assembler code in its own right. Enabled at level -O3.</td>
+ </tr>
+ <tr>
+ <td>-finline-limit=n</td>
+ <td>By default, GCC limits the size of functions that can be inlined. This flag allows the control of this limit for functions that are explicitly marked as inline (i.e., marked with the inline keyword or defined within the class definition in c++). n is the size of functions that can be inlined in number of pseudo instructions (not counting parameter handling). pseudo-instructions are an internal representation of function size. The default value of n is 600. Increasing this value can result in more inlined code at the cost of compilation time and memory consumption. Decreasing usually makes the compilation faster and less code will be inlined (which presumably means slower programs). This option is particularly useful for programs that use inlining heavily such as those based on recursive templates with C++.<br>
+ <br>
+ Inlining is actually controlled by a number of parameters, which may be specified individually by using --param name=value. The -finline-limit=n option sets some of these parameters as follows:<br>
+ <br>
+ max-inline-insns-single<br>
+&nbsp;&nbsp;&nbsp; is set to n/2.<br>
+ max-inline-insns-auto<br>
+&nbsp;&nbsp;&nbsp; is set to n/2.<br>
+ min-inline-insns<br>
+&nbsp;&nbsp;&nbsp; is set to 130 or n/4, whichever is smaller.<br>
+ max-inline-insns-rtl<br>
+&nbsp;&nbsp;&nbsp; is set to n.<br>
+ <br>
+ See&nbsp;--param below for a documentation of the individual parameters controlling inlining.</td>
+ </tr>
+ <tr>
+ <td>-fkeep-inline-functions</td>
+ <td>Emit&nbsp;all inline functions into the object file, even if they are inlined where used.</td>
+ </tr>
+ <tr>
+ <td>--param name=value</td>
+ <td>In some places, GCC uses various constants to control the amount of optimization that is done. For example, GCC will not inline functions that contain more that a certain number of instructions. You can control some of these constants on the command-line using the --param option.&nbsp;<br>
+ <br>
+ max-inline-insns-single<br>
+ Several parameters control the tree inliner used in gcc. This number sets the maximum number of instructions (counted in GCC's internal representation) in a single function that the tree inliner will consider for inlining. This only affects functions declared inline and methods implemented in a class declaration (C++). The default value is 450.<br>
+ <br>
+ max-inline-insns-auto<br>
+ When you use -finline-functions (included in -O3), a lot of functions that would otherwise not be considered for inlining by the compiler will be investigated. To those functions, a different (more restrictive) limit compared to functions declared inline can be applied. The default value is 90.<br>
+ <br>
+ large-function-insns<br>
+ The limit specifying really large functions. For functions larger than this limit after inlining inlining is constrained by --param large-function-growth. This parameter is useful primarily to avoid extreme compilation time caused by non-linear algorithms used by the backend. This parameter is ignored when -funit-at-a-time is not used. The default value is 2700.<br>
+ <br>
+ large-function-growth<br>
+ Specifies maximal growth of large function caused by inlining in percents. This parameter is ignored when -funit-at-a-time is not used. The default value is 100 which limits large function growth to 2.0 times the original size.<br>
+ <br>
+ inline-unit-growth<br>
+ Specifies maximal overall growth of the compilation unit caused by inlining. This parameter is ignored when -funit-at-a-time is not used. The default value is 50 which limits unit growth to 1.5 times the original size.<br>
+ <br>
+ max-inline-insns-recursive<br>
+ max-inline-insns-recursive-auto<br>
+ Specifies maximum number of instructions out-of-line copy of self recursive inline function can grow into by performing recursive inlining. For functions declared inline --param max-inline-insns-recursive is taken into acount. For function not declared inline, recursive inlining happens only when -finline-functions (included in -O3) is enabled and --param max-inline-insns-recursive-auto is used. The default value is 450.<br>
+ <br>
+ max-inline-recursive-depth<br>
+ max-inline-recursive-depth-auto<br>
+ Specifies maximum recursion depth used by the recursive inlining. For functions declared inline --param max-inline-recursive-depth is taken into acount. For function not declared inline, recursive inlining happens only when -finline-functions (included in -O3) is enabled and --param max-inline-recursive-depth-auto is used. The default value is 450.<br>
+ <br>
+ inline-call-cost<br>
+ Specify cost of call instruction relative to simple arithmetics operations (having cost of 1). Increasing this cost disqualify inlining of non-leaf functions and at same time increase size of leaf function that is believed to reduce function size by being inlined. In effect it increase amount of inlining for code having large abstraction penalty (many functions that just pass the arguments to other functions) and decrease inlining for code with low abstraction penalty. Default value is 16.</td>
+ </tr>
+ <tr>
+ <td>-finline-limit=n </td>
+ <td>By default, GCC limits the size of functions that can be inlined. This flag allows the control of this limit for functions that are explicitly marked as inline (i.e., marked with the inline keyword or defined within the class definition in c++). n is the size of functions that can be inlined in number of pseudo instructions (not counting parameter handling). The default value of n is 600. Increasing this value can result in more inlined code at the cost of compilation time and memory consumption. Decreasing usually makes the compilation faster and less code will be inlined (which presumably means slower programs). This option is particularly useful for programs that use inlining heavily such as those based on recursive templates with C++. </td>
+ </tr>
+ </tbody>
+</table>
+</blockquote>
+<p class="faq-answer">Inlining is actually controlled by a number of parameters, which may be specified individually by using <samp><span class="option">--param</span> <var>name</var><span class="option">=</span><var>value</var></samp>. The <samp><span class="option">-finline-limit=</span><var>n</var></samp> option sets some of these parameters as follows:</p>
+<blockquote>
+ <dl>
+ <dl>
+ <dt><code>max-inline-insns-single</code></dt>
+ <dd>is set to <var>n</var>/2.<br>
+ </dd>
+ <dt><code>max-inline-insns-auto</code></dt>
+ <dd>is set to <var>n</var>/2.<br>
+ </dd>
+ <dt><code>min-inline-insns</code></dt>
+ <dd>is set to 130 or <var>n</var>/4, whichever is smaller.<br>
+ </dd>
+ <dt><code>max-inline-insns-rtl</code></dt>
+ <dd>is set to <var>n</var>.</dd>
+ </dl>
+ </dl>
+</blockquote>
+<p class="faq-answer">See below for a documentation of the individual parameters controlling inlining.</p>
+<p class="faq-answer"><em>Note:</em> pseudo instruction represents, in this particular context, an abstract measurement of function's size. In no way, it represents a count of assembly instructions and as such its exact meaning might change from one release to an another.</p>
+<p class="faq-answer">GCC additionally has the -Winline compiler warning, which emits a warning whenever a function declared as inline was not inlined.</p>
+<p class="faq-answer">Compilation-Level Inlining -- Metrowerks</p>
+<p class="faq-answer">Metrowerks has a number of pragmas (and corresponding compiler settings) to control inlining. These include always_inline, inline_depth, inline_max_size, and inline max_total_size.</p>
+<blockquote>
+ <p class="faq-answer"><small><span style="font-family: Courier New;">#pragma always_inline on | off | reset</span></small></p>
+ <p class="faq-answer">Controls the use of inlined functions. If you enable this pragma, the compiler ignores all inlining limits and attempts to inline all functions where it is legal to do so. This pragma is&nbsp;deprecated. Use the inline_depth pragma instead.<br>
+ <br>
+ <small><span style="font-family: Courier New;">#pragma inline_depth(n)</span><br>
+ <span style="font-family: Courier New;">#pragma inline_depth(smart)</span></small></p>
+ <p class="faq-answer">Controls how many passes are used to expand inline function. Sets the number of passes used to expand inline function calls. The number n is an integer from 0 to 1024 or the smart specifier. It also represents the distance allowed in the call chain from the last function up. For example, if d is the total depth of a call chain, then functions below (d-n) are inlined if they do not exceed the&nbsp;inline_max_size and&nbsp;inline_max_total_size settings which are discussed directly below.<br>
+ <br>
+ <small><span style="font-family: Courier New;">#pragma inline_max_size(n);</span><br>
+ <span style="font-family: Courier New;">#pragma inline_max_total_size(n);</span></small></p>
+ <p class="faq-answer">The first pragma sets the maximum function size to be considered for inlining; the second sets the maximum size to which a function is allowed to grow after the functions it calls are inlined. Here, n is the number of statements, operands, and operators in the function, which<br>
+ turns out to be roughly twice the number of instructions generated by the function. However, this number can vary from function to function. For the inline_max_size pragma, the default value of n is 256; for the inline_max_total_size pragma, the default value of n is 10000. The smart specifier is the default mode, with four passes where the passes 2-4 are limited to small inline functions. All inlineable functions are expanded if inline_depth is set to 1-1024.</p>
+</blockquote>
+<p class="faq-answer">Function-Level Inlining -- VC++</p>
+<blockquote>
+ <p class="faq-answer">To force inline usage under VC++, you use this:</p>
+ <p class="faq-answer"> <small><span style="font-family: Courier New;">&nbsp; &nbsp; __forceinline void foo(){ ... }</span></small></p>
+ <p class="faq-answer">It should be noted that __forceinline has no effect if the compiler is set to disable inlining. It merely tells the compiler that when inlining is enabled that it shouldn't use its judgment to decide if the function should be inlined but instead to always inline it.<br>
+ <br>
+ To disable inline usage under VC++, you need to use this:</p>
+ <p class="faq-answer"><small><span style="font-family: Courier New;">&nbsp; &nbsp; #pragma inline_depth(0) // Disable inlining.</span><br>
+ <span style="font-family: Courier New;">&nbsp; &nbsp; void foo() { ... }</span><br>
+ <span style="font-family: Courier New;">&nbsp; &nbsp; #pragma inline_depth()&nbsp; // Restore&nbsp;default.</span></small></p>
+ <p class="faq-answer">The above is essentially specifying compiler-level inlining control within the code for a specific function.</p>
+</blockquote>
+<p class="faq-answer"><span style="font-weight: bold;">Function-Level Inlining --</span> <span style="font-weight: bold;">GCC / Metrowerks</span></p>
+<blockquote>
+ <p class="faq-answer">To force inline usage under GCC 3.1+, you use this:</p>
+ <p class="faq-answer"> <small><span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp;&nbsp;inline void foo() __attribute__((always_inline)) { ... }</span><br>
+ <span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; &nbsp; &nbsp;</span></small> or<small><br>
+ <span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp;&nbsp;inline __attribute__((always_inline)) void foo() { ... }</span></small></p>
+ <p class="faq-answer">To disable inline usage under GCC 3+, you use this:</p>
+ <p class="faq-answer"><small><span style="font-family: Courier New;">&nbsp; &nbsp; void foo() __attribute__((noinline)) { ... }</span><br>
+ </small> <small><span style="font-family: Courier New;">&nbsp;&nbsp;&nbsp; &nbsp; &nbsp;</span></small> or<small><br>
+ <span style="font-family: Courier New;">&nbsp; &nbsp; inline __attribute__((noinline)) void foo() { ... }</span></small></p>
+ <p class="faq-answer">EABase has some wrappers for this, such as EA_FORCE_INLINE.</p>
+</blockquote>
+<p class="faq-question"><a name="Perf.12"></a>Perf.12
+ C++ / EASTL seems to bloat my .obj files much more than C does.
+</p>
+<p class="faq-answer">There is no need to worry. The way most C++ compilers compile templates, they compile all seen template code into the current .obj module, which results in larger .obj files and duplicated template code in multiple .obj files. However, the linker will (and in fact must) select only a single version of any given function for the application, and these linked functions will usually be located contiguously.</p>
+<p class="faq-answer">Additionally, the debug information for template definitions is usually larger than that for non-templated C++ definitions, which itself is sometimes larger than C definitions due to name decoration.</p>
+<p class="faq-question"><a name="Perf.13"></a>Perf.13
+What are the best compiler settings for EASTL?</p>
+<p class="faq-answer">We will discuss various aspects of this topic here. As of this writing, more EASTL research on this topic has been done on Microsoft compiler platforms (e.g. Win32) than GCC platforms. Thus currently this discussion focuses on VC++ optimization. Some of the concepts are applicable to GCC, though. EASTL has been successfully compiled and tested (the EASTL unit test) on our major development platforms with the highest optimization settings enabled, including GCC's infamous -O3 level.<br>
+<br>
+<span style="font-weight: bold;">Optimization Topics</span></p>
+<ul>
+ <li>Function inlining.</li>
+ <li>Optimization for speed vs. optimization for size.</li>
+ <li>Link-time code generation (LTCG).</li>
+ <li>Profile-guided optimization (PGO).</li>
+</ul>
+<p class="faq-answer"><span style="font-weight: bold;">Function inlining</span><br>
+ EASTL is a template library and inlining is important for optimal speed. Compilers have various options for enabling inlining and those options are discussed in this FAQ in detail. Most users will want to enable some form of inlining when compiling EASTL and other templated libraries. For users that are most concerned about the compiler's inlining increasing code size may want to try the 'inline only functions marked as inline' compiler option. Here is a table of normalized results from the benchmark project (Win32 platform):<br>
+</p>
+<table style="text-align: left; margin-left: 40px; width: 696px; height: 88px;" border="1" cellpadding="2" cellspacing=
+"2">
+ <tbody>
+ <tr>
+ <td style="font-weight: bold;"></td>
+ <td style="font-weight: bold; text-align: center;">Inlining Disabled</td>
+ <td style="font-weight: bold; text-align: center;">Inline only 'inline'</td>
+ <td style="text-align: center;">Inline any</td>
+ </tr>
+ <tr>
+ <td style="font-weight: bold;">Application size</td>
+ <td style="text-align: center;">100K</td>
+ <td style="text-align: center;">86K</td>
+ <td style="text-align: center;">86K</td>
+ </tr>
+ <tr>
+ <td style="font-weight: bold;">Execution time</td>
+ <td style="text-align: center;">100</td>
+ <td style="text-align: center;">75</td>
+ <td style="text-align: center;">75</td>
+ </tr>
+ </tbody>
+</table>
+<p class="faq-answer"><br>
+The above execution times are highly simplified versions of the actual benchmark data but convey a sense of the general average behaviour that can be expected. In practice, simple functions such as vector::operator[] will execute much faster with inlining enabled but complex functions such as map::insert may execute no faster within inlining enabled.</p>
+<p class="faq-answer"><span style="font-weight: bold;">Optimization for Speed / Size</span><br>
+ Optimization for speed results in the compiler inlining more code than it would otherwise. This results in the inlined code executing faster than if it was not inlined. As mentioned above, basic function inlining can result in smaller code as well as faster code, but after a certain point highly inlined code becomes greater in size than less inlined code and the performance advantages of inlining start to lessen. The EASTL Benchmark project is a medium sized application that is about 80% templated and thus acts as a decent measure of the practical tradeoff between speed and size. Here is a table of normalized results from the benchmark project (Windows platform):<br>
+</p>
+<table style="text-align: left; margin-left: 40px; width: 696px; height: 88px;" border="1" cellpadding="2" cellspacing=
+"2">
+ <tbody>
+ <tr>
+ <td style="font-weight: bold;"></td>
+ <td style="font-weight: bold; text-align: center;">Size</td>
+ <td style="font-weight: bold; text-align: center;">Speed</td>
+ <td style="text-align: center;">Speed + LTCG</td>
+ <td style="text-align: center;">Speed + LTCG + PGO</td>
+ </tr>
+ <tr>
+ <td style="font-weight: bold;">Application size</td>
+ <td style="text-align: center;">80K</td>
+ <td style="text-align: center;">100K</td>
+ <td style="text-align: center;">98K</td>
+ <td style="text-align: center;">98K</td>
+ </tr>
+ <tr>
+ <td style="font-weight: bold;">Execution time</td>
+ <td style="text-align: center;">100</td>
+ <td style="text-align: center;">90</td>
+ <td style="text-align: center;">83</td>
+ <td style="text-align: center;">75</td>
+ </tr>
+ </tbody>
+</table>
+<p class="faq-answer"><br>
+What the above table is saying is that if you are willing to have your EASTL code be 20% larger, it will be 10% faster. Note that it doesn't mean that your app will be 20% larger, only the templated code in it like EASTL will be 20% larger.</p>
+<p class="faq-answer"><span style="font-weight: bold;">Link-time code generation (LTCG)</span><br>
+ LTCG is a mechanism whereby the compiler&nbsp;compiles the application as if it was all in one big .cpp file instead of separate .cpp files that don't see each other. Enabling LTCG optimizations is done by simply setting some compiler and linker settings and results in slower link times. The benchmark results are presented above and for the EASTL Benchmark project show some worthwhile improvement.</p>
+<p class="faq-answer"><span style="font-weight: bold;">Profile-guided optimization (PGO)</span><br>
+ PGO is a mechanism whereby the compiler uses profiling information from one or more runs to optimize the compilation and linking of an application. Enabling PGO optimizations is done by setting some linker settings and doing some test runs of the application, then linking the app with the test run results. Doing PGO optimizations is a somewhat time-consuming task but the benchmark results above demonstrate that for the EASTL Benchmark project that PGO is worth the effort. </p>
+<h2> Problems</h2>
+<p class="faq-question"><a name="Prob.1"></a>Prob.1
+I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?</p>
+<p class="faq-answer">It may possible that you are seeing floating point roundoff problems. Many STL algorithms require object comparisons to act consistently. However, floating point values sometimes compare differently between uses because in one situation a value might be in 32 bit form in system memory, whereas in anther situation that value might be in an FPU register with a different precision. These are difficult problems to track down and aren't the fault of EASTL or whatever similar library you might be using. There are various solutions to the problem, but the important thing is to find a way to force the comparisons to be consistent.</p>
+<p class="faq-answer">The code below was an example of this happening, whereby the object pA-&gt;mPos was stored in system memory while pB-&gt;mPos was stored in a register and comparisons were inconsistent and a crash ensued.<br>
+</p>
+<pre class="code-example">class SortByDistance : public&nbsp;binary_function&lt;WorldTreeObject*, WorldTreeObject*, bool&gt;
+{
+private:
+&nbsp;&nbsp;&nbsp;&nbsp;Vector3 mOrigin;
+
+public:
+&nbsp;&nbsp;&nbsp; SortByDistance(Vector3 origin) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; mOrigin = origin;
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; bool operator()(WorldTreeObject* pA, WorldTreeObject* pB) const {
+<span style="color: rgb(204, 0, 0);">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return ((WorldObject*)pA)-&gt;mPos - mOrigin).GetLength()</span>
+<span style="color: rgb(204, 0, 0);">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &lt; ((WorldObject*)pB)-&gt;mPos - mOrigin).GetLength();</span>
+&nbsp;&nbsp;&nbsp; }
+};</pre>
+<p class="faq-answer">Another thing to watch out for is the following mistake:<br>
+</p>
+<pre class="code-example">struct ValuePair
+{
+&nbsp;&nbsp;&nbsp; uint32_t a;
+&nbsp;&nbsp;&nbsp; uint32_t b;
+};
+
+// Improve speed by casting the struct to uint64_t
+bool operator&lt;(const ValuePair&amp; vp1, const ValuePair&amp; vp2)
+&nbsp;&nbsp;&nbsp; <span style="color: rgb(204, 0, 0);">{ return *(uint64_t*)&amp;vp1 &lt; *(uint64_t*)&amp;vp2; }</span></pre>
+<p class="faq-answer">The problem is that the ValuePair struct has 32 bit alignment but the comparison assumes 64 bit alignment. The code above has been observed to crash on the PowerPC 64-based machines. The resolution is to declare ValuePair as having 64 bit alignment.<br>
+
+</p>
+<p class="faq-question"><a name="Prob.2"></a>Prob.2 I am getting compiler warnings (e.g. C4244, C4242 or C4267) that make no sense. Why?</p>
+<span class="faq-answer">One cause of this occurs with VC++ when you have code compiled with the /Wp64 (detect 64 bit portability issues) option. This causes pointer types to have a hidden flag called&nbsp;__w64 attached to them by the compiler. So 'ptrdiff_t' is actually known by the compiler as '__w64 int', while 'int' is known by the compilers as simply 'int'. A problem occurs here when you use templates. For example, let's say we have this templated function</span>
+<pre class="code-example">template &lt;typename T&gt;
+T min(const T a, const T b) {
+&nbsp;&nbsp;&nbsp;&nbsp;return b &lt; a ? b : a;
+}</pre>
+<span class="faq-answer">If you compile this code:</span>
+<pre class="code-example">ptrdiff_t a = min(ptrdiff_t(0), ptrdiff_t(1));
+int &nbsp; &nbsp; &nbsp; b = min((int)0, (int)1);</pre>
+<span class="faq-answer">You will get the following warning for the second line, which is somewhat nonsensical:</span>
+<pre class="code-example">warning C4244: 'initializing' : conversion from 'const ptrdiff_t' to 'int', possible loss of data</pre>
+<p class="faq-answer"> This could probably be considered a VC++ bug, but in the meantime you have little choice but to ignore the warning or disable it.</p>
+
+<p class="faq-question"><a name="Prob.3"></a>Prob.3
+I am getting compiler warning C4530, which complains about exception handling and "unwind semantics." What gives?</p>
+<p class="faq-answer">VC++ has a compiler option (/EHsc) that allows you to enable/disable exception handling stack unwinding but still enable try/catch. This is useful because it can save a lot in the way of code generation for your application. Disabling stack unwinding will decrease the size of your executable on at least the Win32 platform by 10-12%.<br>
+<br>
+If you have stack unwinding disabled, but you have try/catch statements, VC++ will generate the following warning:</p>
+<pre class="code-example">warning C4530: C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc</pre>
+<p class="faq-answer"> As of EASTL v1.0, this warning has been disabled within EASTL for EASTL code. However, non-EASTL code such as std STL code may still cause this warning to be triggered. In this case there is not much you can do about this other than to disable the warning.</p>
+<p class="faq-question"> <a name="Prob.4"></a>Prob.4
+ Why are tree-based EASTL containers hard to read with a
+debugger?</p>
+<p class="faq-answer"><span style="font-weight: bold;">Short answer<br>
+</span> Maximum performance and design mandates.</p>
+<p class="faq-answer"><span style="font-weight: bold;">Long answer</span><br>
+You may notice that when you have a tree-based container (e.g. set, map)&nbsp; in the debugger that it isn't automatically able to recognize the tree nodes as containing instances of your contained object. You can get the debugger to do what you want with casting statements in the debug watch window, but this is not an ideal solution. The reason this is happening is that node-based containers always use an anonymous node type as the base class for container nodes. This is primarily done for performance, as it allows the node manipulation code to exist as a single non-templated library of functions and it saves memory because containers will have one or two base nodes as container 'anchors' and you don't want to allocate a node of the size of the user data when you can just use a base node. See list.h for an example of this and some additional in-code documentation on this.</p>
+<p class="faq-answer">Additionally, EASTL has the design mandate that an empty container constructs no user objects. This is both for performance reasons and because it doing so would skew the user's tracking of object counts and might possibly break some expectation the user has about object lifetimes.</p>
+<p class="faq-answer">Currently this debug issue exists only with tree-based containers. Other node-based containers such as list and slist use a trick to get around this problem in debug builds.</p>
+<p class="faq-answer">See <a href="#Debug.2">Debug.2</a> for more.
+<p class="faq-question"><a name="Prob.5"></a>Prob.5
+The EASTL source code is sometimes rather complicated looking. Why is that?</p>
+<p class="faq-answer"><span style="font-weight: bold;">Short answer</span><br>
+Maximum performance.</p>
+<p class="faq-answer"><span style="font-weight: bold;">Long answer</span><br>
+ EASTL uses templates, type_traits, iterator categories, redundancy reduction, and branch reduction in order to achieve optimal performance. A side effect of this is that there are sometimes a lot of template parameters and multiple levels of function calls due to template specialization. The ironic thing about this is that this makes the code (an optimized build, at least) go faster, not slower. In an optimized build the compiler will see through the calls and template parameters and generate a direct optimized inline version.</p>
+<p class="faq-answer">As an example of this, take a look at the implementation of the <span style="font-style: italic;">copy</span> implementation in algorithm.h. If you are copying an array of scalar values or other trivially copyable values, the compiler will see how the code directs this to the memcpy function and will generate nothing but a memcpy in the final code. For non-memcpyable data types the compiler will automatically understand that in do the right thing.</p>
+<p class="faq-answer">EASTL's primary objective is maximal performance, and it has been deemed worthwhile to make the code a little less obvious in order to achieve this goal. Every case where EASTL does something in an indirect way is by design and usually this is for the purpose of achieving the highest possible performance.</p>
+<p class="faq-question"><a name="Prob.6"></a>Prob.6
+When I get compilation errors, they are very long and complicated looking. What do I do?</p>
+<p class="faq-answer">Assuming the bugs are all worked out of EASTL, these errors really do indicate that you have something wrong. EASTL is intentionally very strict about types, as it tries to minimize the chance of users errors. Unfortunately, there is no simple resolution to the problem of long compiler errors other than to deal with them. On the other hand, once you've dealt with them a few times, you tend to realize that most of time they are the same kinds of errors and</p>
+<p class="faq-answer">Top five approaches to dealing with long compilation errors:</p>
+<ol>
+ <li>Look at the line where the compilation error occurred and ignore the text of the error and just look at obvious things that might be wrong.</li>
+ <li>Consider the most common typical causes of templated compilation errors and consider if any of these might be your problem. Usually one of them are.</li>
+ <li>Either read through the error (it's not as hard as it may look on the surface) or copy the error to a text file and remove the extraneous</li>
+ <li>Compile the code under GCC instead of MSVC, as GCC warnings and errors tend to be more helpful than MSVC's. Possibly also consider compiling an isolated version under Comeau C++'s free online compiler at www.comeaucomputing.com or the Dinkumware online compiler at http://dinkumware.com/exam/.&nbsp;</li>
+ <li>Try using an STL filter (http://www.bdsoft.com/tools/stlfilt.html) which automatically boils down template errors to simpler forms. We haven't tried this yet with EASTL. Also there is the more generic TextFilt (http://textfilt.sourceforge.net/).</li>
+</ol>
+<p class="faq-answer">Top five causes of EASTL compilation errors:</p>
+<ol>
+ <li>const-correctness. Perhaps a quarter of container template errors are due to the user not specifying const correctly.</li>
+ <li>Missing hash function. hash_map, hash_set, etc. require that you either specify a hash function or one exists for your class. See functional.h for examples of declarations of hash functions for common data types.</li>
+ <li>Missing operators. Various containers and algorithms require that certain operators exist for your contained classes. For example, list requires that you can test contained objects for equivalence (i.e. operator==), while map requires that you can test contained objects for "less-ness" (operator &lt;). If you define a Widget class and don't have a way to compare two Widgets, you will get errors when trying to put them into a map.</li>
+ <li>Specifying the wrong data type. For example, it is a common mistake to forget that when you insert into a map, you need to insert a pair of objects and not just your key or value type.</li>
+ <li>Incorrect template parameters. When declaring a template instantiation (e.g. map&lt;int, int, less&lt;int&gt; &gt;) you simply need to get the template parameters correct. Also note that when you have "<span style="font-family: Courier New;">&gt;&gt;</span>" next to each other that you need to separate them by one space (e.g. "<span style="font-family: Courier New;">&gt; &gt;</span>").</li>
+</ol>
+<p class="faq-question"><a name="Prob.7"></a>Prob.7
+ Templates sometimes seem to take a long time to compile. Why do I do about that?
+</p>
+<p class="faq-answer">C++ compilers are generally slower than C compilers, and C++ templates are generally slower to compile than regular C++ code. EASTL has some extra functionality (such as type_traits and algorithm specializations) that is not found in most other template libraries and significantly improves performance and usefulness but adds to the amount of code that needs to be compiled. Ironically, we have a case where more source code generates faster and smaller object code.</p>
+<p class="faq-answer">The best solution to the problem is to use pre-compiled headers, which are available on all modern ~2002+) compilers, such as VC6.0+, GCC 3.2+, and Metrowerks 7.0+. In terms of platforms this means all 2002+ platforms.</p>
+<p class="faq-answer">Some users have been speeding up build times by creating project files that put all the source code in one large .cpp file. This has an effect similar to pre-compiled headers. It can go even faster than pre-compiled headers but has downsides in the way of convenience and portability.</p>
+<p class="faq-question"><a name="Prob.8"></a>Prob.8
+I get the compiler error: "template instantiation depth exceeds maximum of 17.&nbsp;use -ftemplate-depth-NN to increase the maximum".&nbsp;</p>
+<p class="faq-answer">This is a GCC error that occurs when a templated function calls a templated function which calls a templated function, etc. past a depth of 17. You can use the GCC command line argument -ftemplate-depth-40 (or some other high number) to get around this. As note below, the syntax starting with GCC 4.5 has changed slightly. </p>
+<p class="faq-answer">The primary reason you would encounter this with EASTL is type traits that are used by algorithms. The type traits library is a (necessarily) highly templated set of types and functions which adds at most about nine levels of inlining. The copy and copy_backward algorithms have optimized pathways that add about four levels of inlining. If you have just a few more layers on top of that in container or user code then the default limit of 17 can be exceeded. We are investigating ways to reduce the template depth in the type traits library, but only so much can be done, as most compilers don't support type traits natively. Metrowerks is the current exception.</p>
+<p class="faq-answer">From the GCC documentation:</p>
+<pre class="code-example">-ftemplate-depth-n
+
+Set the maximum instantiation depth for template classes to n.
+A limit on the template instantiation depth is needed to detect
+endless recursions during template class instantiation ANSI/ISO
+C++ conforming programs must not rely on a maximum depth greater than 17.
+</pre>
+
+<p class="faq-answer">Note that starting with GCC 4.5 the syntax is -ftemplate-depth=N instead of -ftemplate-depth-n.</p>
+<p class="faq-question"><a name="Prob.9"></a>Prob.9
+ I'm getting errors about min and max while compiling.</p>
+<p class="faq-answer">You need to define NOMINMAX under VC++ when this occurs, as it otherwise defines min and max macros that interfere. There may be equivalent issues with other compilers. Also, VC++ has a specific &lt;minmax.h&gt; header file which defines min and max macros but which doesn't pay attention to NOMINMAX and so in that case there is nothing to do but not include that file or to undefine min and max. minmax.h is not a standard file and its min and max macros are not standard C or C++ macros or functions.</p>
+<p class="faq-question"><a name="Prob.10"></a>Prob.10
+C++ / EASTL seems to bloat my .obj files much more than C does.</p>
+<p class="faq-answer"> There is no need to worry. The way most C++ compilers compile templates, they compile all
+seen template code into the current .obj module, which results in larger .obj files and duplicated template code in
+multiple .obj files. However, the linker will (and must) select only a single version of any given function for the
+application, and these linked functions will usually be located contiguously.</p>
+<p class="faq-question"> <a name="Prob.11"></a>Prob.11
+ I'm getting compiler errors regarding placement operator new
+being&nbsp;previously defined.</p>
+<p class="faq-answer">This can happen if you are attempting to define your own versions of placement new/delete. The C++ language standard does not allow the user to override these functions. Section 18.4.3 of the standard states:</p>
+<p class="faq-answer">&nbsp;&nbsp;&nbsp;&nbsp; Placement forms<br>
+&nbsp;&nbsp;&nbsp;&nbsp; 1. These functions are reserved, a C++ program may not define functions that displace the versions in the Standard C++ library.</p>
+<p class="faq-answer">You may find that #defining <small>__PLACEMENT_NEW_INLINE</small> seems to fix your problems under VC++, but it can fail under some circumstances and is not portable and fails with other compilers, which don't have an equivalent workaround.</p>
+<p class="faq-question"> <a name="Prob.12"></a>Prob.12
+I'm getting errors related to wchar_t string &nbsp;functions such as wcslen().</p>
+<p class="faq-answer">EASTL requires EABase-related items that the following be so. If not, then EASTL gets confused about what types it can pass to wchar_t related functions.</p>
+<ul>
+ <li>The #define EA_WCHAR_SIZE is equal to sizeof(wchar_t).</li>
+ <li>If sizeof(wchar_t) == 2, then char16_t is typedef'd to wchar_t.</li>
+ <li>If sizeof(wchar_t) == 4, then char32_t is typedef'd to wchar_t.</li>
+</ul>
+<p class="faq-answer">EABase v2.08 and later automatically does this for most current generation and all next generation platforms. With GCC 2.x, the user may need to predefine EA_WCHAR_SIZE to the appropriate value, due to limitations with the GCC compiler. Note that GCC defaults to sizeof(wchar_t) ==4, but it can be changed to 2 with the -fshort_wchar compiler command line argument. If you are using EASTL without EABase, you will need to make sure the above items are correctly defined.</p>
+<p class="faq-question"> <a name="Prob.13"></a>Prob.13
+ I'm getting compiler warning C4619: there is no warning number Cxxxx
+(e.g. C4217).</p>
+<p class="faq-answer">Compiler warning C4619 is a VC++ warning which is saying that the user is attempting to enable or disable a warning which the compiler doesn't recognize. This warning only occurs if the user has the compiler set to enable warnings that are normally disabled, regardless of the warning level. The problem, however, is that there is no easy way for user code to tell what compiler warnings any given compiler version will recognize. That's why Microsoft normally disables this warning.</p>
+<p class="faq-answer">The only practical solution we have for this is for the user to disable warning 4619 globally or an a case-by-case basis. EA build systems such as nant/framework 2's eaconfig will usually disable 4619. In general, global enabling of 'warnings that are disabled by default' often result in quandrys such as this.</p>
+<p class="faq-question"><a name="Prob.14"></a>Prob.14
+My stack-based fixed_vector is not respecting the object alignment requirements.</p>
+<p class="faq-answer">EASTL fixed_* containers rely on the compiler-supplied alignment directives, such as that implemented by EA_PREFIX_ALIGN. This is normally a good thing because it allows the memory to be local with the container. However, as documented by Microsoft at <a href="http://msdn2.microsoft.com/en-us/library/83ythb65(VS.71).aspx"> http://msdn2.microsoft.com/en-us/library/83ythb65(VS.71).aspx</a>, this doesn't work for stack variables. The two primary means of working around this are: </p>
+<ul>
+ <li>Use something like AlignedObject&lt;&gt; from the EAStdC package's EAAllocator.h file. </li>
+ <li>Use eastl::vector with a custom allocator and have it provide aligned memory. EASTL automatically recognizes that the objects are aligned and will call the aligned version of your allocator allocate() function. You can get this aligned memory from the stack, if you need it, somewhat like how AlignedObject&lt;&gt; works. </li>
+</ul>
+<p class="faq-question"><a name="Prob.15" id="Prob.15"></a>Prob.15 I am getting compiler errors when using GCC under XCode (Macintosh/iphone).</p>
+<p class="faq-answer">The XCode environment has a compiler option which causes it to evaluate include directories recursively. So if you specify /a/b/c as an include directory, it will consider all directories underneath c to also be include directories. This option is enabled by default, though many XCode users disable it, as it is a somewhat dangerous option. The result of enabling this option with EASTL is that &lt;EASTL/string.h&gt; is used by the compiler when you say #include &lt;string.h&gt;. The solution is to disable this compiler option. It's probably a good idea to disable this option anyway, as it typically causes problems for users yet provides minimal benefits. </p>
+<p class="faq-question"><a name="Prob.16" id="Prob.16"></a>Prob.16 I am getting linker errors about Vsnprintf8 or Vsnprintf16.</p>
+<p class="faq-answer">EASTL requires the user to provide a function called Vsnprintf8 if the string::sprintf function is used. vsnprintf is not a standard C function, but most C standard libraries provide some form of it, though in some ways their implementations differ, especially in what the return value means. Also, most implementations of vsnprintf are slow, mostly due to mutexes related to locale functionality. And you can't really use vendor vsnprintf on an SPU due to the heavy standard library size. EASTL is stuck because it doesn't want to depend on something with these problems. EAStdC provides a single consistent fast lightweight, yet standards-conforming, implementation in the form of Vsnprintf(char8_t*, ...), but EASTL can't have a dependency on EAStdC. So the user must provide an implementation, even if all it does is call EAStdC's Vsnprintf or the vendor vsnprintf for that matter.</p>
+<p class="faq-answer">Example of providing Vsnprintf8 via EAStdC:</p>
+<pre class="code-example">#include &lt;EAStdC/EASprintf.h&gt;
+
+int Vsnprintf8(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments)
+{
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+}
+
+int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments)
+{
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+}</pre>
+<p>Example of providing Vsnprintf8 via C libraries:</p>
+<pre><span class="code-example">#include &lt;stdio.h&gt;
+
+int Vsnprintf8(char8_t* p, size_t n, const char8_t* pFormat, va_list arguments)
+{
+ &nbsp;&nbsp;&nbsp;#ifdef _MSC_VER
+ &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;return vsnprintf_s(p, n, _TRUNCATE, pFormat, arguments);
+ &nbsp;&nbsp;&nbsp;#else
+ &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;return vsnprintf(p, n, pFormat, arguments);
+ &nbsp;&nbsp;&nbsp;#endif
+}
+
+int Vsnprintf16(char16_t* p, size_t n, const char16_t* pFormat, va_list arguments)
+{
+ &nbsp;&nbsp;&nbsp;#ifdef _MSC_VER
+ &nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp;return vsnwprintf_s(p, n, _TRUNCATE, pFormat, arguments);
+ &nbsp;&nbsp;&nbsp;#else
+ &nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp;return vsnwprintf(p, n, pFormat, arguments); <span class="code-example-comment">// Won't work on Unix because its libraries implement wchar_t as int32_t.</span>
+ &nbsp;&nbsp;&nbsp;#endif
+}</span></pre>
+<p class="faq-question"><a name="Prob.17" id="Prob.17"></a>Prob.17 I am getting compiler errors about UINT64_C or UINT32_C.</p>
+<p class="faq-answer">This is usually an order-of-include problem that comes about due to the implementation of __STDC_CONSTANT_MACROS in C++ Standard libraries. The C++ &lt;stdint.h&gt; header file defineds UINT64_C only if __STDC_CONSTANT_MACROS has been defined by the user or the build system; the compiler doesn't automatically define it. The failure you are seeing occurs because user code is #including a system header before #including EABase and without defining __STDC_CONSTANT_MACROS itself or globally. EABase defines __STDC_CONSTANT_MACROS and #includes the appropriate system header. But if the system header was already previously #included and __STDC_CONSTANT_MACROS was not defined, then UINT64_C doesn't get defined by anybody. </p>
+<p class="faq-answer">The real solution that the C++ compiler and standard library wants is for the app to globally define __STDC_CONSTANT_MACROS itself in the build. </p>
+<p class="faq-question"><a name="Prob.18" id="Prob.18"></a>Prob.18 I am getting a crash with a global EASTL container. </p>
+<p class="faq-answer">This usually due to compiler's lack of support for global (and static) C++ class instances. The crash is happening because the global variable exists but its constructor was not called on application startup and it's member data is zeroed bytes. To handle this you need to manually initialize such variables. There are two primary ways:</p>
+<p class="faq-answer">Failing code:</p>
+<pre class="code-example">eastl::list&lt;int&gt; gIntList; // Global variable.
+
+void DoSomething()
+{
+ gIntList.push_back(1); // <span class="style3">Crash</span>. gIntList was never constructed.
+}</pre>
+<p class="faq-answer">Declaring a pointer solution: </p>
+<pre class="code-example">eastl::list&lt;int&gt;* gIntList = NULL;
+
+void DoSomething()
+{
+ if(!gIntList) // Or move this to an init function.
+ gIntList = new eastl::list&lt;int&gt;;
+
+ gIntList-&gt;push_back(1); // <span class="style2">Success</span>
+}</pre>
+<p class="faq-answer">Manual constructor call solution: </p>
+<pre class="code-example">eastl::list&lt;int&gt; gIntList;
+
+void InitSystem()
+{
+ new(&amp;gIntList) eastl::list&lt;int&gt;;
+}
+
+void DoSomething()
+{
+ gIntList.push_back(1); // <span class="style2">Success</span>
+}</pre>
+<p class="faq-question"><a name="Prob.19" id="Prob.19"></a>Prob.19 Why doesn't EASTL support passing NULL string functions? </p>
+<p class="faq-answer"></p>
+<p class="faq-answer">The primary argument is to make functions safer for use. Why crash on NULL pointer access when you can make the code safe? That's a good argument. The counter argument, which EASTL currently makes, is: </p>
+<ul>
+ <li class="faq-answer"> It breaks consistency with the C++ STL library and C libraries, which require strings to be valid.</li>
+ <li class="faq-answer"> It makes the coder slower and bigger for all users, though few need NULL checks. </li>
+ <li class="faq-answer"> The specification for how to handle NULL is simple for some cases but not simple for others. Operator &lt; below a case where the proper handling of it in a consistent way is not simple, as all comparison code (&lt;, &gt;, ==, !=, &gt;=, &lt;=) in EASTL must universally and consistently handle the case where either or both sides are NULL. A NULL string seems similar to an empty string, but doesn't always work out so simply.</li>
+ <li class="faq-answer">What about other invalid string pointers? NULL is merely one invalid value of many, with its only distinction being that sometimes it's intentionally NULL (as opposed to being NULL due to not being initialized). </li>
+ <li class="faq-answer"> How and where to implement the NULL checks in such a way as to do it efficiently is not always simple, given that public functions call public functions. </li>
+ <li class="faq-answer">It's arguable (and in fact the intent of the C++ standard library) that using pointers that are NULL is a user/app mistake. If we really want to be safe then we should be using string objects for everything. You may not entirely buy this argument in practice, but on the other hand one might ask why is the caller of EASTL using a NULL pointer in the first place? The answer of course is that somebody gave it to him. </li>
+</ul>
+<h2>Debug</h2>
+<p class="faq-question"><a name="Debug.1"></a>Debug.1
+How do I set the VC++ debugger to display EASTL container data with tooltips?</p>
+<p class="faq-answer">See <a href="#Cont.9">Cont.9</a></p>
+<p class="faq-question"><a name="Debug.2"></a>Debug.2
+How do I view containers if the visualizer/tooltip support is not present?</p>
+
+<p class="faq-answer">Here is a table of answers about how to manually inspect containers in the debugger.</p>
+<blockquote>
+<table style="text-align: left; width: 100%;" border="1" cellpadding="2" cellspacing="2" id="table4">
+ <tbody>
+ <tr>
+ <td style="font-weight: bold;">&nbsp;Container</td>
+ <td style="font-weight: bold;">Approach</td>
+ </tr>
+ <tr>
+ <td>slist<br>
+ fixed_slist</td>
+ <td>slist is a singly-linked list. Look at the slist mNode variable. You can walk the list by looking at mNode.mpNext, etc.</td>
+ </tr>
+ <tr>
+ <td>list<br>
+ fixed_list</td>
+ <td>list is a doubly-linked list. Look at the list mNode variable. You can walk the list forward by looking at mNode.mpNext, etc. and backward by looking at mpPrev, etc.</td>
+ </tr>
+ <tr>
+ <td>intrusive_list<br>
+ intrusive_slist<sup>&dagger;</sup></td>
+ <td>Look at the list mAnchor node. This lets you walk forward and backward in the list via mpNext and mpPrev.</td>
+ </tr>
+ <tr>
+ <td>array</td>
+ <td>View the array mValue member in the debugger. It's simply a C style array.</td>
+ </tr>
+ <tr>
+ <td>vector<br>
+ fixed_vector</td>
+ <td>View the vector mpBegin value in the debugger. If the string is long, use &quot;, N&quot; to limit the view length, as with someVector.mpBegin, 32</td>
+ </tr>
+ <tr>
+ <td>vector_set<br>
+ vector_multiset<br>
+ vector_map<br>
+ vector_multimap<br></td>
+ <td>These are containers that are implemented as a sorted vector, deque, or array. They are searched via a standard binary search. You can view them the same way you view a vector or deque.</td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;">deque<br></td>
+ <td style="vertical-align: top;">deque is implemented as an array of arrays, where the arrays implement successive equally-sized segments of the deque. The mItBegin deque member points the deque begin() position. </td>
+ </tr>
+ <tr>
+ <td>bitvector</td>
+ <td>Look at the bitvector mContainer variable. If it's a vector, then see vector above.</td>
+ </tr>
+ <tr>
+ <td>bitset</td>
+ <td>Look at the bitset mWord variable. The bitset is nothing but one or more uint32_t mWord items.</td>
+ </tr>
+ <tr>
+ <td>set<br>
+ multiset<br>
+ fixed_set<br>
+ fixed_multiset<br></td>
+ <td>The set containers are implemented as a tree of elements. The set mAnchor.mpNodeParent points to the top of the tree; the mAnchor.mpNodeLeft points to the far left node of the tree (set begin()); the mAnchor.mpNodeRight points to the right of the tree (set end()).</td>
+ </tr>
+ <tr>
+ <td>map<br>
+ multimap<br>
+ fixed_map<br>
+ fixed_multimap</td>
+ <td>The map containers are implemented as a tree of pairs, where pair.first is the map key and pair.second is the map value. The map mAnchor.mpNodeParent points to the top of the tree; the mAnchor.mpNodeLeft points to the far left node of the tree (map begin()); the mAnchor.mpNodeRight points to the right of the tree (map end()).</td>
+ </tr>
+ <tr>
+ <td>hash_map<br>
+ hash_multimap<br>
+ fixed_hash_map<br>
+ fixed_hash_multimap</td>
+ <td>hash tables in EASTL are implemented as an array of singly-linked lists. The array is the mpBucketArray member. Each element in the list is a pair, where the first element of the pair is the map key and the second is the map value.</td>
+ </tr>
+ <tr>
+ <td>intrusive_hash_map<br>
+ intrusive_hash_multimap<br>
+ intrusive_hash_set<br>
+ intrusive_hash_multiset</td>
+ <td>intrusive hash tables in EASTL are implemented very similarly to regular hash tables. See the hash_map and hash_set entries for more info.</td>
+ </tr>
+ <tr>
+ <td>hash_set<br>
+ hash_multiset<br>
+ fixed_hash_set<br>
+ fixed_hash_map<br></td>
+ <td>hash tables in EASTL are implemented as an array of singly-linked lists. The array is the mpBucketArray member. </td>
+ </tr>
+ <tr>
+ <td>basic_string<br>
+ fixed_string<br>
+ fixed_substring</td>
+ <td>View the string mpBegin value in the debugger. If the string is long, use &quot;, N&quot; to limit the view length, as with someString.mpBegin, 32</td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;">heap<br></td>
+ <td style="vertical-align: top;">A heap is an array of data (e.g. EASTL vector) which is organized in a tree whereby the highest priority item is array[0], The next two highest priority items are array[1] and [2]. Underneath [1] in priority are items [3] and [4], and underneath item [2] in priority are items [5] and [6]. etc.</td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;">stack<br></td>
+ <td style="vertical-align: top;">View the stack member c value in the debugger. That member will typically be a list or deque. </td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;">queue<br></td>
+ <td style="vertical-align: top;">View the queue member c value in the debugger. That member will typically be a list or deque. </td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;">priority_queue<br></td>
+ <td style="vertical-align: top;">View the priority_queue member c value in the debugger. That member will typically be a vector or deque which is organized as a heap. See the heap section above for how to view a heap. </td>
+ </tr>
+ <tr>
+ <td>smart_ptr</td>
+ <td>View the mpValue member.</td>
+ </tr>
+ </tbody>
+</table>
+</blockquote>
+<p class="faq-question"><a name="Debug.3"></a>Debug.3
+The EASTL source code is sometimes rather complicated looking. Why is that?</p>
+<p class="faq-answer"><span style="font-weight: bold;">Short answer</span><br>
+Maximum performance.</p>
+<p class="faq-answer"><span style="font-weight: bold;">Long answer</span><br>
+ EASTL uses templates, type_traits, iterator categories, redundancy reduction, and branch reduction in order to achieve optimal performance. A side effect of this is that there are sometimes a lot of template parameters and multiple levels of function calls due to template specialization. The ironic thing about this is that this makes the code (an optimized build, at least) go faster, not slower. In an optimized build the compiler will see through the calls and template parameters and generate a direct optimized inline version.</p>
+<p class="faq-answer">As an example of this, take a look at the implementation of the <span style="font-style: italic;">copy</span> implementation in algorithm.h. If you are copying an array of scalar values or other trivially copyable values, the compiler will see how the code directs this to the memcpy function and will generate nothing but a memcpy in the final code. For non-memcpyable data types the compiler will automatically understand that in do the right thing.</p>
+<p class="faq-answer">EASTL's primary objective is maximal performance, and it has been deemed worthwhile to make the code a little less obvious in order to achieve this goal. Every case where EASTL does something in an indirect way is by design and usually this is for the purpose of achieving the highest possible performance.</p>
+<p class="faq-question"><a name="Debug.4"></a>Debug.4
+When I get compilation errors, they are very long and complicated looking. What do I do?</p>
+<p class="faq-answer">Assuming the bugs are all worked out of EASTL, these errors really do indicate that you have something wrong. EASTL is intentionally very strict about types, as it tries to minimize the chance of users errors. Unfortunately, there is no simple resolution to the problem of long compiler errors other than to deal with them. On the other hand, once you've dealt with them a few times, you tend to realize that most of time they are the same kinds of errors and<br>
+<br>
+Top five approaches to dealing with long compilation errors:</p>
+<ol>
+ <li>Look at the line where the compilation error occurred and ignore the text of the error and just look at obvious things that might be wrong.</li>
+ <li>Consider the most common typical causes of templated compilation errors and consider if any of these might be your problem. Usually one of them are.</li>
+ <li>Either read through the error (it's not as hard as it may look on the surface) or copy the error to a text file and remove the extraneous</li>
+ <li>Compile the code under GCC instead of MSVC, as GCC warnings and errors tend to be more helpful than MSVC's. Possibly also consider compiling an isolated version under Comeau C++'s free online compiler at www.comeaucomputing.com or the Dinkumware online compiler at http://dinkumware.com/exam/.&nbsp;</li>
+ <li>Try using an STL filter (http://www.bdsoft.com/tools/stlfilt.html) which automatically boils down template errors to simpler forms. We haven't tried this yet with EASTL. Also there is the more generic TextFilt (http://textfilt.sourceforge.net/).</li>
+</ol>
+<p class="faq-answer">Top five causes of EASTL compilation errors:</p>
+<ol>
+ <li>const-correctness. Perhaps a quarter of container template errors are due to the user not specifying const correctly.</li>
+ <li>Missing hash function. hash_map, hash_set, etc. require that you either specify a hash function or one exists for your class. See functional.h for examples of declarations of hash functions for common data types.</li>
+ <li>Missing operators. Various containers and algorithms require that certain operators exist for your contained classes. For example, list requires that you can test contained objects for equivalence (i.e. operator==), while map requires that you can test contained objects for "less-ness" (operator &lt;). If you define a Widget class and don't have a way to compare two Widgets, you will get errors when trying to put them into a map.</li>
+ <li>Specifying the wrong data type. For example, it is a common mistake to forget that when you insert into a map, you need to insert a pair of objects and not just your key or value type.</li>
+ <li>Incorrect template parameters. When declaring a template instantiation (e.g. map&lt;int, int, less&lt;int&gt; &gt;) you simply need to get the template parameters correct. Also note that when you have "<span style="font-family: Courier New;">&gt;&gt;</span>" next to each other that you need to separate them by one space (e.g. "<span style="font-family: Courier New;">&gt; &gt;</span>").</li>
+</ol>
+<p class="faq-question"><a name="Debug.5"></a>Debug.5
+How do I measure hash table balancing?</p>
+<p class="faq-answer">The following functionality lets you spelunk hash container layout.</p>
+<ul>
+ <li>There is the load_factor function which tells you the overall hashtable load, but doesn't tell you if a load is unevenly distributed.</li>
+ <li>You can control the load factor and thus the automated bucket redistribution with set_load_factor.</li>
+ <li>The local_iterator begin(size_type n) and local_iterator end(size_type) functions lets you iterate each bucket individually. You can use this to examine the elements in a bucket.</li>
+ <li>You can use the above to get the size of any bucket, but there is also simply the bucket_size(size_type n) function.</li>
+ <li>The bucket_count function tells you the count of buckets. So with this you can completely visualize the layout of the hash table.</li>
+ <li>There is also iterator find_by_hash(hash_code_t c), for what it's worth.</li>
+</ul>
+<p class="faq-answer">The following function draws an ASCII bar graph of the hash table for easy visualization of bucket distribution:</p>
+<blockquote>
+ <p><font face="Courier New" size="1">#include &lt;EASTL/hash_map.h&gt;<br>
+ #include &lt;EASTL/algorithm.h&gt;<br>
+ #include &lt;stdio.h&gt;<br>
+ <br>
+ template &lt;typename HashTable&gt;<br>
+ void VisualizeHashTableBuckets(const HashTable&amp; h)<br>
+ {<br>
+&nbsp;&nbsp;&nbsp; eastl_size_t bucketCount&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; = h.bucket_count();<br>
+&nbsp;&nbsp;&nbsp; eastl_size_t largestBucketSize = 0;<br>
+ <br>
+&nbsp;&nbsp;&nbsp; for(eastl_size_t i = 0; i &lt; bucketCount; i++)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; largestBucketSize = eastl::max_alt(largestBucketSize, h.bucket_size(i));<br>
+ <br>
+&nbsp;&nbsp;&nbsp; YourPrintFunction(&quot;\n --------------------------------------------------------------------------------\n&quot;);<br>
+ <br>
+&nbsp;&nbsp;&nbsp; for(eastl_size_t i = 0; i &lt; bucketCount; i++)<br>
+&nbsp;&nbsp;&nbsp; {<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; const eastl_size_t k = h.bucket_size(i) * 80 / largestBucketSize;<br>
+ <br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; char buffer[16];<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; sprintf(buffer, &quot;%3u|&quot;, (unsigned)i);<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; YourPrintFunction(buffer);<br>
+ <br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; for(eastl_size_t j = 0; j &lt; k; j++)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; YourPrintFunction(&quot;*&quot;);<br>
+ <br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; YourPrintFunction(&quot;\n&quot;);<br>
+&nbsp;&nbsp;&nbsp; }<br>
+ <br>
+&nbsp;&nbsp;&nbsp; YourPrintFunction(&quot; --------------------------------------------------------------------------------\n&quot;);<br>
+ }</font></p>
+</blockquote>
+<p class="faq-answer"> This results in a graph that looks like the following (with one horizontal bar per bucket). This hashtable has a large number of collisions in each of its 10 buckets.
+<blockquote>
+ <p><font face="Courier New" size="2">&nbsp;&nbsp; ------------------------------------------------------<br>
+&nbsp;0|********************************************<br>
+&nbsp;1|************************************************<br>
+&nbsp;2|***************************************<br>
+&nbsp;3|********************************************<br>
+&nbsp;4|*****************************************************<br>
+&nbsp;5|*************************************************<br>
+&nbsp;6|****************************************<br>
+&nbsp;7|***********************************************<br>
+&nbsp;8|********************************************<br>
+&nbsp;9|**************************************<br>
+ 10|********************************************<br>
+&nbsp;&nbsp; -----------------------------------------------------</font>
+</blockquote>
+<h2> Containers</h2>
+<p class="faq-question"><a name="Cont.1"></a>Cont.1
+Why do some containers have "fixed" versions (e.g. fixed_list) but others(e.g. deque) don't have fixed versions?</p>
+<p class="faq-answer">Recall that fixed containers are those that are implemented via a single contiguous block of memory and don't use a general purpose heap to allocate memory from. For example, fixed_list is a list container that implements its list by a user-configurable fixed block of memory. Such containers have an upper limit to how many items they can hold, but have the advantage of being more efficient with memory use and memory access coherency.</p>
+<p class="faq-answer">The reason why some containers don't have fixed versions is that such functionality doesn't make sense with these containers. Containers which don't have fixed versions include:</p>
+<pre class="code-example">array, deque, bitset, stack, queue, priority_queue,
+intrusive_list, intrusive_hash_map, intrusive_hash_set,
+intrusive_hash_multimap, intrusive_hash_multimap,
+vector_map, vector_multimap, vector_set, vector_multiset.</pre>
+<p class="faq-answer">Some of these containers are adapters which wrap other containers and thus there is no need for a fixed version because you can just wrap a fixed container. In the case of intrusive containers, the user is doing the allocation and so there are no memory allocations. In the case of array, the container is a primitive type which doesn't allocate memory. In the case of deque, it's primary purpose for being is to dynamically resize and thus the user would likely be better of using a fixed_vector.</p>
+<p class="faq-question"> <a name="Cont.2"></a>Cont.2
+Can I mix EASTL with standard C++ STL?</p>
+<p class="faq-answer">This is possible to some degree, though the extent depends on the implementation of C++ STL. One of things that makes interoperability is something called iterator categories. Containers and algorithms recognize iterator types via their category and STL iterator categories are not recognized by EASTL and vice versa.</p>
+<p class="faq-answer">Things that you definitely can do:</p>
+<ul>
+ <li>#include both EASTL and standard STL headers from the same .cpp file.</li>
+ <li>Use EASTL containers to hold STL containers.</li>
+ <li>Construct an STL reverse_iterator from an EASTL&nbsp;iterator.</li>
+ <li>Construct an EASTL reverse_iterator from an&nbsp;STL&nbsp;iterator.</li>
+</ul>
+<p class="faq-answer">Things that you probably will be able to do, though a given std STL implementation may prevent it:
+</p>
+<ul>
+ <li>Use STL containers in EASTL algorithms.</li>
+ <li>Use EASTL containers in STL algorithms.</li>
+ <li>Construct or assign to an STL container via iterators into an EASTL container.</li>
+ <li>Construct or assign to an EASTL container via iterators into an&nbsp;STL container.</li>
+</ul>
+<p class="faq-answer">Things that you would be able to do if the given std STL implementation is bug-free:
+</p>
+<ul>
+ <li>Use STL containers to hold EASTL containers. Unfortunately, VC7.x STL has a confirmed bug that prevents this. Similarly, STLPort versions prior to v5 have a similar but.</li>
+</ul>
+<p class="faq-answer">Things that you definitely can't do:
+</p>
+<ul>
+ <li>Use an STL allocator directly with an EASTL container (though you can use one indirectly).</li>
+ <li>Use an EASTL allocator directly with an STL container (though you can use one indirectly).</li>
+</ul>
+<p class="faq-question"> <a name="Cont.3"></a>Cont.3
+Why are there so many containers?</p>
+<p class="faq-answer">EASTL has a large number of container types (e.g vector, list, set) and often has a number of variations of given types (list, slist, intrusive_list, fixed_list). The reason for this is that each container is tuned and to a specific need and there is no single container that works for all needs. The more the user is concerned about squeezing the most performance out of their system, the more the individual container variations become significant. It's important to note that having additional container types generally does not mean generating additional code or code bloat. Templates result in generated code regardless of what templated class they come from, and so for the most part you get optimal performance by choosing the optimal container for your needs.</p>
+<p class="faq-question"> <a name="Cont.4"></a>Cont.4
+Don't STL and EASTL containers fragment memory?</p>
+<p class="faq-answer">They only fragment memory if you use them in a way that does so. This is no different from any other type of container used in a dynamic way. There are various solutions to this problem, and EASTL provides additional help as well:</p>
+<ul>
+ <li>For vectors, use the reserve function (or the equivalent constructor) to set aside a block of memory for the container. The container will not reallocate memory unless you try grow beyond the capacity you reserve.</li>
+ <li>EASTL has "fixed" variations of containers which allow you to specify a fixed block of memory which the container uses for its memory. The container will not allocate any memory with these types of containers and all memory will be cache-friendly due to its locality.</li>
+ <li>You can assign custom allocators to containers instead of using the default global allocator. You would typically use an allocator that has its own private pool of memory.</li>
+ <li>Where possible, add all a container's elements to it at once up front instead of adding them over time. This avoids memory fragmentation and increase cache coherency.</li>
+</ul>
+<p class="faq-question"><a name="Cont.5"></a>Cont.5
+ I don't see container optimizations for equivalent scalar types such
+as pointer types. Why?</p>
+<p>Metrowerks (and no other, as of this writing) STL has some container specializations for type T* which maps them to type void*. The idea is that a user who declares a list of Widget* and a list of Gadget* will generate only one container: a list of void*. As a result, code generation will be smaller. Often this is done only in optimized builds, as such containers are harder to view in debug builds due to type information being lost.<br>
+<br>
+The addition of this optimization is under consideration for EASTL, though it might be noted that optimizing compilers such as VC++ are already capable of recognizing duplicate generated code and folding it automatically as part of link-time code generation (LTCG) (a.k.a. "whole program optimization"). This has been verified with VC++, as the following code and resulting disassembly demonstrate:</p>
+<pre class="code-example">eastl::list&lt;int*&gt;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; intPtrList;
+eastl::list&lt;TestObject*&gt; toPtrList;
+
+eastl_size_t n1 = intPtrList.size();
+eastl_size_t n2 = toPtrList.size();
+
+0042D288&nbsp; lea&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; edx,[esp+14h]
+0042D28C&nbsp; <span style="color: rgb(51, 51, 255);">call&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; eastl::list&lt;TestObject&gt;::size (414180h)</span>
+0042D291&nbsp; push&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; eax&nbsp;
+0042D292&nbsp; lea&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; edx,[esp+24h]
+0042D296&nbsp; <span style="color: rgb(51, 51, 255);">call&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; eastl::list&lt;TestObject&gt;::size (414180h)</span></pre>
+Note that in the above case the compiler folded the two implementations of size() into a single implementation.<br>
+<p class="faq-question"><a name="Cont.6"></a>Cont.6
+What about alternative container and algorithm implementations (e.g. treaps, skip lists, avl trees)?</p>
+<p class="faq-answer">EASTL chooses to implement some alternative containers and algorithms and not others. It's a matter of whether or not the alternative provides truly complementary or improved functionality over existing containers. The following is a list of some implemented and non-implemented alternatives and the rationale behind each:</p>
+<p class="faq-answer">Implemented:</p>
+<ul>
+ <li>intrusive_list, etc. -- Saves memory and improves cache locality.</li>
+ <li>vector_map, etc. -- Saves memory and improves cache locality.</li>
+ <li>ring_buffer -- Useful for some types of operations and has no alternative.</li>
+ <li>shell_sort -- Useful sorting algorithm.</li>
+ <li>sparse_matrix -- Useful for some types of operations and has no alternative.</li>
+</ul>
+<p class="faq-answer">Not implemented:
+</p>
+<ul>
+ <li>skip lists (alternative to red-black tree) -- These use more memory and usually perform worse than rbtrees.</li>
+ <li>treap (alternative to red-black tree) -- These are easier and smaller than rbtrees, but perform worse.</li>
+ <li>avl tree (alternative to red-black tree) -- These have slightly better search performance than rbtrees, but significantly worse insert/remove performance.</li>
+ <li>btree (alternative to red-black tree) --&nbsp; These are no better than rbtrees.</li>
+</ul>
+<p class="faq-answer">If you have an idea of something that should be implemented, please suggest it or even provide at least a prototypical implementation.</p>
+<p class="faq-question"><a name="Cont.7"></a>Cont.7
+Why are tree-based EASTL containers hard to read with a debugger?</p>
+<p class="faq-answer"><span style="font-weight: bold;">Short answer<br>
+</span> Maximum performance and design mandates.</p>
+<p class="faq-answer"><span style="font-weight: bold;">Long answer</span><br>
+You may notice that when you have a tree-based container (e.g. set, map)&nbsp; in the debugger that it isn't automatically able to recognize the tree nodes as containing instances of your contained object. You can get the debugger to do what you want with casting statements in the debug watch window, but this is not an ideal solution. The reason this is happening is that node-based containers always use an anonymous node type as the base class for container nodes. This is primarily done for performance, as it allows the node manipulation code to exist as a single non-templated library of functions and it saves memory because containers will have one or two base nodes as container 'anchors' and you don't want to allocate a node of the size of the user data when you can just use a base node. See list.h for an example of this and some additional in-code documentation on this.</p>
+<p class="faq-answer">Additionally, EASTL has the design mandate that an empty container constructs no user objects. This is both for performance reasons and because it doing so would skew the user's tracking of object counts and might possibly break some expectation the user has about object lifetimes.</p>
+<p class="faq-answer">Currently this debug issue exists only with tree-based containers. Other node-based containers such as list and slist use a trick to get around this problem in debug builds.</p>
+<p class="faq-question"><a name="Cont.8"></a>Cont.8
+How do I assign a custom allocator to an EASTL container?</p>
+<p class="faq-answer">There are two ways of doing this:</p>
+<ol>
+ <li>Use the set_allocator function that is present in each container.</li>
+ <li>Specify a new allocator type via the Allocator template parameter that is present in each container.</li>
+</ol>
+<p class="faq-answer">For item #1, EASTL expects that you provide an instance of an allocator of the type that EASTL recognizes. This is simple but has the disadvantage that all such allocators must be of the same class. The class would need to have C++ virtual functions in order to allow a given instance to act differently from another instance.</p>
+<p class="faq-answer">For item #2, you specify that the container use your own allocator class. The advantage of this is that your class can be implemented any way you want and doesn't require virtual functions for differentiation from other instances. Due to the way C++ works your class would necessarily have to use the same member function names as the default allocator class type. In order to make things easier, we provide a skeleton allocator here which you can copy and fill in with your own implementation.</p>
+<pre class="code-example">class custom_allocator
+{
+public:
+&nbsp;&nbsp;&nbsp; custom_allocator(const char* pName = EASTL_NAME_VAL("custom allocator"))
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #if EASTL_NAME_ENABLED
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #endif
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Possibly do something here.
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; custom_allocator(const allocator&amp; x, const char* pName = EASTL_NAME_VAL("custom allocator"));
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #if EASTL_NAME_ENABLED
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #endif
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Possibly copy from x here.
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; ~custom_allocator();
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Possibly do something here.
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; custom_allocator&amp; operator=(const custom_allocator&amp; x)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Possibly copy from x here.
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return *this;
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; void* allocate(size_t n, int flags = 0)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Implement the allocation here.
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Implement the allocation here.
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; void deallocate(void* p, size_t n)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Implement the deallocation here.
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; const char* get_name() const
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #if EASTL_NAME_ENABLED
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return mpName;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #else
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return "custom allocator";
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #endif
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; void set_name(const char* pName)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #if EASTL_NAME_ENABLED
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; mpName = pName;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; #endif
+&nbsp;&nbsp;&nbsp; }
+
+protected:
+&nbsp;&nbsp;&nbsp; // Possibly place instance data here.
+
+&nbsp;&nbsp;&nbsp; #if EASTL_NAME_ENABLED
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; const char* mpName; // Debug name, used to track memory.
+&nbsp;&nbsp;&nbsp; #endif
+};
+
+
+inline bool operator==(const allocator&amp; a, const allocator&amp; b)
+{
+&nbsp;&nbsp;&nbsp; // Provide a comparison here.
+}
+
+inline bool operator!=(const allocator&amp; a, const allocator&amp; b)
+{
+&nbsp;&nbsp;&nbsp; // Provide a negative comparison here.
+}</pre>
+<p class="faq-answer"> Here's an example of how to use the above custom allocator:</p>
+<pre class="code-example">// Declare a Widget list and have it default construct.
+list&lt;Widget, custom_allocator&gt; widgetList;
+
+// Declare a Widget list and have it construct with a copy of some global allocator.
+list&lt;Widget, custom_allocator&gt; widgetList2(gSomeGlobalAllocator);
+
+// Declare a Widget list and have it default construct, but assign
+// an underlying implementation after construction.
+list&lt;Widget, custom_allocator&gt; widgetList;
+widgetList.get_allocator().mpIAllocator = new WidgetAllocatorImpl;</pre>
+
+<p class="faq-question"><a name="Cont.9"></a>Cont.9 How do I set the VC++ debugger to display EASTL container data with tooltips?</p>
+<p class="faq-answer">Visual Studio supports this via the AutoExp.dat file, an example of which is <a href="AutoExp.dat">present</a> with this documentation. </p>
+<p class="faq-answer">Sometimes the AutoExp.dat doesn't seem to work. Avery Lee's explanation:</p>
+<blockquote>
+ <p class="faq-answer style5"> If I had to take a guess, the problem is most likely in the cast to the concrete node type. These are always tricky because, for some strange reason, the debugger is whitespace sensitive with regard to specifying template types. You might try manually checking one of the routines of the specific map instantiation and checking that the placement of whitespace and const within the template expression still matches exactly. In some cases the compiler uses different whitespace rules depending on the value type which makes it impossible to correctly specify a single visualizer &ndash; this was the case for eastl::list&lt;&gt;, for which I was forced to include sections for both cases. The downside is that you have a bunch of (error) entries either way. </p>
+</blockquote>
+<p class="faq-question"> <a name="Cont.10"></a>Cont.10
+How do I use a memory pool with a container?</p>
+<p class="faq-answer">Using custom memory pools is a common technique for decreasing memory fragmentation and increasing memory cache locality. EASTL gives you the flexibility of defining your own memory pool systems for containers. There are two primary ways of doing this:</p>
+<ul>
+ <li>Assign a custom allocator to a container. eastl::fixed_pool provides an implementation.</li>
+ <li>Use one of the EASTL fixed containers, such as fixed_list.</li>
+</ul>
+<p class="faq-answer"><span style="font-weight: bold;">Custom Allocator</span><br>
+In the custom allocator case, you will want to create a memory pool and assign it to the container. For purely node-based containers such as list, slist, map, set, multimap, and multiset, your pool simply needs to be able to allocate list nodes. Each of these containers has a member typedef called node_type which defines the type of node allocated by the container.&nbsp;So if you have a memory pool that has a constructor that takes the size of pool items and the count of pool items, you would do this (assuming that MemoryPool implements the Allocator interface):</p>
+<pre class="code-example">typedef list&lt;Widget, MemoryPool&gt; WidgetList;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Declare your WidgetList type.
+
+MemoryPool myPool(sizeof(WidgetList::node_type), 100); // Make a pool of 100 Widget nodes.
+WidgetList myList(&amp;myPool);&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // Create a list that uses the pool.</pre>
+<p class="faq-answer">In the case of containers that are array-based, such as vector and basic_string, memory pools don't work very well as these containers work on a realloc-basis instead of by adding incremental nodes. What we want to do with these containers is assign a sufficient block of memory to them and&nbsp;reserve() the container's capacity to the size of the memory.</p>
+<p class="faq-answer">In the case of mixed containers which are partly array-based and partly node-based, such as hash containers and deque, you can use a memory pool for the nodes but will need a single array block to supply for the buckets (hash containers and deque both use a bucket-like system).</p>
+<p class="faq-answer">You might consider using eastl::fixed_pool as such an allocator, as it provides such functionality and allows the user to provide the actual memory used for the pool. Here is some example code:</p>
+<pre class="code-example">char buffer[256];
+
+list&lt;Widget, fixed_pool&gt;&nbsp;myList;
+myList.get_allocator().init(buffer, 256);</pre>
+<p class="faq-answer"><span style="font-weight: bold;">Fixed Container</span><br>
+In the fixed container case, the container does all the work for you. To use a list which implements a private pool of memory, just declare it like so:</p>
+<pre class="code-example">fixed_list&lt;Widget, 100&gt; fixedList; // Declare a fixed_list that can hold 100 Widgets</pre>
+<p class="faq-question"><a name="Cont.11"></a>Cont.11
+How do I write a comparison (operator&lt;()) for a struct that contains two or more members?&nbsp;</p>
+<p class="faq-answer">See <a href="#Algo.2">Algo.2</a></p>
+<p class="faq-question"> <a name="Cont.12"></a>Cont.12
+Why doesn't container X have member function Y?</p>
+<p class="faq-answer">Why don't the list or vector containers have a find() function? Why doesn't the vector container have a sort() function? Why doesn't the string container have a mid() function? These are common examples of such questions.</p>
+<p class="faq-answer">The answer usually boils down to two reasons:</p>
+<ul>
+ <li>The functionality exists in a more centralized location elsewhere, such as the algorithms.</li>
+ <li>The functionality can be had by using other member functions.</li>
+</ul>
+<p class="faq-answer">In the case of find and sort functions not being part of containers, the find algorithm and sort algorithm are centralized versions that apply to <span style="font-style: italic;">any</span> container. Additionally, the algorithms allow you to specify a sub-range of the container on which to apply the algorithm. So in order to find an element in a list, you would do this:<br>
+</p>
+<div class="code-example">list&lt;int&gt;::iterator i =&nbsp;find(list.begin(), list.end(), 3);</div>
+<p class="faq-answer">And in order to sort a vector, you would do this:<br>
+</p>
+<div class="code-example">quick_sort(v.begin(), v.end()); &nbsp; // Sort the entire array.
+ <br>
+quick_sort(&amp;v[3], &amp;v[8]); &nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; // Sort the items at the indexes in the range of [3, 8).</div>
+<p class="faq-answer">In the case of functionality that can be had by using other member functions,
+note that EASTL follows the philosophy that duplicated functionality should not exist in a container,
+with exceptions being made for cases where mistakes and unsafe practices commonly happen if the given
+function isn't present. In the case of string not having a mid function, this is because there is a
+string constructor that takes a sub-range of another string. So to make a string out of the middle of
+another, you would do this:</p>
+<pre class="code-example">string strMid(str, 3, 5); // Make a new string of the characters from the source range of [3, 3+5).</pre>
+<p class="faq-answer"> It might be noted that the EASTL string class is unique among EASTL containers in that it sometimes violates the minimum functionality rule. This is so because the std C++ string class similarly does so and EASTL aims to be compatible.</p>
+<p class="faq-question"><a name="Cont.13"></a>Cont.13
+How do I search a hash_map of strings via a char pointer efficiently? If I use map.find("hello") it creates a temporary string, which is inefficient.</p>
+<p class="faq-answer">The problem is illustrated with this example:</p>
+<pre class="code-example">map&lt;string, Widget&gt; swMap;
+ ...
+map&lt;string, Widget&gt;::iterator it = swMap.find("blue"); // A temporary string object is created here.</pre>
+<p class="faq-answer">In this example, the find function expects a string object and not a string literal and so (silently!) creates a temporary string object for the duration of the find. There are two solutions to this problem:
+</p>
+<ul>
+ <li>Make the map a map of char pointers instead of string objects. Don't forget to write a custom compare or else the default comparison function will compare pointer values instead of string contents.</li>
+ <li>Use the EASTL hash_map::find_as function, which allows you to find an item in a hash container via an alternative key than the one the hash table uses.</li>
+</ul>
+<p class="faq-question"><a name="Cont.14"></a>Cont.14
+Why are set and hash_set iterators const (i.e. const_iterator)?</p>
+<p class="faq-answer">The situation is illustrated with this example:</p>
+<pre class="code-example">set&lt;int&gt; intSet;
+
+intSet.insert(1);
+set&lt;int&gt;::iterator i = intSet.begin();
+*i = 2; // Error: iterator i is const.</pre>
+<p class="faq-answer">In this example, the iterator is a regular iterator and not a const_iterator, yet the compiler gives an error when trying to change the iterator value. The reason this is so is that a set is an ordered container and changing the value would make it out of order. Thus, set and multiset iterators are always const_iterators. If you need to change the value and are sure the change will not alter the container order, use const_cast or declare mutable member variables for your contained object. This resolution is the one blessed by the C++ standardization committee.</p>
+
+<p class="faq-question"><a name="Cont.15"></a>Cont.15
+How do I prevent my hash container from re-hashing?</p>
+<p class="faq-answer">If you want to make a hashtable never re-hash (i.e. increase/reallocate its bucket count),
+call set_max_load_factor with a very high value such as 100000.f.</p>
+<p class="faq-answer">Similarly, you can control the bucket growth factor with the rehash_policy function.
+By default, when buckets reallocate, they reallocate to about twice their previous count.
+You can control that value as with the example code here:</p>
+<pre class="code-example">hash_set&lt;int&gt; hashSet;
+hashSet.rehash_policy().mfGrowthFactor = 1.5f</pre>
+
+<p class="faq-question">
+ <a name="Cont.16"></a>Cont.16
+Which uses less memory, a map or a hash_map?
+</p>
+<p class="faq-answer">A hash_map will virtually always use less memory. A hash_map will use an average of two pointers per stored element, while a map uses three pointers per stored element.</p>
+<p class="faq-question"> <a name="Cont.17"></a>Cont.17
+How do I write a custom hash function?</p>
+<p class="faq-answer">You can look at the existing hash functions in functional.h, but we provide a couple examples here.</p>
+<p class="faq-answer">To write a specific hash function for a Widget class, you would do this:</p>
+<pre class="code-example">struct WidgetHash {
+&nbsp; &nbsp; size_t operator()(const Widget&amp; w) const
+&nbsp; &nbsp; &nbsp; &nbsp; { return w.id; }
+};
+
+hash_set&lt;Widget, WidgetHash&gt; widgetHashSet;</pre>
+<p class="faq-answer">To write a generic (templated) hash function for a set of similar classes (in this case that have an id member), you would do this:<br>
+</p>
+<pre class="code-example">template &lt;typename T&gt;
+struct GeneralHash {
+&nbsp; &nbsp; size_t operator()(const T&amp; t) const
+&nbsp; &nbsp; &nbsp; &nbsp; { return t.id; }
+};
+
+hash_set&lt;Widget,&nbsp;GeneralHash&lt;Widget&gt; &gt; widgetHashSet;
+hash_set&lt;Dogget,&nbsp;GeneralHash&lt;Dogget&gt; &gt; doggetHashSet;</pre>
+
+<p class="faq-question"> <a name="Cont.18"></a>Cont.18
+How do I write a custom compare function for a map or set?</p>
+<p class="faq-answer"> The sorted containers require that an operator&lt; exist for the stored values or that the user provide a suitable custom comparison function. A custom can be implemented like so:<br>
+</p>
+<div class="code-example">struct WidgetLess {
+&nbsp; &nbsp; bool operator()(const Widget&amp; w1, const Widget&amp; w2) const
+&nbsp; &nbsp; &nbsp; &nbsp; { return w.id &lt; w2.id; }
+};
+
+set&lt;Widget, WidgetLess&gt; wSet;</div>
+<p class="faq-answer">It's important that your comparison function must be consistent in its behaviour, else the container will either be unsorted or a crash will occur. This concept is called "strict weak ordering."</p>
+<p class="faq-question"><a name="Cont.19"></a>Cont.19
+How do I force my vector or string capacity down to the size of the container?</p>
+<p class="faq-answer">You can simply use the set_capacity() member function which is present in both vector and string. This is a function that is not present in std STL vector and string functions.</p>
+<pre class="code-example">eastl::vector&lt;Widget&gt; x;
+x.set_capacity(); &nbsp; // Shrink x's capacity to be equal to its size.
+
+eastl::vector&lt;Widget&gt; x;
+x.set_capacity(0); &nbsp;// Completely clear x.</pre>
+<p> To compact your vector or string in a way that would also work with std STL you need to do the following.</p>
+<p> How to shrink a vector's capacity to be equal to its size:</p>
+<pre class="code-example">std::vector&lt;Widget&gt; x;
+std::vector&lt;Widget&gt;(x).swap(x); // Shrink x's capacity.</pre>
+How to completely clear a std::vector (size = 0, capacity = 0, no allocation):<br>
+<pre class="code-example">std::vector&lt;Widget&gt; x;
+std::vector&lt;Widget&gt;().swap(x); // Completely clear x.
+</pre>
+<p class="faq-question"> <a name="Cont.20"></a>Cont.20
+How do I iterate a container while (selectively) removing items from it?</p>
+<p class="faq-answer">All EASTL containers have an erase function which takes an iterator as an argument and returns an iterator to the next item. Thus, you can erase items from a container while iterating it like so:</p>
+<pre class="code-example">set&lt;int&gt; intSet;<br>
+set&lt;int&gt;::iterator i = intSet.begin();<br>
+while(i != intSet.end())
+{
+ if(*i &amp; 1) &nbsp;<span class="code-example-comment">// Erase all odd integers from the container.</span>
+&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; i = intSet.erase(i);
+&nbsp;&nbsp;&nbsp; else
+&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; ++i;
+}</pre>
+<p class="faq-question"><a name="Cont.21"></a>Cont.21
+How do I store a pointer in a container?</p>
+<p class="faq-answer"> The problem with storing pointers in containers is that clearing the container will not
+free the pointers automatically. There are two conventional resolutions to this problem:</p>
+<ul>
+ <li>Manually free pointers when removing them from containers.&nbsp;</li>
+ <li>Store the pointer as a smart pointer instead of a "raw"pointer.</li>
+</ul>
+<p class="faq-answer">The advantage of the former is that it makes the user's intent obvious and prevents the possibility of smart pointer "thrashing" with some containers. The disadvantage of the former is that it is more tedicous and error-prone.</p>
+<p class="faq-answer">The advantage of the latter is that your code will be cleaner and will always be error-free. The disadvantage is that it is perhaps slightly obfuscating and with some uses of some containers it can cause smart pointer thrashing, whereby a resize of a linear container (e.g. vector) can cause shared pointers to be repeatedly incremented and decremented with no net effect.</p>
+<p class="faq-answer">It's important that you use a shared smart pointer and not an unshared one such as C++ auto_ptr, as the latter will result in crashes upon linear container resizes. Here we provide an example of how to create a list of smart pointers:</p>
+<pre class="code-example">list&lt; shared_ptr&lt;Widget&gt; &gt; wList;
+
+wList.push_back(shared_ptr&lt;Widget&gt;(new Widget));
+wList.pop_back(); // The Widget will be freed.</pre>
+<p class="faq-question"><a name="Cont.22"></a>Cont.22
+How do I make a union of two containers? difference? intersection?</p>
+<p class="faq-answer">The best way to accomplish this is to sort your container (or use a sorted container such as set) and then apply the set_union, set_difference, or set_intersection algorithms.</p>
+<p class="faq-question"><a name="Cont.23"></a>Cont.23
+How do I override the default global allocator?&nbsp;</p>
+<p class="faq-answer">There are multiple ways to accomplish this. The allocation mechanism is defined in EASTL/internal/config.h and in allocator.h/cpp. Overriding the default global allocator means overriding these files, overriding what these files refer to, or changing these files outright. Here is a list of things you can do, starting with the simplest:</p>
+<ul>
+ <li>Simply provide the following versions of operator new (which EASTL requires, actually):<br>
+ <small><span style="font-family: Helvetica,Arial,sans-serif;">&nbsp;&nbsp;&nbsp; void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line);</span><br style=
+"font-family: Helvetica,Arial,sans-serif;">
+ <span style="font-family: Helvetica,Arial,sans-serif;">&nbsp;&nbsp;&nbsp; void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);</span></small></li>
+ <li>Predefine the config.h macros for EASTLAlloc, EASTLFree, etc.&nbsp;See config.h for this.</li>
+ <li>Override config.h entirely via EASTL_USER_CONFIG_HEADER. See config.h for this.</li>
+ <li>Provide your own version of allocator.h/cpp</li>
+ <li>Provide your own version of config.h.&nbsp;</li>
+</ul>
+<p class="faq-answer">If you redefine the allocator class, you can make it work however you want.</p>
+<p class="faq-answer">Note that config.h defines EASTLAllocatorDefault, which returns the default allocator instance. As documented in config.h, this is not a global allocator which implements all container allocations but is the allocator that is used when EASTL needs to allocate memory internally. There are very few cases where EASTL allocates memory internally, and in each of these it is for a sensible reason that is documented to behave as such.</p>
+<p class="faq-question"> <a name="Cont.24"></a>Cont.24
+How do I do trick X with the string container?</p>
+<p class="faq-answer">There seem to be many things users want to do with strings. Perhaps the most commonly requested EASTL container extensions are string class shortcut functions. While some of these requests are being considered, we provide some shortcut functions here.<br>
+<br>
+<span style="font-weight: bold;">find_and_replace</span></p>
+<pre class="code-example">template &lt;typename String&gt;
+void find_and_replace(String&amp; s, const typename String::value_type* pFind,&nbsp;const typename String::value_type* pReplace)&nbsp;&nbsp;&nbsp;&nbsp;
+{
+ for(size_t i; (i = source.find(pFind)) != T::npos; )
+ &nbsp;&nbsp;&nbsp; s.replace(i, eastl::CharStrlen(pFind),&nbsp;pReplace);
+}
+
+Example:<span class="style1">
+</span> find_and_replace(s, "hello", "hola");</pre>
+<p class="faq-answer"><span style="font-weight: bold;">trim front</span> (multiple chars)</p>
+<pre class="code-example">template &lt;typename String&gt;
+void trim_front(String&amp; s, const typename String::value_type* pValues)
+{
+&nbsp; &nbsp; s.erase(0, s.find_first_not_of(pValues));
+}
+
+Example:
+ trim_front(s, " \t\n\r");</pre>
+<p class="faq-answer"><span style="font-weight: bold;">trim back</span> (multiple chars)</p>
+<pre class="code-example">template &lt;typename String&gt;
+void trim_front(String&amp; s, const typename String::value_type* pValues)
+{
+&nbsp; &nbsp; s.resize(s.find_last_not_of(pValues) + 1);
+}
+
+Example:
+ trim_back(s, " \t\n\r");</pre>
+<p class="faq-answer">prepend</p>
+<pre class="code-example">template &lt;typename String&gt;
+void prepend(String&amp; s, const typename String::value_type* p)
+{
+&nbsp;&nbsp;&nbsp; s.insert(0,&nbsp;p);
+}
+
+Example:
+ prepend(s, "log: ");</pre>
+<p><span class="faq-answer" style="font-weight: bold;">begins_with</span>
+</p>
+<pre class="code-example">template &lt;typename String&gt;
+bool begins_with(const String&amp; s, const typename String::value_type* p)
+{
+&nbsp; &nbsp; return&nbsp;s.compare(0,&nbsp;eastl::CharStrlen(p), p) == 0;
+}
+
+Example:
+ if(begins_with(s, "log: ")) ...</pre>
+<p class="faq-answer">ends_with</p>
+<pre class="code-example">template &lt;typename String&gt;
+bool ends_with(const String&amp; s, const typename String::value_type* p)
+{
+&nbsp; &nbsp; const typename String::size_type n1 = s.size();
+&nbsp; &nbsp; const typename String::size_type n2 = eastl::CharStrlen(p);
+&nbsp;&nbsp;&nbsp; return ((n1 &gt;= n2) &amp;&amp; s.compare(n1 - n2, n2, p) == 0);
+}
+
+Example:
+ if(ends_with(s, "test.")) ...</pre>
+<p class="faq-answer"><span style="font-weight: bold;">tokenize</span><br>
+Here is a simple tokenization function that acts very much like the C strtok function.&nbsp;</p>
+<pre class="code-example">template &lt;typename String&gt;
+size_t tokenize(const String&amp; s, const typename String::value_type* pDelimiters,
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; String* resultArray, size_t resultArraySize)
+{
+&nbsp;&nbsp;&nbsp; size_t n = 0;
+&nbsp;&nbsp;&nbsp; typename String::size_type lastPos = s.find_first_not_of(pDelimiters, 0);
+&nbsp;&nbsp;&nbsp; typename String::size_type pos &nbsp; &nbsp; = s.find_first_of(pDelimiters, lastPos);
+
+&nbsp;&nbsp;&nbsp; while((n &lt; resultArraySize) &amp;&amp; (pos != String::npos) || (lastPos != String::npos))
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; resultArray[n++].assign(s, lastPos, pos - lastPos);
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; lastPos = s.find_first_not_of(pDelimiters, pos);
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; pos &nbsp; &nbsp; = s.find_first_of(pDelimiters, lastPos);
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp;&nbsp;return n;
+}
+
+Example:
+ string resultArray[32];
+tokenize(s, " \t", resultArray, 32));</pre>
+
+<p class="faq-question"><a name="Cont.25"></a>Cont.25 How do EASTL smart pointers compare to Boost smart pointers?&nbsp;</p>
+<p class="faq-answer">EASTL's smart pointers are nearly identical to Boost (including all that crazy member template and dynamic cast functionality in shared_ptr), but are not using the Boost source code. EA legal has already stated that it is fine to have smart pointer classes with the same names and functionality as those present in Boost. EA legal specifically looked at the smart pointer classes in EASTL for this. There are two differences between EASTL smart pointers and Boost smart pointers:</p>
+<ul>
+ <li>EASTL smart pointers don't have thread safety built-in. It was deemed that this is too much overhead and that thread safety is something best done at a higher level. By coincidence the C++ library proposal to add shared_ptr also omits the thread safety feature. FWIW, I put a thread-safe shared_ptr in EAThread, though it doesn't attempt to do all the fancy member template things that Boost shared_ptr does. Maybe I'll add that some day if people care.</li>
+</ul>
+<ul>
+ <li>EASTL shared_ptr object deletion goes through a deletion object instead of through a virtual function interface. 95% of the time this makes no difference (aside from being more efficient), but the primary case where it matters is when you have shared_ptr&lt;void&gt; and assign to is something like "new Widget". The problem is that shared_ptr&lt;void&gt; doesn't know what destructor to call and so doesn't call a destructor unless you specify a custom destructor object as part of the template specification. I don't know what to say about this one, as it is less safe, but forcing everybody to have the overhead of additional templated classes and virtual destruction functions doesn't seem to be in the spirit of high performance or lean game development.</li>
+</ul>
+<p class="faq-answer">There is the possibility of making a shared_ptr_boost which is completely identical to Boost shared_ptr. So perhaps that will be done some day.</p>
+<p class="faq-question"><a name="Cont.26"></a>Cont.26
+How do your forward-declare an EASTL container?</p>
+<p class="faq-answer">Here is are some examples of how to do this:</p>
+<pre class="code-example">namespace eastl
+{
+&nbsp;&nbsp;&nbsp; template &lt;typename T, typename Allocator&gt; class basic_string;
+ typedef basic_string&lt;char, allocator&gt; string8; <span class="code-example-comment">// Forward declare EASTL's string8 type.</span>
+
+&nbsp;&nbsp;&nbsp; template &lt;typename T, typename Allocator&gt; class vector;
+ typedef vector&lt;char, allocator&gt; CharArray;
+
+&nbsp;&nbsp;&nbsp; template &lt;typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode&gt; class hash_set;
+
+&nbsp;&nbsp;&nbsp; template &lt;typename Key, typename T, typename Compare, typename Allocator&gt; class map;
+}</pre>
+<p class="faq-answer">The forward declaration can be used to declare a pointer or reference to such a class. It cannot be used to declare an instance of a class or refer to class data, static or otherwise. Nevertheless, forward declarations for pointers and references are useful for reducing the number of header files a header file needs to include.</p>
+<p class="faq-question"> <a name="Cont.27" id="Cont.27"></a>Cont.27
+How do I make two containers share a memory pool?</p>
+<p class="faq-answer">EASTL (and std STL) allocators are specified by value semantics and not reference semantics. Value semantics is more powerful (because a value can also be a reference, but not the other way around), but is not always what people expects if they're used to writing things the other way.</p>
+<p class="faq-answer">Here is some example code:</p>
+<pre class="code-example">struct fixed_pool_reference<br>{<br>public:<br> fixed_pool_reference()<br> {<br> mpFixedPool = NULL;<br> }<br> <br> fixed_pool_reference(eastl::fixed_pool&amp; fixedPool)<br> {<br> mpFixedPool = &amp;fixedPool;<br> }<br> <br> fixed_pool_reference(const fixed_pool_reference&amp; x)<br> {<br> mpFixedPool = x.mpFixedPool;<br> }<br> <br> fixed_pool_reference&amp; operator=(const fixed_pool_reference&amp; x)<br> {<br> mpFixedPool = x.mpFixedPool;<br> return *this;<br> }<br> <br> void* allocate(size_t /*n*/, int /*flags*/ = 0)<br> {<br> return mpFixedPool-&gt;allocate();<br> }<br> <br> void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)<br> {<br> return mpFixedPool-&gt;allocate();<br> }<br> <br> void deallocate(void* p, size_t /*n*/)<br> {<br> return mpFixedPool-&gt;deallocate(p);<br> }<br> <br> const char* get_name() const<br> {<br> return &quot;fixed_pool_reference&quot;;<br> }<br> <br> void set_name(const char* /*pName*/)<br> {<br> }<br> <br>protected:<br> friend bool operator==(const fixed_pool_reference&amp; a, const fixed_pool_reference&amp; b);<br> friend bool operator!=(const fixed_pool_reference&amp; a, const fixed_pool_reference&amp; b);<br> <br> eastl::fixed_pool* mpFixedPool;<br>};
+
+inline bool operator==(const fixed_pool_reference&amp; a, const fixed_pool_reference&amp; b)
+{
+ return (a.mpFixedPool == b.mpFixedPool);<br>}
+
+inline bool operator!=(const fixed_pool_reference&amp; a, const fixed_pool_reference&amp; b)
+{
+ return (a.mpFixedPool != b.mpFixedPool);
+}</pre>
+ <p class="faq-answer"> Example usage of the above:</p>
+<pre class="code-example">typedef eastl::list&lt;int, fixed_pool_reference&gt; IntList;
+
+IntList::node_type buffer[2];
+eastl::fixed_pool myPool(buffer, sizeof(buffer), sizeof(Int::node_type), 2);
+
+IntList myList1(myPool);
+IntList myList2(myPool);
+
+myList1.push_back(37);
+myList2.push_back(39);</pre>
+<p class="faq-question"><a name="Cont.28"></a>Cont.28
+Can I use a std (STL) allocator with EASTL?</p>
+<p class="faq-answer">No. EASTL allocators are similar in interface to std STL allocators, but not 100% compatible. If it was possible to make them compatible with std STL allocators but also match the design of EASTL then compatibility would exist. The primary reasons for lack of compatibility are:</p>
+<ul>
+ <li>EASTL allocators have a different allocate function signature.</li>
+ <li>EASTL allocators have as many as four extra required functions: ctor(name), get_name(), set_name(), allocate(size, align, offset).</li>
+ <li>EASTL allocators have an additional allocate function specifically for aligned allocations, as listed directly above.</li>
+</ul>
+<p class="faq-question"><a name="Cont.29" id="Cont.29"></a>What are the requirements of classes stored in containers?</p>
+<p class="faq-answer">Class types stored in containers must have:</p>
+<ul>
+ <li>a public copy constructor</li>
+ <li>a public assignment operator</li>
+ <li>a public destructor</li>
+ <li>an operator &lt; that compares two such classes (sorted containers only).</li>
+ <li>an operator == that compares two such classes (hash containers only). </li>
+</ul>
+<p class="faq-answer">Recall that the compiler generates basic versions these functions for you when you don't implement them yourself, so you can omit any of the above if the compiler-generated version is sufficient. </p>
+<p class="faq-answer">For example, the following code will act incorrectly, because the user forgot to implement an assignment operator. The compiler-generated assignment operator will assign the refCount value, which the user doesn't want, and which will be called by the vector during resizing. </p>
+<pre class="code-example">struct NotAPod
+{
+ NotAPod(const NotAPod&amp;) {} <span class="code-example-comment">// Intentionally don't copy the refCount </span><br>
+ &nbsp;int refCount; <span class="code-example-comment">// refCounts should not be copied between NotAPod instances.</span>
+};
+
+eastl::vector&lt;NotAPod&gt; v;</pre>
+<h2>Algorithms</h2>
+<p class="faq-question"> <a name="Algo.1"></a>Algo.1
+ I'm getting screwy behavior in sorting algorithms or sorted
+containers. What's wrong?</p>
+<p class="faq-answer">It may possible that you are seeing floating point roundoff problems. Many STL algorithms require object comparisons to act consistently. However, floating point values sometimes compare differently between uses because in one situation a value might be in 32 bit form in system memory, whereas in anther situation that value might be in an FPU register with a different precision. These are difficult problems to track down and aren't the fault of EASTL or whatever similar library you might be using. There are various solutions to the problem, but the important thing is to find a way to force the comparisons to be consistent.</p>
+<p class="faq-answer">The code below was an example of this happening, whereby the object pA-&gt;mPos was stored in system memory while pB-&gt;mPos was stored in a register and comparisons were inconsistent and a crash ensued.</p>
+<pre class="code-example">class SortByDistance : public binary_function&lt;WorldTreeObject*, WorldTreeObject*, bool&gt;
+{
+private:
+&nbsp;&nbsp;&nbsp; Vector3 mOrigin;
+
+public:
+&nbsp;&nbsp;&nbsp; SortByDistance(Vector3 origin) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; mOrigin = origin;
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; bool operator()(WorldTreeObject* pA, WorldTreeObject* pB) const {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return ((WorldObject*)pA)-&gt;mPos - mOrigin).GetLength()
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &lt; ((WorldObject*)pB)-&gt;mPos - mOrigin).GetLength();
+&nbsp;&nbsp;&nbsp; }
+};</pre>
+
+<p class="faq-question"><a name="Algo.2"></a>Algo.2
+How do I write a comparison (operator&lt;()) for a struct that contains two or more members?&nbsp;</p>
+<p class="faq-answer">For a struct with two members such as the following:</p>
+<pre class="code-example">struct X {
+&nbsp;&nbsp;&nbsp; Blah m1;
+&nbsp;&nbsp;&nbsp; Blah m2;
+};</pre>
+<p class="faq-answer">You would write the comparison function like this:</p>
+<pre class="code-example">bool operator&lt;(const X&amp; a, const X&amp; b) {
+&nbsp;&nbsp;&nbsp; return (a.m1 == b.m1) ? (a.m2 &lt; b.m2) : (a.m1 &lt; b.m1);
+}</pre>
+<p class="faq-answer">or, using only operator &lt; but more instructions:</p>
+<pre class="code-example">bool operator&lt;(const X&amp; a, const X&amp; b) {
+&nbsp;&nbsp;&nbsp; return (a.m1 &lt; b.m1) || (!(b.m1 &lt; a.m1) &amp;&amp; (a.m2 &lt; b.m2));
+}</pre>
+<p class="faq-answer"> For a struct with three members, you would have:</p>
+<pre class="code-example">bool operator&lt;(const X&amp; a, const X&amp; b) {
+&nbsp;&nbsp;&nbsp; if(a.m1 != b.m1)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return (a.m1 &lt; b.m1);
+&nbsp;&nbsp;&nbsp; if(a.m2 != b.m2)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return (a.m2 &lt; b.m2);
+&nbsp;&nbsp;&nbsp; return (a.mType &lt; b.mType);
+}</pre>
+<p class="faq-answer">And a somewhat messy implementation if you wanted to use only operator &lt;.</p>
+<p class="faq-answer">Note also that you can use the above technique to implement operator &lt; for spatial types such as vectors, points, and rectangles. You would simply treat the members of the struct as an array of values and ignore the fact that they have spatial meaning. All operator &lt; cares about is that things order consistently.</p>
+<pre class="code-example">bool operator&lt;(const Point2D&amp; a, const Point2D&amp; b) {
+&nbsp;&nbsp;&nbsp; return (a.x == b.x) ? (a.y &lt; b.y) : (a.x &lt; b.x);
+}</pre>
+<p class="faq-question"><a name="Algo.3"></a>Algo.3
+How do I sort something in reverse order?</p>
+<p class="faq-answer">Normally sorting puts the lowest value items first in the sorted range. You can change this by simply reversing the comparison. For example:<br>
+</p>
+<div class="code-example">sort(intVector.begin(), intVector.end(), greater&lt;int&gt;());</div>
+<p class="faq-answer"> It's important that you use operator &gt; instead of &gt;=. The comparison function must return false for every case where values are equal.</p>
+<p class="faq-question"><a name="Algo.4"></a>Algo.4
+I'm getting errors about min and max while compiling.</p>
+<p class="faq-answer">You need to define NOMINMAX under VC++ when this occurs, as it otherwise defines min and max macros that interfere. There may be equivalent issues with other compilers. Also, VC++ has a specific &lt;minmax.h&gt; header file which defines min and max macros but which doesn't pay attention to NOMINMAX and so in that case there is nothing to do but not include that file or to undefine min and max. minmax.h is not a standard file and its min and max macros are not standard C or C++ macros or functions.</p>
+<p class="faq-question"><a name="Algo.5"></a>Algo.5
+Why don't algorithms take a container as an argument instead of iterators? A container would be more convenient.</p>
+<p class="faq-answer">Having algorithms that use containers instead of algorithms would reduce reduce functionality with no increase in performance. This is because the use of iterators allows for the application of algorithms to sub-ranges of containers and allows for the application of algorithms to containers aren't formal C++ objects, such as C-style arrays.</p>
+<p class="faq-answer">Providing additional algorithms that use containers would introduce redundancy with respect to the existing algorithms that use iterators.</p>
+<p class="faq-question"><a name="Algo.6"></a>Algo.6
+Given a container of pointers, how do I find an element by value (instead of by pointer)?</p>
+<p class="faq-answer">Functions such as&nbsp;find_if help you find a T element in a container of Ts. But if you have a container of pointers such as vector&lt;Widget*&gt;, these functions will enable you to find an element that matches a given Widget* pointer, but they don't let you find an element that matches a given Widget object.</p>
+<p class="faq-answer">You can write your own iterating 'for' loop and compare values, or you can use a generic function object to do the work if this is a common task:</p>
+<pre class="code-example">template&lt;typename T&gt;
+struct dereferenced_equal
+{
+&nbsp; &nbsp; const T&amp; mValue;
+
+&nbsp; &nbsp;&nbsp;dereferenced_equal(const T&amp; value) : mValue(value) { } &nbsp;&nbsp;&nbsp;&nbsp;
+&nbsp; &nbsp;&nbsp;bool operator==(const T* pValue) const { return *pValue ==&nbsp;mValue; }
+};
+
+...
+
+find_if(container.begin(), container.end(),&nbsp;dereferenced_equal&lt;Widget&gt;(someWidget));</pre>
+
+<p class="faq-question"><a name="Algo.7"></a>Algo.7
+When do stored objects need to support <small><span style="font-family: Courier New;">operator &lt;</span></small> vs. when do they need to support <small><span style="font-family: Courier New;">operator ==</span></small>?</p>
+<p class="faq-answer">Any object which is sorted needs to have operator &lt; defined for it, implicitly via operator &lt; or explicitly via a user-supplied Compare function. Sets and map containers require operator &lt;, while sort, binary search, and min/max algorithms require operator &lt;.</p>
+<p class="faq-answer">Any object which is compared for equality needs to have operator == defined for it, implicitly via operator == or explicitly via a user-supplied BinaryPredicate function. Hash containers required operator ==, while many of the algorithms other than those mentioned above for operator &lt; require operator ==.</p>
+<p class="faq-answer">Some algorithms and containers require neither &lt; nor ==. Interestingly, no algorithm or container requires both &lt; and ==.</p>
+<p class="faq-question"><a name="Algo.8"></a>Algo.8 How do I sort via pointers or array indexes instead of objects directly?</p>
+<p class="faq-answer">Pointers </p>
+<pre class="code-example"><span class="style4">vector&lt;TestObject&gt; toArray;
+vector&lt;TestObject*&gt; topArray;
+
+for(eastl_size_t i = 0; i &lt; 32; i++)
+ toArray.push_back(TestObject(rng.RandLimit(20)));
+for(eastl_size_t i = 0; i &lt; 32; i++) // This needs to be a second loop because the addresses might change in the first loop due to container resizing.
+ topArray.push_back(&amp;toArray[i]);
+</span>
+struct TestObjectPtrCompare
+{
+ bool operator()(TestObject* a, TestObject* b)
+ { return a-&gt;mX &lt; a-&gt;mX; }
+};
+
+quick_sort(topArray.begin(), topArray.end(), TestObjectPtrCompare());</pre>
+<p class="faq-answer">Array indexes</p>
+<pre class="code-example"><span class="style4">vector&lt;TestObject&gt; toArray;
+vector&lt;eastl_size_t&gt; toiArray;
+
+for(eastl_size_t i = 0; i < 32; i++)
+{
+ toArray.push_back(TestObject(rng.RandLimit(20)));
+ toiArray.push_back(i);
+}</span>
+
+struct TestObjectIndexCompare
+{
+ vector<TestObject>* mpArray;
+
+ TestObjectIndexCompare(vector&lt;TestObject&gt;* pArray) : mpArray(pArray) { }
+ TestObjectIndexCompare(const TestObjectIndexCompare& x) : mpArray(x.mpArray){ }
+ TestObjectIndexCompare& operator=(const TestObjectIndexCompare& x) { mpArray = x.mpArray; return *this; }
+
+ bool operator()(eastl_size_t a, eastl_size_t b)
+ { return (*mpArray)[a] &lt; (*mpArray)[b]; }
+};
+
+quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare(&toArray));
+</pre>
+<p class="faq-answer">Array indexes (simpler version using toArray as a global variable) </p>
+<pre class="code-example"><span class="style4">vector&lt;TestObject&gt; toArray;
+vector&lt;eastl_size_t&gt; toiArray;
+
+for(eastl_size_t i = 0; i < 32; i++)
+{
+ toArray.push_back(TestObject(rng.RandLimit(20)));
+ toiArray.push_back(i);
+}</span>
+
+struct TestObjectIndexCompare
+{
+ bool operator()(eastl_size_t a, eastl_size_t b)
+ { return toArray[a] &lt; toArray[b]; }
+};
+
+quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare(&toArray));</pre>
+<h2>Iterators</h2>
+<p class="faq-question"><a name="Iter.1"></a>Iter.1
+What's the difference between iterator, const iterator, and const_iterator?</p>
+<p class="faq-answer">An iterator can be modified and item it points to can be modified.<br>
+A const iterator cannot be modified, but the items it points to can be modified.<br>
+A const_iterator can be modified, but the items it points to cannot be modified.<br>
+A const const_iterator cannot be modified, nor can the items it points to.</p>
+<p class="faq-answer">This situation is much like with char pointers:</p>
+<div style="margin-left: 40px;">
+ <table style="text-align: left; width: 400px;" border="1" cellpadding="2" cellspacing="2">
+ <tbody>
+ <tr>
+ <td>Iterator type</td>
+ <td>Pointer equivalent</td>
+ </tr>
+ <tr>
+ <td>iterator</td>
+ <td>char*</td>
+ </tr>
+ <tr>
+ <td>const iterator</td>
+ <td>char* const</td>
+ </tr>
+ <tr>
+ <td>const_iterator</td>
+ <td>const char*</td>
+ </tr>
+ <tr>
+ <td>const const_iterator</td>
+ <td>const char* const</td>
+ </tr>
+ </tbody>
+ </table>
+</div>
+<p class="faq-question"><a name="Iter.2"></a>Iter.2 How do I tell from an iterator what type of thing it is iterating?</p>
+<p class="faq-answer">Use the value_type typedef from iterator_traits, as in this example</p>
+<pre class="code-example">template &lt;typename Iterator&gt;
+void DoSomething(Iterator first, Iterator last)
+{
+&nbsp;&nbsp;&nbsp; typedef typename iterator_traits&lt;Iterator&gt;::value_type;
+
+&nbsp; &nbsp; // use value_type
+}</pre>
+<p class="faq-question"><a name="Iter.3"></a>Iter.3
+How do I iterate a container while (selectively) removing items from it?</p>
+<p class="faq-answer">All EASTL containers have an erase function which takes an iterator as an
+argument and returns an iterator to the next item. Thus, you can erase items from a container
+while iterating it like so:</p>
+<pre class="code-example">set&lt;int&gt; intSet;
+set&lt;int&gt;::iterator i = intSet.begin();
+
+while(i != intSet.end())
+{
+&nbsp;&nbsp;&nbsp;&nbsp;if(*i &amp; 1) // Erase all odd integers from the container.
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;i = intSet.erase(i);
+&nbsp;&nbsp;&nbsp;&nbsp;else
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;++i;
+}</pre>
+<p class="faq-question"><a name="Iter.4"></a>Iter.4
+What is an insert_iterator?</p>
+<p class="faq-answer">An insert_iterator is a utility class which is like an iterator except that when you assign a value to it, the insert_iterator inserts the value into the container (via insert()) and increments the iterator. Similarly, there are front_insert_iterator and back_insert_iterator, which are similar to insert_iterator except that assigning a value to them causes then to call push_front and push_back, respectively, on the container. These utilities may seem a slightly abstract, but they have uses in generic programming.<br>
+</p>
+<hr style="width: 100%; height: 2px;">
+End of document<br>
+<br>
+<br>
+<br>
+<br>
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTL Glossary.html b/EASTL/doc/html/EASTL Glossary.html
new file mode 100644
index 0000000..bd4b865
--- /dev/null
+++ b/EASTL/doc/html/EASTL Glossary.html
@@ -0,0 +1,490 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL Glossary</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta name="author" content="Paul Pedriana">
+ <meta name="description" content="Definitions of common terms related to EASTL.">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+</head>
+<body>
+<h1>EASTL Glossary</h1>
+<p>This document provides definitions to various terms related to EASTL. Items that are capitalized are items that are
+used as template parameters.</p>
+<table style="width: 100%; text-align: left;" border="1" cellpadding="2" cellspacing="2">
+<tbody>
+<tr>
+<td>adapter</td>
+<td>An adapter is something that encapsulates a component to provide another interface, such as a C++ class which makes
+a stack out of a list.</td>
+</tr>
+<tr>
+<td style="width: 150px; vertical-align: top;">algorithm<br></td>
+<td style="vertical-align: top;">Algorithms are standalone functions which manipulate data which usually but not
+necessarily comes from a container. Some algorithms change the data while others don't. Examples are reverse, sort,
+find, and remove.<br></td>
+</tr>
+<tr>
+<td>associative container</td>
+<td>An associative container is a variable-sized container that supports efficient retrieval of elements (values) based
+on keys. It supports insertion and removal of elements, but differs from a sequence in that it does not provide a
+mechanism for inserting an element at a specific position. Associative containers include map, multimap, set, multiset,
+hash_map, hash_multimap, hash_set, hash_multiset.</td>
+</tr>
+<tr>
+<td>array</td>
+<td>An array is a C++ container which directly implements a C-style fixed array but which adds STL container semantics
+to it.</td>
+</tr>
+<tr>
+<td>basic_string</td>
+<td>A templated string class which is usually used to store char or wchar_t strings.</td>
+</tr>
+<tr>
+<td>begin</td>
+<td>The function used by all conventional containers to return the first item in the container.</td>
+</tr>
+<tr>
+<td>BidirectionalIterator</td>
+<td>An input iterator which is like ForwardIterator except it can be read in a backward direction as well.</td>
+</tr>
+<tr>
+<td>BinaryOperation&nbsp;</td>
+<td>A function which takes two arguments and returns a value (which will usually be assigned to a third object).</td>
+</tr>
+<tr>
+<td>BinaryPredicate</td>
+<td>A function which takes two arguments and returns true if some criteria is met (e.g. they are equal).</td>
+</tr>
+<tr>
+<td>binder1st, binder2nd</td>
+<td>These are function objects which convert one function object into another. &nbsp;In particular, they implement a
+binary function whereby you can specify one of the arguments.This is a somewhat abstract concept but has its uses.</td>
+</tr>
+<tr>
+<td>bit vector</td>
+<td>A specialized container that acts like vector&lt;bool&gt; but is implemented via one bit per entry. STL
+vector&lt;bool&gt; is usually implemented as a bit vector but EASTL avoids this in favor of a specific bit vector
+container.</td>
+</tr>
+<tr>
+<td>bitset</td>
+<td>An extensible yet efficient implementation of bit flags. Not strictly a conventional STL container and not the same
+thing as vector&lt;bool&gt; or a bit_vector, both of which are formal iterate-able containers.</td>
+</tr>
+<tr>
+<td>capacity</td>
+<td>Refers to the amount of total storage available in an array-based container such as vector, string, and array.
+Capacity is always &gt;= container size and is &gt; size in order to provide extra space for a container to grow
+into.</td>
+</tr>
+<tr>
+<td>const_iterator</td>
+<td>An iterator whose iterated items are cannot be modified. A const_iterator is akin to a const pointer such as 'const
+char*'.</td>
+</tr>
+<tr>
+<td>container</td>
+<td>A container is an object that stores other objects (its elements), and that has methods for accessing its elements.
+In particular, every type that is a model of container has an associated iterator type that can be used to iterate
+through the container's elements.</td>
+</tr>
+<tr>
+<td>copy constructor</td>
+<td>A constructor for a type which takes another object of that type as its argument. For a hypothetical Widget class,
+the copy constructor is of the form Widget(const Widget&amp; src);</td>
+</tr>
+<tr>
+<td>Compare</td>
+<td>A function which takes two arguments and returns the lesser of the two.</td>
+</tr>
+<tr>
+<td>deque</td>
+<td>The name deque is pronounced "deck" and stands for "double-ended queue."<br>
+<br>
+A deque is very much like a vector: like vector, it is a sequence that supports random access to elements, constant
+time insertion and removal of elements at the end of the sequence, and linear time insertion and removal of elements in
+the middle.<br>
+<br>
+The main way in which deque differs from vector is that deque also supports constant time insertion and removal of
+elements at the beginning of the sequence. Additionally, deque does not have any member functions analogous to vector's
+capacity() and reserve(), and does not provide the guarantees on iterator validity that are associated with those
+member functions.</td>
+</tr>
+<tr>
+<td>difference_type</td>
+<td>The typedef'd type used by all conventional containers and iterators to define the distance between two iterators.
+It is usually the same thing as the C/C++ ptrdiff_t data type.</td>
+</tr>
+<tr>
+<td>empty</td>
+<td>The function used by all conventional containers to tell if a container has a size of zero. In many cases empty is
+more efficient than checking for size() == 0.</td>
+</tr>
+<tr>
+<td>element</td>
+<td>An element refers to a member of a container.</td>
+</tr>
+<tr>
+<td>end</td>
+<td>The function used by all conventional containers to return one-past the last item in the container.</td>
+</tr>
+<tr>
+<td>equal_range</td>
+<td>equal_range is a version of binary search: it attempts to find the element value in an ordered range [first, last).
+The value returned by equal_range is essentially a combination of the values returned by lower_bound and upper_bound:
+it returns a pair of iterators i and j such that i is the first position where value could be inserted without
+violating the ordering and j is the last position where value could be inserted without violating the ordering. It
+follows that every element in the range [i, j) is equivalent to value, and that [i, j) is the largest subrange of
+[first, last) that has this property.</td>
+</tr>
+<tr>
+<td>explicit instantiation</td>
+<td>Explicit instantiation lets you create an instantiation of a templated class or function without actually using it
+in your code. Since this is useful when you are creating library files that use templates for distribution,
+uninstantiated template definitions are not put into object files.&nbsp;An example of the syntax for explicit
+instantiation is:<br>
+<small><span style="font-family: Courier New;">&nbsp; &nbsp;</span></small> <small><span style=
+"font-family: Courier New;">template class vector&lt;char&gt;;<br>
+&nbsp; &nbsp; template void min&lt;int&gt;(int, int);<br>
+&nbsp; &nbsp; template void min(int, int);</span></small></td>
+</tr>
+<tr>
+<td>ForwardIterator</td>
+<td>An input iterator which is like InputIterator except it can be reset back to the beginning.</td>
+</tr>
+<tr>
+<td>Function</td>
+<td>A function which takes one argument and applies some operation to the target.</td>
+</tr>
+<tr>
+<td>function object, functor</td>
+<td>A function object or functor is a&nbsp;class that has the function-call operator (<tt>operator()</tt>)
+defined.</td>
+</tr>
+<tr>
+<td>Generator</td>
+<td>A function which takes no arguments and returns a value (which will usually be assigned to an object).</td>
+</tr>
+<tr>
+<td>hash_map, hash_multimap, hash_set, hash_multiset</td>
+<td>The hash containers are implementations of map, multimap, set, and multiset via a hashtable instead of via a tree.
+Searches are O(1) (fast) but the container is not sorted.</td>
+</tr>
+<tr>
+<td>heap</td>
+<td>A heap is a data structure which is not necessarily sorted but is organized such that the highest priority item is
+at the top. A heap is synonymous with a priority queue and has numerous applications in computer science.</td>
+</tr>
+<tr>
+<td>InputIterator</td>
+<td>An input iterator (iterator you read from) which allows reading each element only once and only in a forward
+direction.</td>
+</tr>
+<tr>
+<td>intrusive_list, intrusive_hash_map, etc.</td>
+<td>Intrusive containers are containers which don't allocate memory but instead use their contained object to manage
+the container's memory. While list allocates nodes (with mpPrev/mpNext pointers) that contain the list items,
+intrusive_list doesn't allocate nodes but instead the container items have the mpPrev/mpNext pointers.</td>
+</tr>
+<tr>
+<td>intrusive_ptr</td>
+<td>intrusive_ptr is a smart pointer which doesn't allocate memory but instead uses the contained object to manage
+lifetime via addref and release functions.</td>
+</tr>
+<tr>
+<td>iterator</td>
+<td>An iterator is the fundamental entity of reading and enumerating values in a&nbsp;container. Much like a pointer
+can be used to walk through a character array, an iterator is used to walk through a linked list.</td>
+</tr>
+<tr>
+<td>iterator category</td>
+<td>An iterator category defines the functionality the iterator provides. The conventional iterator categories are
+InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator, and OutputIterator. See the definitions of
+each of these for more information.Iterator category is synonymous with <span style=
+"font-style: italic;">iterator_tag</span>.</td>
+</tr>
+<tr>
+<td>iterator_tag</td>
+<td>See <span style="font-style: italic;">iterator category</span>.</td>
+</tr>
+<tr>
+<td>key_type, Key</td>
+<td>A Key or key_type is the identifier used by associative (a.k.a. dictionary) containers (e.g. map, hash_map) to
+identify the type used to index the mapped_type. If you have a dictionary of strings that you access by an integer id,
+the ids are the keys and the strings are the mapped types.</td>
+</tr>
+<tr>
+<td>lexicographical compare</td>
+<td>A lexicographical compare is a comparison of two containers that compares them element by element, much like the C
+strcmp function compares two strings.</td>
+</tr>
+<tr>
+<td>linked_ptr</td>
+<td>A linked_ptr is a shared smart pointer which implements object lifetime via a linked list of all linked_ptrs that
+are referencing the object. linked_ptr, like intrusive_ptr, is a non-memory-allocating alternative to shared_ptr.</td>
+</tr>
+<tr>
+<td>list</td>
+<td>A list is a doubly linked list. It is a sequence that supports both forward and backward traversal, and (amortized)
+constant time insertion and removal of elements at the beginning or the end, or in the middle. Lists have the important
+property that insertion and splicing do not invalidate iterators to list elements, and that even removal invalidates
+only the iterators that point to the elements that are removed. The ordering of iterators may be changed (that is,
+list&lt;T&gt;::iterator might have a different predecessor or successor after a list operation than it did before), but
+the iterators themselves will not be invalidated or made to point to different elements unless that invalidation or
+mutation is explicit.</td>
+</tr>
+<tr>
+<td>lower_bound</td>
+<td>lower_bound is a version of binary search: it attempts to find the element value in an ordered range [first, last).
+Specifically, it returns the first position where value could be inserted without violating the ordering.</td>
+</tr>
+<tr>
+<td>map</td>
+<td>Map is a sorted associative container that associates objects of type Key with objects of type T. Map is a pair
+associative container, meaning that its value type is pair&lt;const Key, T&gt;. It is also a unique associative
+container, meaning that no two elements have the same key. It is implemented with a tree structure.</td>
+</tr>
+<tr>
+<td>mapped_type</td>
+<td>A mapped_type is a typedef used by associative containers to identify the container object which is accessed by a
+key. If you have a dictionary of strings that you access by an integer id, the ids are the keys and the strings are the
+mapped types.</td>
+</tr>
+<tr>
+<td>member template</td>
+<td>A member template is a templated function of a templated class. Thus with a member template function there are two
+levels of templating -- the class and the function.</td>
+</tr>
+<tr>
+<td>multimap,&nbsp;</td>
+<td>Multimap is a sorted associative&nbsp;container that associates objects of type Key with objects of type T.
+multimap is a pair associative container, meaning that its value type is pair&lt;const Key, T&gt;. It is also a
+multiple associative container, meaning that there is no limit on the number of elements with the same key.It is
+implemented with a tree structure.</td>
+</tr>
+<tr>
+<td>multiset</td>
+<td>Multiset is a sorted associative container that stores objects of type Key. Its value type, as well as its key
+type, is Key. It is also a multiple associative container, meaning that two or more elements may be identical.&nbsp;It
+is implemented with a tree structure.</td>
+</tr>
+<tr>
+<td>node</td>
+<td>A node is a little holder class used by many containers to hold the contained items. A linked-list, for example,
+defines a node which has three members: mpPrev, mpNext, and T (the contained object).</td>
+</tr>
+<tr>
+<td>npos</td>
+<td>npos is used by the string class to identify a non-existent index. Some string functions return npos to indicate
+that the function failed.</td>
+</tr>
+<tr>
+<td>rel_ops</td>
+<td>rel_ops refers to "relational operators" and is a set of templated functions which provide operator!= for classes
+that&nbsp; have only operator== and provide operator &gt; for classes that have only operator &lt;, etc. Unfortunately,
+rel_ops have a habit of polluting the global operator space and creating conflicts. They must be used with
+discretion.</td>
+</tr>
+<tr>
+<td>reverse_iterator</td>
+<td>A reverse_iterator is an iterator which wraps a bidirectional or random access iterator and allows the iterator to
+be read in reverse direction. The difference between using reverse_iterators and just decrementing regular iterators is
+that reverse_iterators use operator++ to move backwards and thus work in any algorithm that calls ++ to move through a
+container.</td>
+</tr>
+<tr>
+<td>OutputIterator</td>
+<td>An output iterator (iterator you write to) which allows writing each element only once in only in a forward
+direction.</td>
+</tr>
+<tr>
+<td>POD</td>
+<td>POD means Plain Old Data. It refers to C++ classes which act like built-in types and C structs. These are useful to
+distinguish because some algorithms can be made more efficient when they can detect that they are working with PODs
+instead of regular classes.&nbsp;</td>
+</tr>
+<tr>
+<td>Predicate</td>
+<td>A function which takes one argument returns true if the argument meets some criteria.</td>
+</tr>
+<tr>
+<td>priority_queue</td>
+<td>A priority_queue is an adapter container which implements a heap via a random access container such as vector or
+deque.</td>
+</tr>
+<tr>
+<td>queue</td>
+<td>A queue is an adapter container which implements a FIFO (first-in, first-out) container with which you can add
+items to the back and get items from the front.</td>
+</tr>
+<tr>
+<td>RandomAccessIterator</td>
+<td>An input iterator which can be addressed like an array. It is a superset of all other input iterators.</td>
+</tr>
+<tr>
+<td>red-black tree</td>
+<td>A red-black tree is a binary tree which has the property of being always balanced. The colors red and black are
+somewhat arbitrarily named monikers for nodes used to measure the balance of the tree. Red-black trees are considered
+the best all-around data structure for sorted containers.</td>
+</tr>
+<tr>
+<td>scalar</td>
+<td>A scalar is a data type which is implemented via a numerical value. In C++ this means integers, floating point
+values, enumerations, and pointers.&nbsp;</td>
+</tr>
+<tr>
+<td>scoped_ptr</td>
+<td>A scoped_ptr is a smart pointer which is the same as C++ auto_ptr except that it cannot be copied.</td>
+</tr>
+<tr>
+<td>set</td>
+<td>Set is a sorted associative container that stores objects of type Key. Its value type, as well as its key type, is
+Key. It is also a unique associative container, meaning that no two elements are the same.It is implemented with a tree
+structure.</td>
+</tr>
+<tr>
+<td>sequence</td>
+<td>A sequence is a variable-sized container whose elements are arranged in a strict linear (though not necessarily
+contiguous) order. It supports insertion and removal of elements. Sequence containers include vector, deque, array,
+list, slist.</td>
+</tr>
+<tr>
+<td>size</td>
+<td>All conventional containers have a size member function which returns the count of elements in the container. The
+efficiency of the size function differs between containers.</td>
+</tr>
+<tr>
+<td>size_type</td>
+<td>The type that a container uses to define its size and counts. This is similar to the C/C++ size_t type but may be
+specialized for the container. It defaults to size_t, but it is possible to force it to be 4 bytes for 64 bit machines by defining EASTL_SIZE_T_32BIT.</td>
+</tr>
+<tr>
+<td>skip list</td>
+<td>A skip-list is a type of container which is an alternative to a binary tree for finding data.</td>
+</tr>
+<tr>
+<td>shared_ptr</td>
+<td>A shared_ptr is a smart pointer which allows multiple references (via multiple shared_ptrs) to the same object.
+When the last shared_ptr goes away, the pointer is freed. shared_ptr is implemented via a shared count between all
+instances.</td>
+</tr>
+<tr>
+<td>slist</td>
+<td>An slist is like a list but is singly-linked instead of doubly-linked. It can only be iterated in a
+forward-direction.</td>
+</tr>
+<tr>
+<td>smart pointer</td>
+<td>Smart pointer is a term that identifies a family of utility classes which store pointers and free them when the
+class instance goes out of scope. Examples of smart pointers are shared_ptr, linked_ptr, intrusive_ptr, and
+scoped_ptr.</td>
+</tr>
+<tr>
+<td>splice</td>
+<td>Splicing refers to the moving of a subsequence of one Sequence into another Sequence.</td>
+</tr>
+<tr>
+<td>stack</td>
+<td>A stack is a adapter container which implements LIFO (last-in, first, out) access via another container such as a
+list or deque.</td>
+</tr>
+<tr>
+<td>STL</td>
+<td>Standard Template Library.&nbsp;</td>
+</tr>
+<tr>
+<td>StrictWeakOrdering</td>
+<td>A BinaryPredicate that compares two objects, returning true if the first precedes the second. Like Compare but has
+additional requirements. Used for sorting routines.<br>
+<br>
+This predicate must satisfy the standard mathematical definition of a strict weak ordering. A StrictWeakOrdering has to
+behave the way that "less than" behaves: if a is less than b then b is not less than a, if a is less than b and b is
+less than c then a is less than c, and so on.</td>
+</tr>
+<tr>
+<td>string</td>
+<td>See basic_string.</td>
+</tr>
+<tr>
+<td>T</td>
+<td>T is the template parameter name used by most containers to identify the contained element type.&nbsp;</td>
+</tr>
+<tr>
+<td>template parameter</td>
+<td>A template parameter is the templated type used to define a template function or class. In the declaration
+'template &lt;typename T&gt; class vector{ },' &nbsp;T is a template parameter.</td>
+</tr>
+<tr>
+<td>template specialization</td>
+<td>A template specialization is a custom version of a template which overrides the default version and provides
+alternative functionality, often for the purpose of providing improved or specialized functionality.</td>
+</tr>
+<tr>
+<td>treap</td>
+<td>A tree-like structure implemented via a heap. This is an alternative to a binary tree (e.g. red-black tree),
+skip-list, and sorted array as a mechanism for a fast-access sorted container.</td>
+</tr>
+<tr>
+<td>type traits</td>
+<td>Type traits are properties of types. If you have a templated type T and you want to know if it is a pointer, you
+would use the is_pointer type trait. If you want to know if the type is a POD, you would use the is_pod type trait.
+Type traits are very useful for allowing the implementation of optimized generic algorithms and for asserting that
+types have properties expected by the function or class contract. For example, you can use type_traits to tell if a
+type can be copied via memcpy instead of a slower element-by-element copy.</td>
+</tr>
+<tr>
+<td>typename</td>
+<td>Typename is a C++ keyword used in templated function implementations which identifies to the compiler that the
+following expression is a type and not a value. It is used extensively in EASTL, particularly in the algorithms.</td>
+</tr>
+<tr>
+<td>UnaryOperation</td>
+<td>A function which takes one argument and returns a value (which will usually be assigned to second object).</td>
+</tr>
+<tr>
+<td>upper_bound</td>
+<td>upper_bound is a version of binary search: it attempts to find the element value in an ordered range [first, last).
+Specifically, it returns the last position where value could be inserted without violating the ordering.</td>
+</tr>
+<tr>
+<td>value_type, Value</td>
+<td>A value_type is a typedef used by all containers to identify the elements they contain. In most cases value_type is
+simply the same thing as the user-supplied T template parameter. The primary exception is the associative containers
+whereby value_type is the pair of key_type and mapped_type.</td>
+</tr>
+<tr>
+<td>vector</td>
+<td>A vector is a Sequence that supports random access to elements, constant time insertion and removal of elements at
+the end, and linear time insertion and removal of elements at the beginning or in the middle. The number of elements in
+a vector may vary dynamically; memory management is automatic. Vector is the simplest of the container classes, and in
+many cases the most efficient.</td>
+</tr>
+<tr>
+<td>vector_map,&nbsp;vector_multimap,&nbsp;vector_set,&nbsp;vector_multiset</td>
+<td>These are containers that implement the functionality of map, multimap, set, and multiset via a vector or deque
+instead of a tree. They use less memory and find items faster, but are slower to modify and modification invalidates
+iterators.</td>
+</tr>
+<tr>
+<td>weak_ptr</td>
+<td>A weak_ptr is an adjunct to shared_ptr which doesn't increment the reference on the contained object but can safely
+tell you if the object still exists and access it if so. It has uses in preventing circular references in
+shared_ptrs.</td>
+</tr>
+</tbody>
+</table>
+<br>
+
+<hr style="width: 100%; height: 2px;">
+End of document<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTL Gotchas.html b/EASTL/doc/html/EASTL Gotchas.html
new file mode 100644
index 0000000..daa8f7a
--- /dev/null
+++ b/EASTL/doc/html/EASTL Gotchas.html
@@ -0,0 +1,175 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL Gotchas</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta name="author" content="Paul Pedriana">
+ <meta name="description" content="Desciptions of potential pitfalls that exist in EASTL.">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+ <style type="text/css">
+<!--
+.style1 {color: #FF0000}
+.style2 {color: #009933}
+-->
+ </style>
+</head>
+<body>
+<h1>EASTL Gotchas</h1>
+<p> There are some cases where the EASTL design results in "gotchas" or behavior that isn't necessarily what the new user
+ would expect. These are all situations in which this behavior may be undesirable. One might ask, "Why not change EASTL
+ to make these gotchas go away?" The answer is that in each case making the gotchas go away would either be impossible
+ or would compromise the functionality of the library.</p>
+<h2>Summary</h2>
+<p>The descriptions here are intentionally terse; this is to make them easier to visually scan.</p>
+<table style="text-align: left; width: 100%;" border="0" cellpadding="1" cellspacing="1">
+<tbody>
+<tr>
+<td style="width: 28px;">1</td>
+<td><a href="#Gotchas.1">map::operator[] can create elements.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">2</td>
+<td><a href="#Gotchas.2">char* converts to string silently.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">3</td>
+<td><a href="#Gotchas.3">char* is compared by ptr and not by contents.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">4</td>
+<td><a href="#Gotchas.4">Iterators can be invalidated by container mutations.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">5</td>
+<td><a href="#Gotchas.5">Vector resizing may cause ctor/dtor cascades.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">6</td>
+<td><a href="#Gotchas.6">Vector and string insert/push_back/resize can reallocate.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">7</td>
+<td><a href="#Gotchas.7">Deriving from containers may not work.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">8</td>
+<td><a href="#Gotchas.8">set::iterator is const_iterator.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">9</td>
+<td><a href="#Gotchas.9">Inserting elements means copying by value.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">10</td>
+<td><a href="#Gotchas.10">Containers of pointers can leak if you aren't careful.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">11</td>
+<td><a href="#Gotchas.11">Containers of auto_ptrs can crash.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">12</td>
+<td><a href="#Gotchas.12">Remove algorithms don't actually remove elements.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">13</td>
+<td><a href="#Gotchas.13">list::size() is O(n).</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">14</td>
+<td><a href="#Gotchas.14">vector and deque::size() may incur integer division.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">15</td>
+<td><a href="#Gotchas.15">Be careful making custom Compare functions.</a></td>
+</tr>
+<tr>
+<td style="width: 28px;">16</td>
+<td><a href="#Gotchas.16">Comparisons involving floating point are dangerous.</a></td>
+</tr>
+<tr>
+ <td style="width: 28px;">17</td>
+ <td><a href="#Gotchas.17">Writing beyond string::size and vector::size is dangerous. </a></td>
+</tr>
+<tr>
+ <td style="width: 28px;">18</td>
+ <td><a href="#Gotchas.18">Container operator=() doesn't copy allocators. </a></td>
+</tr>
+</tbody>
+</table>
+<h2> Detail</h2>
+<p class="faq-question"><a name="Gotchas.1"></a>1
+map::operator[] can create elements.</p>
+<p class="faq-answer">By design, map operator[] creates a value for you if it isn't already present. The reason for this is that the alternative behavior would be to throw an exception, and such behavior isn't desirable. The resolution is to simply use the map::find function instead of operator[].</p>
+<p class="faq-question"><a name="Gotchas.2"></a>2
+char* converts to string silently.</p>
+<p class="faq-answer">The string class has a non-explicit constructor that takes char* as an argument. Thus if you pass char* to a function that takes a string object, a temporary string will be created. In some cases this is undesirable behavior but the user may not notice it right away, as the compiler gives no warnings. The reason that the string constructor from char* is not declared explicit is that doing so would prevent the user from expressions such as: string s = &quot;hello&quot;. In this example, no temporary string object is created, but the syntax is not possible if the char* constructor is declared explicit. Thus a decision to make the string char* constructor explicit involves tradeoffs.</p>
+<p class="faq-answer">There is an EASTL configuration option called EASTL_STRING_EXPLICIT which makes the string char* ctor explicit and avoids the behaviour described above.</p>
+<p class="faq-question"><a name="Gotchas.3"></a>3
+char* is compared by ptr and not by contents.</p>
+<p class="faq-answer">If you have a set of strings declared as set&lt;char*&gt;, the find function will compare via the pointer value and not the string contents. The workaround is to make a set of string objects or, better, to supply a custom string comparison function to the set. The workaround is not to declare a global operator&lt; for type char*, as that could cause other systems to break.</p>
+<p class="faq-question"><a name="Gotchas.4"></a>4 Iterators can be invalidated by container mutations</p>
+<p class="faq-answer">With some containers, modifications of them may invalidate iterators into them. With other containers, modifications of them only an iterator if the modification involves the element that iterator refers to. Containers in the former category include vector, deque, basic_string (string), vector_map, vector_multimap, vector_set, and vector_multiset. Containers in the latter category include list, slist, map, multimap, multiset, all hash containers, and all intrusive containers.</p>
+<p class="faq-question"><a name="Gotchas.5"></a>5 Vector resizing may cause ctor/dtor cascades.</p>
+<p>If elements are inserted into a vector in middle of the sequence, the elements from the insertion point to the end will be copied upward. This will necessarily cause a series of element constructions and destructions as the elements are copied upward. Similarly, if an element is appended to a vector but the vector capacity is exhausted and needs to be reallocated, the entire vector will undergo a construction and destruction pass as the values are copied to the new storage. This issue exists for deque as well, though to a lesser degree. For vector, the resolution is to reserve enough space in your vector to prevent such reallocation. For deque the resolution is to set its subarray size to enough to prevent such reallocation. Another solution that can often be used is to take advantage of the has_trivial_relocate type trait, which can cause such moves to happen via memcpy instead of via ctor/dtor calls. If your class can be safely memcpy'd, you can use EASTL_DECLARE_TRIVIAL_RELOCATE to tell the compiler it can be memcpy'd. Note that built-in scalars (e.g. int) already are automatically memcpy'd by EASTL.</p>
+<p class="faq-question"><a name="Gotchas.6"></a>6
+Vector and string insert/push_back/resize can reallocate.</p>
+<p>If you create an empty vector and use push_back to insert 100 elements, the vector will reallocate itself at least three or four times during the operation. This can be an undesirable thing. The best thing to do if possible is to reserve the size you will need up front in the vector constructor or before you add any elements.</p>
+<p class="faq-question"><a name="Gotchas.7"></a>7
+Deriving from containers may not work.</p>
+<p>EASTL containers are not designed with the guarantee that they can be arbitrarily subclassed. This is by design and is done for performance reasons, as such guarantees would likely involve making containers use virtual functions. However, some types of subclassing can be successful and EASTL does such subclassing internally to its advantage. The primary problem with subclassing results when a parent class function calls a function that the user wants to override. The parent class cannot see the overridden function and silent unpredictable behavior will likely occur. If your derived container acts strictly as a wrapper for the container then you will likely be able to successfully subclass it.</p>
+<p class="faq-question"><a name="Gotchas.8"></a>8
+set::iterator is const_iterator.</p>
+<p class="faq-answer">The reason this is so is that a set is an ordered container and changing the value referred to by an iterator could make the set be out of order. Thus, set and multiset iterators are always const_iterators. If you need to change the value and are sure the change will not alter the container order, use const_cast or declare mutable member variables for your contained object. This resolution is the one blessed by the C++ standardization committee. This issue is addressed in more detail in the EASTL FAQ.</p>
+<p class="faq-question"><a name="Gotchas.9"></a>9
+Inserting elements means copying by value.</p>
+<p class="faq-answer">When you insert an element into a (non-intrusive) container, the container makes a copy of the element. There is no provision to take over ownership of an object from the user. The exception to this is of course when you use a container of pointers instead of a container of values. See the entry below regarding containers of pointers. Intrusive containers (e.g. intrusive_list) do in fact take over the user-provided value, and thus provide another advantage over regular containers in addition to avoiding memory allocation.</p>
+<p class="faq-question"><a name="Gotchas.10"></a>10
+ Containers of pointers can leak if you aren't careful.</p>
+<p class="faq-answer">Containers of points don't know or care about the possibility that the pointer may have been allocated and need to be freed. Thus if you erase such elements from a container they are not freed. The resolution is to manually free the pointers when removing them or to instead use a container of smart pointers (shared smart pointers, in particular). This issue is addressed in more detail in the EASTL FAQ and the auto_ptr-related entry below.</p>
+<p class="faq-question"><a name="Gotchas.11"></a>11
+Containers of auto_ptrs can crash</p>
+<p class="faq-answer">We suggested above that the user can use a container of smart pointers to automatically manage contained pointers. However, you don't want to use auto_ptr, as auto_ptrs cannot be safely assigned to each other; doing so results in a stale pointer and most likely a crash.</p>
+<p class="faq-question"><a name="Gotchas.12"></a>12
+Remove algorithms don't actually remove elements.</p>
+<p class="faq-answer">Algorithms such as remove, remove_if, remove_heap, and unique do not erase elements from the sequences they work on. Instead, they return an iterator to the new end of the sequence and the user must call erase with that iterator in order to actually remove the elements from the container. This behavior exists because algorithms work on sequences via iterators and don't know how to work with containers. Only the container can know how to best erase its own elements. In each case, the documentation for the algorithm reminds the user of this behavior. Similarly, the copy algorithm copies elements from one sequence to another and doesn't modify the size of the destination sequence. So the destination must hold at least as many items as the source, and if it holds more items, you may want to erase the items at the end after the copy.</p>
+<p class="faq-question"><a name="Gotchas.13"></a>13
+list::size() is O(n).</p>
+<p class="faq-answer">By this we mean that calling size() on a list will iterate the list and add the size as it goes. Thus, getting the size of a list is not a fast operation, as it requires traversing the list and counting the nodes. We could make list::size() be fast by having a member mSize variable. There are reasons for having such functionality and reasons for not having such functionality. We currently choose to not have a member mSize variable as it would add four bytes to the class, add&nbsp;processing to functions such as insert and erase, and would only serve to improve the size function, but no other function. The alternative&nbsp;argument is that the C++ standard states that std::list&nbsp;should be an O(1) operation (i.e. have a member size variable), most C++ standard library list implementations do so, the size is but an integer which is quick to update, and many users expect to have a fast size function. All of this applies to slist and intrusive_list as well.</p>
+<p class="faq-answer">Note that EASTL's config.h file has an option in it to cause list and slist to cache their size with an mSize variable and thus make size() O(1). This option is disabled by default.</p>
+<p class="faq-question"> <a name="Gotchas.14"></a>14
+ vector and deque::size() may incur integer division.</p>
+<p class="faq-answer">Some containers (vector and deque in particular) calculate their size by pointer subtraction. For example, the implementation of vector::size() is 'return mpEnd - mpBegin'. This looks like a harmless subtraction, but if the size of the contained object is not an even power of two then the compiler will likely need to do an integer division to calculate the value of the subtracted pointers. One might suggest that vector use mpBegin and mnSize as member variables instead of mpBegin and mpEnd, but that would incur costs in other vector operations. The suggested workaround is to iterate a vector instead of using a for loop and operator[] and for those cases where you do use a for loop and operator[], get the size once at the beginning of the loop instead of repeatedly during the condition test.</p>
+<p class="faq-question"><a name="Gotchas.15"></a>15
+ Be careful making custom Compare functions.
+</p>
+<p class="faq-answer">A Compare function compares two values and returns true if the first is less than the second. This is easy to understand for integers and strings, but harder to get right for more complex structures. Many a time have people decided to come up with a fancy mechanism for comparing values and made mistakes. The FAQ has a couple entries related to this. See http://blogs.msdn.com/oldnewthing/archive/2003/10/23/55408.aspx for a story about how this can go wrong by being overly clever.</p>
+<p class="faq-question"> <a name="Gotchas.16"></a>16
+ Comparisons involving floating point are dangerous.</p>
+<p class="faq-answer">Floating point comparisons between two values that are very nearly equal can result in inconsistent results. Similarly, floating point comparisons between NaN values will always generate inconsistent results, as NaNs by definition always compare as non-equal. You thus need to be careful when using comparison functions that work with floating point values. Conversions to integral values may help the problem, but not necessarily.</p>
+<p class="faq-question"><a name="Gotchas.17" id="Gotchas.17"></a>17 Writing beyond string::size and vector::size is dangerous.</p>
+<p>A trick that often comes to mind when working with strings is to set the string capacity to some maximum value, strcpy data into it, and then resize the string when done. This can be done with EASTL, but only if you resize the string to the maximum value and not reserve the string to the maximum value. The reason is that when you resize a string from size (n) to size (n + count), the count characters are zeroed and overwrite the characters that you strcpyd. </p>
+<p class="faq-answer">The following code is broken: </p>
+<p class="code-example">string mDataDir;<br>
+ <br>
+ mDataDir.<span class="style1">reserve</span>(kMaxPathLength);<br>
+ strcpy(&amp;mDataDir[0], &quot;blah/blah/blah&quot;);<br>
+mDataDir.resize(strlen(&amp;mDataDir[0])); // Overwrites your blah/... with 00000...</p>
+<p class="faq-answer">This following code is OK: </p>
+<p class="code-example">string mDataDir;<br>
+ <br>
+ mDataDir.<span class="style2">resize</span>(kMaxPathLength);<br>
+ strcpy(&amp;mDataDir[0], &quot;blah/blah/blah&quot;);<br>
+mDataDir.resize(strlen(&amp;mDataDir[0]));</p>
+<p class="faq-question"><a name="Gotchas.18" id="Gotchas.18"></a>18 Container operator=() doesn't copy allocators.
+</p>
+<p class="faq-answer">EASTL container assignment (e.g. vector::operator=(const vector&amp;)) doesn't copy the allocator. There are good and bad reasons for doing this, but that's how it acts. So you need to beware that you need to assign the allocator separately or make a container subclass which overrides opeator=() and does this. </p>
+<br>
+<hr style="width: 100%; height: 2px;">
+End of document<br>
+<br>
+<br>
+<br>
+<br>
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTL Introduction.html b/EASTL/doc/html/EASTL Introduction.html
new file mode 100644
index 0000000..0e9b23c
--- /dev/null
+++ b/EASTL/doc/html/EASTL Introduction.html
@@ -0,0 +1,47 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL Introduction</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta name="author" content="Paul Pedriana">
+ <meta name="description" content="Basic introduction to EASTL.">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+</head>
+<body>
+<h1>EASTL Introduction</h1>
+<p>EASTL stands for Electronic Arts Standard Template Library. It is a C++ template library of containers, algorithms, and
+ iterators useful for runtime and tool development across multiple platforms. It is a fairly extensive and robust
+ implementation of such a library and has an emphasis on high performance above all other considerations.</p>
+<h2>Intended Audience</h2>
+<p>This is a short document intended to provide a basic introduction to EASTL for
+ those new to the concept of EASTL or STL. If you are familiar with the C++ STL
+ or have worked with other templated container/algorithm libraries, you probably
+ don't need to read this. If you have no familiarity with C++ templates at all,
+ then you probably will need more than this document to get you up to speed. In
+ this case you need to understand that templates, when used properly, are powerful
+ vehicles for the ease of creation of optimized C++ code. A description of C++
+ templates is outside the scope of this documentation, but there is plenty of such
+ documentation on the Internet. See the <a href="EASTL%20FAQ.html">EASTL FAQ.html</a>
+ document for links to information related to learning templates and STL.</p>
+<h2>EASTL Modules</h2>
+<p>EASTL consists primarily of containers, algorithms, and iterators. An example of a container is a linked list, while an
+ example of an algorithm is a sort function; iterators are the entities of traversal for containers and algorithms.
+ EASTL containers a fairly large number of containers and algorithms, each of which is a very clean, efficient, and
+ unit-tested implementation. We can say with some confidence that you are not likely to find better implementations of
+ these (commercial or otherwise), as these are the result of years of wisdom and diligent work. For a detailed list of
+ EASTL modules, see <a href="EASTL%20Modules.html">EASTL Modules.html</a>.</p>
+<h2>EASTL Suitability</h2>
+<p>What uses are EASTL suitable for? Essentially any situation in tools and shipping applications where the functionality
+ of EASTL is useful. Modern compilers are capable of producing good code with templates and many people are using them
+ in both current generation and future generation applications on multiple platforms from embedded systems to servers
+ and mainframes.</p>
+<hr style="width: 100%; height: 2px;">
+End of document<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTL Maintenance.html b/EASTL/doc/html/EASTL Maintenance.html
new file mode 100644
index 0000000..aaca955
--- /dev/null
+++ b/EASTL/doc/html/EASTL Maintenance.html
@@ -0,0 +1,292 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL Maintenance</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta name="author" content="Paul Pedriana">
+ <meta name="description" content="Information for the EASTL maintainer.">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+</head>
+<body>
+
+<h1>EASTL Maintenance</h1>
+<h2><span style="font-style: italic;"><a name="Introduction" id="Introduction"></a></span>Introduction</h2>
+<p>The purpose of this document is to provide some necessary background for anybody who might do work on EASTL. Writing
+ generic templated systems like EASTL can be surprisingly tricky. There are numerous details of the C++ language that
+ you need to understand which don't usually come into play during the day-to-day C++ coding that many people do. It is
+ easy to make a change to some function that seems proper and works for your test case but either violates the design
+ expectations or simply breaks under other circumstances.<br>
+ <br>
+ It may be useful to start with an example. Here we provide an implementation of the count algorithm which is seems
+simple enough. Except it is wrong and while it&nbsp;will compile in some cases it won't compile in others:</p>
+<pre class="code-example">template &lt;class InputIterator, class T&gt;
+int count(InputIterator first, InputIterator last, const T&amp; value)
+{
+ &nbsp;&nbsp;&nbsp; int result = 0;
+
+ &nbsp;&nbsp;&nbsp; for(; first &lt; last; ++first){
+ &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; if(*first == value)
+ &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; ++result;
+ &nbsp;&nbsp;&nbsp; }
+
+ &nbsp;&nbsp;&nbsp; return result;
+ } </pre>
+<p>The problem is with the comparison 'first &lt; last'. The count algorithm takes an InputIterator and operator&lt; is
+not guaranteed to exist for any given InputIterator (and indeed while operator&lt; exists for vector::iterator, it
+doesn't exist for list::iterator). The comparison in the above algorithm must instead be implemented as 'first !=
+last'. If we were working with a RandomAccessIterator then 'first &lt; last' would be valid.</p>
+<p>In the following sections we cover various topics of interest regarding the development and maintentance of EASTL.
+ Unfortunately, this document can't cover every aspect of EASTL maintenance issues, but at least it should give you a
+sense of the kinds of issues.</p>
+
+<h2> <a name="Language_Standard" id="Language_Standard"></a>C++ Language Standard</h2>
+<p>First and foremost, you need to be familiar with the C++ standard. In particular, the sections of the standard related
+ to containers, algorithms, and iterators are of prime significance. We'll talk about some of this in more detail below.
+ Similarly, a strong understanding of the basic data types is required. What is the difference between ptrdiff_t and
+intptr_t; unsigned int and size_t; char and signed char?</p>
+<p>In addition to the C++ language standard, you'll want to be familiar with the C++ Defect Report. This is a continuously
+ updated document which lists flaws in the original C++ language specification and the current thinking as the
+resolutions of those flaws. You will notice various references to the Defect Report in EASTL source code.</p>
+<p>Additionally, you will want to be familiar with the C++ Technical Report 1 (as of this writing there is only one). This
+ document is the evolving addendum to the C++ standard based on both the Defect Report and based on desired additions to
+the C++ language and standard library.</p>
+<p>Additionally, you will probably want to have some familiarity with Boost. It also helps to&nbsp;keep an eye on
+ comp.std.c++ Usenet discussions. However, watch out for what people say on Usenet. They tend to defend GCC, Unix, std
+ STL, and C++&nbsp;to a sometimes&nbsp;unreasonable degree. Many discussions ignore performance implications and
+concentrate only on correctness and sometimes academic correctness above usability.</p>
+<h2> <a name="Langauge_Use" id="Langauge_Use"></a>Language Use</h2>
+<p>Macros are (almost) not allowed in EASTL.&nbsp;A prime directive of EASTL is to be easier to read by users and most of
+ the time macros are an impedence to this. So we avoid macros at all costs, even if it ends up making our development
+ and maintenance more difficult. That being said, you will notice that the EASTL config.h file uses macros to control
+ various options. This is an exception to the rule; when we talk about not using macros, we mean with the EASTL
+implementation itself.</p>
+<p>EASTL assumes a compliant and intelligent C++ compiler, and thus all language facilities are usable. However, we
+nevertheless choose to stay away from some language functionality. The primary language features we avoid are:</p>
+<ul>
+ <li>RTTI (run-time-type-identification) (this is deemed too costly)</li>
+<li>Template export (few compilers support this)</li>
+<li>Exception specifications (most compilers ignore them)</li>
+</ul>
+<p>Use of per-platform or per-compiler code should be avoided when possible but where there is a significant advantage to
+ be gained it can and indeed should be used. An example of this is the GCC __builtin_expect feature, which allows the
+ user to give the compiler a hint about whether an expression is true or false. This allows for the generation of code
+that executes faster due to more intelligent branch prediction.</p>
+<h2> <a name="Prime_Directives" id="Prime_Directives"></a>Prime Directives</h2>
+<p>The
+implementation of EASTL is guided foremost by the following directives which are listed in order of importance.</p>
+<ol>
+<li>Efficiency (speed and memory usage)</li>
+<li>Correctness (doesn't have bugs)</li>
+<li>Portability (works on all required platforms with minimal specialized code)</li>
+<li>Readability (code is legible and comments are present and useful)</li>
+</ol>
+<p>Note that unlike commercial STL implementations which must put correctness above all, we put a higher value on
+ efficiency. As a result, some functionality may have some usage limitation that is not present in other similar systems
+but which allows for more efficient operation, especially on the platforms of significance to us.</p>
+<p>Portability is significant, but not critical. Yes, EASTL must compile and run on all platforms that we will ship games
+ for. But we don't take that to mean under all compilers that could be conceivably used for such platforms. For example,
+ Microsoft VC6 can be used to compile Windows programs, but VC6's C++ support is too weak for EASTL and so you simply
+cannot use EASTL under VC6.</p>
+<p>Readability is something that EASTL achieves better than many other templated libraries, particularly Microsoft STL and
+ STLPort. We make every attempt to make EASTL code clean and sensible. Sometimes our need to provide optimizations
+ (particularly related to type_traits and iterator types) results in less simple code, but efficiency happens to be our
+prime directive and so it overrides all other considerations.</p>
+
+<h2> <a name="Coding_Conventions" id="Coding_Conventions"></a>Coding Conventions</h2>
+<p>Here we provide a list of coding conventions to follow when maintaining or adding to EASTL, starting with the three
+language use items from above:</p>
+<ul>
+<li>No RTTI use.</li>
+<li>No use of exception specifications (e.g. appending the 'throw' declarator to a function).</li>
+<li>No use of exception handling itself except where explicitly required by the implementation (e.g. vector::at).</li>
+<li>Exception use needs to savvy to EASTL_EXCEPTIONS_ENABLED.</li>
+<li>No use of macros (outside of config.h). Macros make things more difficult for the user.</li>
+<li>No use of static or global variables.</li>
+<li>No use of global new, delete, malloc, or free. All memory must be user-specifyable via an Allocator parameter
+(default-specified or explicitly specified).</li>
+<li>Containers use protected member data and functions as opposed to private. This is because doing so allows
+subclasses to extend the container without the creation of intermediary functions. Recall from our <a href="#Prime_Directives">prime directives</a> above that performance and simplicity overrule all.</li>
+<li>No use of multithreading primitives.&nbsp;</li>
+<li>No use of the export keyword.</li>
+<li>We don't have a rule about C-style casts vs. C++ static_cast&lt;&gt;, etc. We would always use static_cast except
+that debuggers can't evaluate them and so in practice they can get in the way of debugging and tracing. However, if the
+cast is one that users don't tend to need to view in a debugger, C++ casts are preferred.</li>
+<li>No external library dependencies whatsoever, including standard STL. EASTL is dependent&nbsp;on only EABase and the
+C++ compiler.&nbsp;</li>
+<li>All code must be const-correct. This isn't just for readability -- compilation can fail unless const-ness is used
+correctly everywhere.&nbsp;</li>
+<li>Algorithms do not refer to containers; they refer only to iterators.</li>
+<li>Algorithms in general do not allocate memory. If such a situation arises, there should be a version of the
+algorithm which allows the user to provide the allocator.</li>
+<li>No inferior implementations. No facility should be added to EASTL unless it is of professional
+quality.</li>
+<li>The maintainer should emulate the EASTL style of code layout, regardless of the maintainer's personal preferences.
+When in Rome, do as the Romans do. EASTL uses 4 spaces for indents, which is how the large majority of code within EA
+is written.</li>
+<li>No major changes should be done without consulting a peer group.</li>
+</ul>
+
+<h2><a name="Compiler_Issues" id="Compiler_Issues"></a>Compiler Issues</h2>
+<p>Historically, templates are the feature of C++ that has given C++ compilers the most fits. We are still working with
+ compilers that don't completely and properly support templates. Luckily, most compilers are now good enough to handle
+what EASTL requires. Nevertheless, there are precautions we must take.</p>
+<p>It turns out that the biggest problem in writing portable EASTL code is that VC++ allows you to make illegal statements
+ which are not allowed by other compilers. For example, VC++ will allow you to neglect using the typename keyword in
+template references, whereas GCC (especially 3.4+) requires it.</p>
+<p>In order to feel comfortable that your EASTL code is C++ correct and is portable, you must do at least these two
+things:</p>
+<ul>
+<li>Test under at least VS2005, GCC 3.4+, GCC 4.4+, EDG, and clang. </li>
+<li>Test all functions that you write, as compilers will often skip the compilation of a template function if it isn't
+used.</li>
+</ul>
+<p>The two biggest issues to watch out for are 'typename' and a concept called "dependent names". In both cases VC++ will
+ accept non-conforming syntax whereas most other compilers will not. Whenever you reference a templated type (and not a templated
+ value) in a template, you need to prefix it by 'typename'. Whenever your class function refers to a base class member (data or
+ function), you need to refer to it by "this-&gt;", "base_type::", or by placing a "using" statement in your class to
+declare that you will be referencing the given base class member.</p>
+
+<h2> <a name="Iterator_Issues" id="Iterator_Issues"></a>Iterator Issues</h2>
+<p>The most important thing to understand about iterators is the concept of iterator types and their designated
+ properties. In particular, we need to understand the difference between InputIterator, ForwardIterator,
+ BidirectionalIterator, RandomAccessIterator, and OutputIterator. These differences dictate both how we implement our
+ algorithms and how we implement our optimizations. Please read the C++ standard for a reasonably well-implemented
+ description of these iterator types.</p>
+<p>Here's an example from EASTL/algorithm.h which demonstrates how we use iterator types to optimize the reverse algorithm
+based on the kind of iterator passed to it:</p>
+<pre class="code-example">template &lt;class BidirectionalIterator&gt;
+inline void reverse_impl(BidirectionalIterator first, BidirectionalIterator last, bidirectional_iterator_tag)<br>{
+&nbsp;&nbsp;&nbsp; for(; (first != last) &amp;&amp; (first != --last); ++first) <span class="code-example-comment">// We are not allowed to use operator &lt;, &lt;=, &gt;, &gt;= with</span>
+&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; iter_swap(first, last);&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; <span class="code-example-comment">// a generic (bidirectional or otherwise) iterator.</span>
+}<br>
+
+template &lt;typename RandomAccessIterator&gt;
+inline void reverse_impl(RandomAccessIterator first, RandomAccessIterator last, random_access_iterator_tag)
+{
+&nbsp;&nbsp;&nbsp; for(; first &lt; --last; ++first) <span class="code-example-comment">// With a random access iterator, we can use operator &lt; to more efficiently implement</span>
+&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; iter_swap(first, last);&nbsp;&nbsp;&nbsp; <span class="code-example-comment">// this algorithm. A generic iterator doesn't necessarily have an operator &lt; defined.</span>
+}<br><br>
+template &lt;class BidirectionalIterator&gt;
+inline void reverse(BidirectionalIterator first, BidirectionalIterator last)
+{
+&nbsp;&nbsp;&nbsp; typedef typename iterator_traits&lt;BidirectionalIterator&gt;::iterator_category IC;
+&nbsp;&nbsp;&nbsp; reverse_impl(first, last, IC());
+}</pre>
+
+<h2> <a name="Exception_Handling" id="Exception_Handling"></a>Exception Handling</h2>
+<p>You will notice that EASTL uses try/catch in some places (particularly in containers) and uses
+ the&nbsp;EASTL_EXCEPTIONS_ENABLED define. For starters, any EASTL code that uses try/catch should always be wrapped
+ within #if EASTL_EXCEPTIONS_ENABLED (note: #if, not #ifdef).</p>
+<p>This is simple enough, but what you may be wondering is how it is that EASTL decides to use try/catch for some sections
+ of code and not for others. EASTL follows the C++ standard library conventions with respect to exception handling, and
+ you will see similar exception handling in standard STL. The code that you need to wrap in try/catch is code that can
+ throw a C++ exception (not to be confused with CPU exception) and needs to have something unwound (or fixed) as a
+ result. The important thing is that the container be in a valid state after encountering such exceptions. In general
+the kinds of things that require such try/catch are:</p>
+<ul>
+<li>Memory allocation failures (which throw exceptions)</li>
+<li>Constructor exceptions</li>
+</ul>
+<p>Take a look at the cases in EASTL where try/catch is used and see what it is doing.</p>
+<h2> <a name="Type_Traits" id="Type_Traits"></a>Type Traits </h2>
+<p>EASTL provides a facility called type_traits which is very similar to the type_traits being proposed by the C++ TR1
+ (see above). type_traits are useful because they tell you about properties of types at compile time. This allows you to
+ do things such as assert that a data type is scalar or that a data type is const. The way we put them to use in EASTL
+ is to take advantage of them to implement different pathways for functions based on types. For example, we can copy a
+ contiguous array of scalars much faster via memcpy than we can via a for loop, though we could not safely employ the
+ for loop for a non-trivial C++ class.</p>
+<p>As mentioned in the GeneralOptimizations section below, EASTL should take advantage of type_traits information to the
+extent possible to achive maximum effiiciency.</p>
+<h2> <a name="General_Optimizations" id="General_Optimizations"></a>General
+Optimizations</h2>
+<p>One of the primary goals of EASTL is to achieve the highest possible efficiency. In cases where EASTL functionality
+ overlaps standard C++ STL functionality, standard STL implementations provided by compiler vendors are a benchmark upon
+ which EASTL strives to beat. Indeed EASTL is more efficient than all other current STL implementations&nbsp;(with some
+ exception in the case of some Metrowerks STL facilities). Here we list some of the things to look for when considering
+ optimization of EASTL code These items can be considered general optimization suggestions for any code, but this
+particular list applies to EASTL:</p>
+<ul>
+<li>Take advantage of type_traits to the extent possible (e.g. to use memcpy to move data instead of a for loop when
+possible).</li>
+<li>Take advantage of iterator types to the extent possible.</li>
+<li>Take advantage of the compiler's expectation that if statements are expected to evaluate as true and for loop
+conditions are expected to evaluate as false.</li>
+<li>Make inline-friendly code. This often means avoiding temporaries to the extent possible.</li>
+<li>Minimize branching (i.e. minimize 'if' statements). Where branching is used, make it so that 'if' statements
+execute as true.</li>
+<li>Use EASTL_LIKELY/EASTL_UNLIKELY to give branch hints to the compiler when you are confident it will be
+beneficial.</li>
+<li>Use&nbsp;restricted pointers (EABase's EA_RESTRICT or various compiler-specific versions of __restrict).</li>
+<li>Compare unsigned values to &lt; max instead of comparing signed values to &gt;= 0 &amp;&amp; &lt; max.</li>
+<li>Employ power of 2 integer math instead of math with any kind of integer.</li>
+<li>Use template specialization where possible to implement improved functionality.</li>
+<li>Avoid function calls when the call does something trivial. This improves debug build speed (which matters) and
+sometimes release build speed as well, though sometimes makes the code intent less clear. A comment next to the code
+saying what call it is replacing makes the intent clear without sacrificing performance.</li>
+</ul>
+<h2><a name="Unit_Tests" id="Unit_Tests"></a>Unit Tests</h2>
+<p>Writing robust templated containers and algorithms is difficult or impossible without a heavy unit test suite in place.
+ EASTL has a pretty extensive set of unit tests for all containers and algorithms. While the successful automated unit
+ testing of shipping application programs may be a difficult thing to pull off, unit testing of libraries such as this
+ is of huge importance and cannot be understated. </p>
+<ul>
+<li>When making a new unit test, start by copying one of the existing unit tests and follow its conventions.</li>
+<li>Test containers of both scalars and classes.</li>
+<li>Test algorithms on both container iterators (e.g. vector.begin()) and pointer iterators (e.g. int*).</li>
+<li>Make sure that algorithm or container member functions which take iterators work with the type of iterator they
+claim to (InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator).&nbsp;</li>
+<li>Test for const-correctness. If a user is allowed to modify something that is supposed to be const, silent errors
+can go undetected.</li>
+<li>Make sure that unit tests cover all functions and all pathways of the tested code. This means that in writing the
+unit test you need to look at the source code to understand all the pathways.</li>
+<li>Consider using a random number generator (one is provided in the test library) to do 'monkey' testing whereby
+unexpected input is given to a module being tested. When doing so, make sure you seed the generator in a way that
+problems can be reproduced.</li>
+<li>While we avoid macros in EASTL user code, macros to assist in unit tests aren't considered a problem. However,
+consider that a number of macros could be replaced by templated functions and thus be easier to work with.</li>
+<li>Unit tests don't need to be efficient; feel free to take up all the CPU power and time you need to test a module
+sufficiently.</li>
+<li>EASTL containers are not thread-safe, by design. Thus there is no need to do multithreading tests as long as you
+stay away from the usage of static and global variables.</li>
+<li>Unit tests must succeed with no memory leaks and of course no memory corruption. The heap system should be
+configured to test for this, and heap validation functions are available to the unit tests while in the middle of
+runs.</li>
+</ul>
+
+<h2><a name="Things_to_Keep_in_Mind" id="Things_to_Keep_in_Mind"></a>Things to Keep in Mind</h2>
+<ul>
+<li>When referring to EASTL functions and types from EASTL code, make sure to preface the type with the EASTL
+namespace. If you don't do this you can get collisions due to the compiler not knowing if it should use the EASTL
+namespace or the namespace of the templated type for the function or type.</li>
+<li>Newly constructed empty containers do no memory allocation. Some STL and other container libraries allocate an
+initial node from the class memory allocator. EASTL containers by design never do this.&nbsp;If a container needs an
+initial node, that node should be made part of the container itself or be a static empty node object.</li>
+<li>Empty containers (new or otherwise) contain no constructed objects, including those that might be in an 'end' node.
+Similarly, no user object (e.g. of type T) should be constructed unless required by the design and unless documented in
+the cotainer/algorithm contract.&nbsp;</li>
+<li>When creating a new container class, it's best to copy from an existing similar class to the extent possible. This
+helps keep the library consistent and resolves subtle problems that can happen in the construction of containers.</li>
+<li>Be very careful about tweaking the code. It's easy to think (for example) that a &gt; could be switch to a &gt;=
+where instead it is a big deal. Just about every line of code in EASTL has been thought through and has a purpose. Unit
+tests may or may not currently test every bit of EASTL, so you can't necessarily rely on them to give you 100%
+confidence in changes. If you are not sure about something, contact the original author and he will tell you for
+sure.</li>
+<li>Algorithm templates always work with iterators and not containers. A given container may of course implement an
+optimized form or an algorithm itself.</li>
+<li>Make sure everything is heavily unit tested. If somebody finds a bug, fix the bug and make a unit test to make sure
+the bug doesn't happen again.</li>
+<li>It's easy to get iterator categories confused or forgotten while implementing algorithms and containers.</li>
+<li>Watch out for the strictness of GCC 3.4+. There is a bit of syntax &#8212; especially related to templates &#8212; that other
+compilers accept but GCC 3.4+ will not.</li>
+<li>Don't forget to update the config.h EASTL_VERSION define before publishing.</li>
+<li>The vector and string classes define iterator to be T*. We want to always leave this so &#8212; at least in release
+builds &#8212; as this gives some algorithms an advantage that optimizers cannot get around.</li>
+</ul>
+<hr style="width: 100%; height: 2px;">
+<br>
+<br>
+<br>
+<br>
+<br>
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTL Modules.html b/EASTL/doc/html/EASTL Modules.html
new file mode 100644
index 0000000..620937e
--- /dev/null
+++ b/EASTL/doc/html/EASTL Modules.html
@@ -0,0 +1,666 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EASTL Modules</title>
+ <meta content="text/html; charset=us-ascii" http-equiv="content-type">
+ <meta name="author" content="Paul Pedriana">
+ <meta name="description" content="Lists the top-level modules present in EASTL.">
+ <link type="text/css" rel="stylesheet" href="EASTLDoc.css">
+ <style type="text/css">
+<!--
+.style1 {font-size: 10pt}
+-->
+ </style>
+</head>
+<body>
+<h1><font size="+3">EASTL Modules</font></h1>
+<h2> Introduction</h2>
+<p>We provide here a list of all top-level modules present or planned for future presence in EASTL. In some cases (e.g.
+ algorithm), the module consists of many smaller submodules which are not described in detail here. In those cases you
+ should consult the source code for those modules or consult the detailed documentation for those modules. This document
+is a high level overview and not a detailed document.</p>
+<h2>Module List</h2>
+<table style="text-align: left; width: 100%;" border="1" cellpadding="2" cellspacing="2">
+<tbody>
+<tr>
+<td style="font-weight: bold;">&nbsp;Module</td>
+<td style="font-weight: bold;">Description</td>
+</tr>
+<tr>
+<td>config</td>
+<td>Configuration header. Allows for changing some compile-time options.</td>
+</tr>
+<tr>
+<td>slist<br>
+fixed_slist</td>
+<td>Singly-linked list.<br>
+fixed_slist is a version which is implemented via a fixed block of contiguous memory.</td>
+</tr>
+<tr>
+<td>list<br>
+fixed_list</td>
+<td>Doubly-linked list.</td>
+</tr>
+<tr>
+<td>intrusive_list<br>
+intrusive_slist</td>
+<td>List whereby the contained item provides the node implementation.</td>
+</tr>
+<tr>
+<td>array</td>
+<td>Wrapper for a C-style array which extends it to act like an STL container.</td>
+</tr>
+<tr>
+<td>vector<br>
+fixed_vector</td>
+<td>Resizable array container.</td>
+</tr>
+<tr>
+<td>vector_set<br>
+vector_multiset<br></td>
+<td>Set implemented via a vector instead of a tree. Speed and memory use is improved but resizing is slower.</td>
+</tr>
+<tr>
+<td>vector_map<br>
+vector_multimap<br></td>
+<td>Map implemented via a vector instead of a tree. Speed and memory use is improved but resizing is slower.</td>
+</tr>
+<tr>
+<td style="vertical-align: top;">deque<br></td>
+<td style="vertical-align: top;">Double-ended queue, but also with random access. Acts like a vector but insertions and
+removals are efficient.<br></td>
+</tr>
+<tr>
+<td>bit_vector</td>
+<td>Implements a vector of bool, but the actual storage is done with one bit per bool. Not the same thing as a
+bitset.</td>
+</tr>
+<tr>
+<td>bitset</td>
+<td>Implements an efficient arbitrarily-sized bitfield. Note that this is not strictly the same thing as a vector of
+bool (bit_vector), as it is optimized to act like an arbitrary set of flags and not to be a generic container which can
+be iterated, inserted, removed, etc.</td>
+</tr>
+<tr>
+<td>set<br>
+multiset<br>
+fixed_set<br>
+fixed_multiset<br></td>
+<td>A set is a sorted unique collection, multiset is sorted but non-unique collection.</td>
+</tr>
+<tr>
+<td>map<br>
+multimap<br>
+fixed_map<br>
+fixed_multimap</td>
+<td>A map is a sorted associative collection implemented via a tree. It is also known as dictionary.</td>
+</tr>
+<tr>
+<td>hash_map<br>
+hash_multimap<br>
+fixed_hash_map<br>
+fixed_hash_multimap</td>
+<td>Map implemented via a hash table.</td>
+</tr>
+<tr>
+<td>intrusive_hash_map<br>
+intrusive_hash_multimap<br>
+intrusive_hash_set<br>
+intrusive_hash_multiset</td>
+<td>hash_map whereby the contained item provides the node implementation, much like intrusive_list.</td>
+</tr>
+<tr>
+<td>hash_set<br>
+hash_multiset<br>
+fixed_hash_set<br>
+fixed_hash_map<br></td>
+<td>Set implemented via a hash table.</td>
+</tr>
+<tr>
+<td>basic_string<br>
+fixed_string<br>
+fixed_substring</td>
+<td>basic_string is a character string/array.<br>
+fixed_substring is a string which is a reference to a range within another string or character array.<br>
+cow_string is a string which implements copy-on-write.</td>
+</tr>
+<tr>
+<td>algorithm</td>
+<td>min/max, find, binary_search, random_shuffle, reverse, etc.&nbsp;</td>
+</tr>
+<tr>
+<td style="vertical-align: top;">sort<br></td>
+<td style="vertical-align: top;">Sorting functionality, including functionality not in STL. quick_sort, heap_sort,
+merge_sort, shell_sort, insertion_sort, etc.<br></td>
+</tr>
+<tr>
+<td>numeric</td>
+<td>Numeric algorithms: accumulate, inner_product, partial_sum, adjacent_difference, etc.</td>
+</tr>
+<tr>
+<td style="vertical-align: top;">heap<br></td>
+<td style="vertical-align: top;">Heap structure functionality: make_heap, push_heap, pop_heap, sort_heap, is_heap,
+remove_heap, etc.<br></td>
+</tr>
+<tr>
+<td style="vertical-align: top;">stack<br></td>
+<td style="vertical-align: top;">Adapts any container into a stack.<br></td>
+</tr>
+<tr>
+<td style="vertical-align: top;">queue<br></td>
+<td style="vertical-align: top;">Adapts any container into a queue.<br></td>
+</tr>
+<tr>
+<td style="vertical-align: top;">priority_queue<br></td>
+<td style="vertical-align: top;">Implements a conventional priority queue via a heap structure.<br></td>
+</tr>
+<tr>
+<td>type_traits</td>
+<td>Type information, useful for writing optimized and robust code. Also used for implementing optimized containers and
+algorithms.</td>
+</tr>
+<tr>
+<td style="vertical-align: top;">utility<br></td>
+<td style="vertical-align: top;">pair, make_pair, rel_ops, etc.<br></td>
+</tr>
+<tr>
+<td style="vertical-align: top;">functional<br></td>
+<td style="vertical-align: top;">Function objects.<br></td>
+</tr>
+<tr>
+<td style="vertical-align: top;">iterator<br></td>
+<td style="vertical-align: top;">Iteration for containers and algorithms.<br></td>
+</tr>
+<tr>
+<td>smart_ptr</td>
+<td>Smart pointers:&nbsp;shared_ptr, shared_array, weak_ptr, scoped_ptr, scoped_array, linked_ptr, linked_array,
+intrusive_ptr.</td>
+</tr>
+</tbody>
+</table>
+<p>&nbsp;</p>
+<h2>Module Behaviour</h2>
+<p>The overhead sizes listed here refer to an optimized release build; debug builds may add some additional overhead. Some
+ of the overhead sizes may be off by a little bit (usually at most 4 bytes). This is because the values reported here
+ are those that refer to when EASTL's container optimizations have been complete. These optimizations may not have been
+ completed as you are reading this.</p>
+<table style="width: 100%;" border="1" cellpadding="1" cellspacing="1">
+<tbody>
+<tr>
+<td style="width: 15%; vertical-align: top; height: 13px; font-weight: bold;">
+<p>Container</p>
+</td>
+<td style="font-weight: bold; text-align: center;" height="13" valign="top" width="10%">
+<p>Stores</p>
+</td>
+<td style="font-weight: bold; text-align: center;">Container Overhead (32 bit)</td>
+<td style="font-weight: bold; text-align: center;">Container Overhead (64 bit)</td>
+<td style="font-weight: bold; text-align: center;" height="13" valign="top" width="10%">
+<p>Node Overhead (32 bit)</p>
+</td>
+<td style="font-weight: bold; text-align: center;">Node Overhead (64 bit)</td>
+<td style="font-weight: bold; text-align: center;" height="13" valign="top" width="9%">
+<p>Iterator category</p>
+</td>
+<td style="text-align: center; font-weight: bold;">size() efficiency</td>
+<td style="text-align: center; font-weight: bold;">operator[] efficiency</td>
+<td style="font-weight: bold; text-align: center;" height="13" valign="top" width="16%">
+<p>Insert efficiency</p>
+</td>
+<td style="font-weight: bold; text-align: center;" height="13" valign="top" width="16%">
+<p>Erase via Iterator efficiency</p>
+</td>
+<td style="font-weight: bold; text-align: center;" height="13" valign="top" width="7%">
+<p>Find efficiency</p>
+</td>
+<td style="font-weight: bold; text-align: center;" height="13" valign="top" width="10%">
+<p>Sort efficiency</p>
+</td>
+</tr>
+<tr>
+<td>slist</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">f</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">n+</td>
+</tr>
+<tr>
+<td height="13" valign="top" width="15%">
+<p>list</p>
+</td>
+<td style="text-align: center;" height="13" valign="top" width="10%">
+<p>T</p>
+</td>
+<td style="text-align: center;">12</td>
+<td style="text-align: center;">24</td>
+<td style="text-align: center;" height="13" valign="top" width="10%">
+<p>8</p>
+</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;" height="13" valign="top" width="9%">
+<p>b</p>
+</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;" height="13" valign="top" width="16%">
+<p>1</p>
+</td>
+<td style="text-align: center;" height="13" valign="top" width="16%">
+<p>1</p>
+</td>
+<td style="text-align: center;" height="13" valign="top" width="7%">
+<p>n</p>
+</td>
+<td style="text-align: center;" height="13" valign="top" width="10%">
+<p>n log(n)</p>
+</td>
+</tr>
+<tr>
+<td>intrusive_slist</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">f</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">n+</td>
+</tr>
+<tr>
+<td>intrusive_list</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">n log(n)</td>
+</tr>
+<tr>
+<td>array</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">n log(n)</td>
+</tr>
+<tr>
+<td>vector</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">32</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">n log(n)</td>
+</tr>
+<tr>
+<td>vector_set</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">32</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">1</td>
+</tr>
+<tr>
+<td>vector_multiset</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">32</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">1</td>
+</tr>
+<tr>
+<td>vector_map</td>
+<td style="text-align: center;">Key, T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">32</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">1</td>
+</tr>
+<tr>
+<td>vector_multimap</td>
+<td style="text-align: center;">Key, T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">32</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">1</td>
+</tr>
+<tr>
+<td>deque</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">44</td>
+<td style="text-align: center;">84</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1&nbsp;at begin or end,<br>
+else n / 2</td>
+<td style="text-align: center;">1&nbsp;at begin or end,<br>
+else n / 2</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">n log(n)</td>
+</tr>
+<tr>
+<td>bit_vector</td>
+<td style="text-align: center;">bool</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">n log(n)</td>
+</tr>
+<tr>
+<td>string (all types)</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">32</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">0</td>
+<td style="text-align: center;">r</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">1&nbsp;at end, else n</td>
+<td style="text-align: center;">n</td>
+<td style="text-align: center;">n log(n)</td>
+</tr>
+<tr>
+<td>set</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">24</td>
+<td style="text-align: center;">44</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">28</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">1</td>
+</tr>
+<tr>
+<td>multiset</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">24</td>
+<td style="text-align: center;">44</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">28</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">1</td>
+</tr>
+<tr>
+<td>map</td>
+<td style="text-align: center;">Key, T</td>
+<td style="text-align: center;">24</td>
+<td style="text-align: center;">44</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">28</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">1</td>
+</tr>
+<tr>
+<td>multimap</td>
+<td style="text-align: center;">Key, T</td>
+<td style="text-align: center;">24</td>
+<td style="text-align: center;">44</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">28</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">log(n)</td>
+<td style="text-align: center;">1</td>
+</tr>
+<tr>
+<td>hash_set</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">20</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+</tr>
+<tr>
+<td>hash_multiset</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">20</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1<br></td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+</tr>
+<tr>
+<td>hash_map</td>
+<td style="text-align: center;">Key, T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">20</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+</tr>
+<tr>
+<td>hash_multimap</td>
+<td style="text-align: center;">Key, T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">20</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+</tr>
+<tr>
+<td>intrusive_hash_set</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">20</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+</tr>
+<tr>
+<td>intrusive_hash_multiset</td>
+<td style="text-align: center;">T</td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">20</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+</tr>
+<tr>
+<td>intrusive_hash_map</td>
+<td style="text-align: center;">T <small>(Key == T)</small></td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">20</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+</tr>
+<tr>
+<td>intrusive_hash_multimap</td>
+<td style="text-align: center;">T <small>(Key == T)&nbsp;</small></td>
+<td style="text-align: center;">16</td>
+<td style="text-align: center;">20</td>
+<td style="text-align: center;">4</td>
+<td style="text-align: center;">8</td>
+<td style="text-align: center;">b</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">1</td>
+<td style="text-align: center;">-</td>
+</tr>
+</tbody>
+</table>
+<ul>
+<li>- means that the operation does not exist.</li>
+<li>1 means amortized constant time. Also known as O(1)</li>
+<li>n means time proportional to the container size. Also known as O(n)</li>
+<li>log(n) means time proportional to the natural logarithm of the container size. Also known as O(log(n))</li>
+<li>n log(n) means time proportional to log(n) times the size of the container. Also known as O(n log(n))</li>
+<li>n+ means that the time is at least n, and possibly higher.</li>
+<li>Iterator meanings are: f = forward iterator; b = bidirectional iterator, r = random iterator.</li>
+<li>Overhead indicates approximate per-element overhead memory required in bytes. Overhead doesn't include possible
+additional overhead that may be imposed by the memory heap used to allocate nodes. General heaps tend to have between 4
+and 16 bytes of overhead per allocation, depending on the heap.</li>
+<li>Some overhead values are dependent on the structure alignment characteristics in effect. The values reported here
+are those that would be in effect for a system that requires pointers to be aligned on boundaries of their size and
+allocations with a minimum of 4 bytes (thus one byte values get rounded up to 4).</li>
+<li>Some overhead values are dependent on the size_type used by containers. size_type defaults to size_t, but it is possible to force it to be 4 bytes for 64 bit machines by defining EASTL_SIZE_T_32BIT.</li>
+<li>Inserting at the end of a vector may cause the vector to be resized; resizing a vector is O(n). However, the
+amortized time complexity for vector insertions at the end is constant.</li>
+<li>Sort assumes the usage of the best possible sort for a large container of random data. Some sort algorithms (e.g.
+quick_sort) require random access iterators and so the sorting of some containers requires a different sort algorithm.
+We do not include bucket or radix sorts, as they are always O(n).</li>
+<li>Some containers (e.g. deque, hash*) have unusual data structures that make per-container and per-node overhead
+calculations not quite account for all memory.</li>
+</ul>
+<hr style="width: 100%; height: 2px;">
+End of document<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+<br>
+</body>
+</html>
diff --git a/EASTL/doc/html/EASTLDoc.css b/EASTL/doc/html/EASTLDoc.css
new file mode 100644
index 0000000..b2656d8
--- /dev/null
+++ b/EASTL/doc/html/EASTLDoc.css
@@ -0,0 +1,86 @@
+body
+{
+ font-family: Georgia, "Times New Roman", Times, serif;
+ font-size: 12pt;
+}
+
+h1
+{
+ font-family: Verdana, Arial, Helvetica, sans-serif;
+ display: block;
+ background-color: #BBCCDD;
+ border: 2px solid #000000;
+ font-size: 16pt;
+ font-weight: bold;
+ padding: 6px;
+}
+
+h2
+{
+ font-size: 14pt;
+ font-family: Verdana;
+ border-bottom: 2px solid black;
+}
+
+h3
+{
+ font-family: Verdana;
+ font-size: 13pt;
+ font-weight: bold;
+}
+
+.code-example
+{
+ display: block;
+ background-color: #D1DDE9;
+ margin-left: 3em;
+ margin-right: 3em;
+ margin-top: 1em;
+ margin-bottom: 1em;
+ padding: 8px;
+ border: 2px solid #7993C8;
+ font-family: "Courier New", Courier, mono;
+ font-size: 10pt;
+ white-space: pre;
+}
+
+.code-example-span
+{
+ font-family: "Courier New", Courier, mono;
+ font-size: 10pt;
+ white-space: pre;
+}
+
+.code-example-comment
+{
+ background-color: #e0e0f0;
+ padding: 0px 0px;
+ font-family: "Courier New", Courier, mono;
+ font-size: 10pt;
+ white-space: pre;
+ color: #999999;
+ margin: auto auto;
+}
+
+
+.faq-question
+{
+ background-color: #D9E2EC;
+ font-size: 12pt;
+ font-weight: bold;
+ margin-top: 0em;
+ padding-left:5px;
+ padding-right:8px;
+ padding-top:2px;
+ padding-bottom:3px;
+ margin-bottom: 0.5em;
+}
+
+.faq-answer
+{
+ display: block;
+ margin: 4pt 1em 0.8em;
+}
+.indented {
+ margin-left: 50px;
+}
diff --git a/EASTL/doc/quick-reference.pdf b/EASTL/doc/quick-reference.pdf
new file mode 100644
index 0000000..b62ff9d
--- /dev/null
+++ b/EASTL/doc/quick-reference.pdf
Binary files differ
diff --git a/EASTL/include/EASTL/algorithm.h b/EASTL/include/EASTL/algorithm.h
new file mode 100644
index 0000000..6257514
--- /dev/null
+++ b/EASTL/include/EASTL/algorithm.h
@@ -0,0 +1,4342 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements some of the primary algorithms from the C++ STL
+// algorithm library. These versions are just like that STL versions and so
+// are redundant. They are provided solely for the purpose of projects that
+// either cannot use standard C++ STL or want algorithms that have guaranteed
+// identical behaviour across platforms.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Definitions
+//
+// You will notice that we are very particular about the templated typenames
+// we use here. You will notice that we follow the C++ standard closely in
+// these respects. Each of these typenames have a specific meaning;
+// this is why we don't just label templated arguments with just letters
+// such as T, U, V, A, B. Here we provide a quick reference for the typenames
+// we use. See the C++ standard, section 25-8 for more details.
+// --------------------------------------------------------------
+// typename Meaning
+// --------------------------------------------------------------
+// T The value type.
+// Compare A function which takes two arguments and returns the lesser of the two.
+// Predicate A function which takes one argument returns true if the argument meets some criteria.
+// BinaryPredicate A function which takes two arguments and returns true if some criteria is met (e.g. they are equal).
+// StrickWeakOrdering A BinaryPredicate that compares two objects, returning true if the first precedes the second. Like Compare but has additional requirements. Used for sorting routines.
+// Function A function which takes one argument and applies some operation to the target.
+// Size A count or size.
+// Generator A function which takes no arguments and returns a value (which will usually be assigned to an object).
+// UnaryOperation A function which takes one argument and returns a value (which will usually be assigned to second object).
+// BinaryOperation A function which takes two arguments and returns a value (which will usually be assigned to a third object).
+// InputIterator An input iterator (iterator you read from) which allows reading each element only once and only in a forward direction.
+// ForwardIterator An input iterator which is like InputIterator except it can be reset back to the beginning.
+// BidirectionalIterator An input iterator which is like ForwardIterator except it can be read in a backward direction as well.
+// RandomAccessIterator An input iterator which can be addressed like an array. It is a superset of all other input iterators.
+// OutputIterator An output iterator (iterator you write to) which allows writing each element only once in only in a forward direction.
+//
+// Note that with iterators that a function which takes an InputIterator will
+// also work with a ForwardIterator, BidirectionalIterator, or RandomAccessIterator.
+// The given iterator type is merely the -minimum- supported functionality the
+// iterator must support.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Optimizations
+//
+// There are a number of opportunities for opptimizations that we take here
+// in this library. The most obvious kinds are those that subsitute memcpy
+// in the place of a conventional loop for data types with which this is
+// possible. The algorithms here are optimized to a higher level than currently
+// available C++ STL algorithms from vendors such as Microsoft. This is especially
+// so for game programming on console devices, as we do things such as reduce
+// branching relative to other STL algorithm implementations. However, the
+// proper implementation of these algorithm optimizations is a fairly tricky
+// thing.
+//
+// The various things we look to take advantage of in order to implement
+// optimizations include:
+// - Taking advantage of random access iterators.
+// - Taking advantage of POD (plain old data) data types.
+// - Taking advantage of type_traits in general.
+// - Reducing branching and taking advantage of likely branch predictions.
+// - Taking advantage of issues related to pointer and reference aliasing.
+// - Improving cache coherency during memory accesses.
+// - Making code more likely to be inlinable by the compiler.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Supported Algorithms
+//
+// Algorithms that we implement are listed here. Note that these items are not
+// all within this header file, as we split up the header files in order to
+// improve compilation performance. Items marked with '+' are items that are
+// extensions which don't exist in the C++ standard.
+//
+// -------------------------------------------------------------------------------
+// Algorithm Notes
+// -------------------------------------------------------------------------------
+// adjacent_find
+// adjacent_find<Compare>
+// all_of C++11
+// any_of C++11
+// none_of C++11
+// binary_search
+// binary_search<Compare>
+// +binary_search_i
+// +binary_search_i<Compare>
+// +change_heap Found in heap.h
+// +change_heap<Compare> Found in heap.h
+// clamp
+// copy
+// copy_if C++11
+// copy_n C++11
+// copy_backward
+// count
+// count_if
+// equal
+// equal<Compare>
+// equal_range
+// equal_range<Compare>
+// fill
+// fill_n
+// find
+// find_end
+// find_end<Compare>
+// find_first_of
+// find_first_of<Compare>
+// +find_first_not_of
+// +find_first_not_of<Compare>
+// +find_last_of
+// +find_last_of<Compare>
+// +find_last_not_of
+// +find_last_not_of<Compare>
+// find_if
+// find_if_not
+// for_each
+// generate
+// generate_n
+// +identical
+// +identical<Compare>
+// iter_swap
+// lexicographical_compare
+// lexicographical_compare<Compare>
+// lexicographical_compare_three_way
+// lower_bound
+// lower_bound<Compare>
+// make_heap Found in heap.h
+// make_heap<Compare> Found in heap.h
+// min
+// min<Compare>
+// max
+// max<Compare>
+// +min_alt Exists to work around the problem of conflicts with min/max #defines on some systems.
+// +min_alt<Compare>
+// +max_alt
+// +max_alt<Compare>
+// +median
+// +median<Compare>
+// merge Found in sort.h
+// merge<Compare> Found in sort.h
+// min_element
+// min_element<Compare>
+// max_element
+// max_element<Compare>
+// mismatch
+// mismatch<Compare>
+// move
+// move_backward
+// nth_element Found in sort.h
+// nth_element<Compare> Found in sort.h
+// partial_sort Found in sort.h
+// partial_sort<Compare> Found in sort.h
+// push_heap Found in heap.h
+// push_heap<Compare> Found in heap.h
+// pop_heap Found in heap.h
+// pop_heap<Compare> Found in heap.h
+// random_shuffle<Random>
+// remove
+// remove_if
+// +apply_and_remove
+// +apply_and_remove_if
+// remove_copy
+// remove_copy_if
+// +remove_heap Found in heap.h
+// +remove_heap<Compare> Found in heap.h
+// replace
+// replace_if
+// replace_copy
+// replace_copy_if
+// reverse_copy
+// reverse
+// random_shuffle
+// rotate
+// rotate_copy
+// search
+// search<Compare>
+// search_n
+// set_difference
+// set_difference<Compare>
+// set_difference_2
+// set_difference_2<Compare>
+// set_decomposition
+// set_decomposition<Compare>
+// set_intersection
+// set_intersection<Compare>
+// set_symmetric_difference
+// set_symmetric_difference<Compare>
+// set_union
+// set_union<Compare>
+// sort Found in sort.h
+// sort<Compare> Found in sort.h
+// sort_heap Found in heap.h
+// sort_heap<Compare> Found in heap.h
+// stable_sort Found in sort.h
+// stable_sort<Compare> Found in sort.h
+// swap
+// swap_ranges
+// transform
+// transform<Operation>
+// unique
+// unique<Compare>
+// upper_bound
+// upper_bound<Compare>
+// is_permutation
+// is_permutation<Predicate>
+// next_permutation
+// next_permutation<Compare>
+//
+// Algorithms from the C++ standard that we don't implement are listed here.
+// Most of these items are absent because they aren't used very often.
+// They also happen to be the more complicated than other algorithms.
+// However, we can implement any of these functions for users that might
+// need them.
+// includes
+// includes<Compare>
+// inplace_merge
+// inplace_merge<Compare>
+// partial_sort_copy
+// partial_sort_copy<Compare>
+// paritition
+// prev_permutation
+// prev_permutation<Compare>
+// search_n<Compare>
+// stable_partition
+// unique_copy
+// unique_copy<Compare>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ALGORITHM_H
+#define EASTL_ALGORITHM_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/internal/copy_help.h>
+#include <EASTL/internal/fill_help.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/iterator.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+#include <EASTL/internal/generic_iterator.h>
+#include <EASTL/random.h>
+#include <EASTL/compare.h>
+
+EA_DISABLE_ALL_VC_WARNINGS();
+
+ #if defined(EA_COMPILER_MSVC) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #include <intrin.h>
+ #endif
+
+ #include <stddef.h>
+ #include <string.h> // memcpy, memcmp, memmove
+
+EA_RESTORE_ALL_VC_WARNINGS();
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// min/max workaround
+//
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. The following may still not completely resolve some kinds of
+// problems with MSVC++ #defines, though it deals with most cases in production
+// game code.
+//
+#if EASTL_NOMINMAX
+ #ifdef min
+ #undef min
+ #endif
+ #ifdef max
+ #undef max
+ #endif
+#endif
+
+
+
+
+namespace eastl
+{
+ /// min_element
+ ///
+ /// min_element finds the smallest element in the range [first, last).
+ /// It returns the first iterator i in [first, last) such that no other
+ /// iterator in [first, last) points to a value smaller than *i.
+ /// The return value is last if and only if [first, last) is an empty range.
+ ///
+ /// Returns: The first iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, last) the following corresponding
+ /// condition holds: !(*j < *i).
+ ///
+ /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the
+ /// corresponding comparisons.
+ ///
+ template <typename ForwardIterator>
+ ForwardIterator min_element(ForwardIterator first, ForwardIterator last)
+ {
+ if(first != last)
+ {
+ ForwardIterator currentMin = first;
+
+ while(++first != last)
+ {
+ if(*first < *currentMin)
+ currentMin = first;
+ }
+ return currentMin;
+ }
+ return first;
+ }
+
+
+ /// min_element
+ ///
+ /// min_element finds the smallest element in the range [first, last).
+ /// It returns the first iterator i in [first, last) such that no other
+ /// iterator in [first, last) points to a value smaller than *i.
+ /// The return value is last if and only if [first, last) is an empty range.
+ ///
+ /// Returns: The first iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, last) the following corresponding
+ /// conditions hold: compare(*j, *i) == false.
+ ///
+ /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the
+ /// corresponding comparisons.
+ ///
+ template <typename ForwardIterator, typename Compare>
+ ForwardIterator min_element(ForwardIterator first, ForwardIterator last, Compare compare)
+ {
+ if(first != last)
+ {
+ ForwardIterator currentMin = first;
+
+ while(++first != last)
+ {
+ if(compare(*first, *currentMin))
+ currentMin = first;
+ }
+ return currentMin;
+ }
+ return first;
+ }
+
+
+ /// max_element
+ ///
+ /// max_element finds the largest element in the range [first, last).
+ /// It returns the first iterator i in [first, last) such that no other
+ /// iterator in [first, last) points to a value greater than *i.
+ /// The return value is last if and only if [first, last) is an empty range.
+ ///
+ /// Returns: The first iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, last) the following corresponding
+ /// condition holds: !(*i < *j).
+ ///
+ /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the
+ /// corresponding comparisons.
+ ///
+ template <typename ForwardIterator>
+ ForwardIterator max_element(ForwardIterator first, ForwardIterator last)
+ {
+ if(first != last)
+ {
+ ForwardIterator currentMax = first;
+
+ while(++first != last)
+ {
+ if(*currentMax < *first)
+ currentMax = first;
+ }
+ return currentMax;
+ }
+ return first;
+ }
+
+
+ /// max_element
+ ///
+ /// max_element finds the largest element in the range [first, last).
+ /// It returns the first iterator i in [first, last) such that no other
+ /// iterator in [first, last) points to a value greater than *i.
+ /// The return value is last if and only if [first, last) is an empty range.
+ ///
+ /// Returns: The first iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, last) the following corresponding
+ /// condition holds: compare(*i, *j) == false.
+ ///
+ /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the
+ /// corresponding comparisons.
+ ///
+ template <typename ForwardIterator, typename Compare>
+ ForwardIterator max_element(ForwardIterator first, ForwardIterator last, Compare compare)
+ {
+ if(first != last)
+ {
+ ForwardIterator currentMax = first;
+
+ while(++first != last)
+ {
+ if(compare(*currentMax, *first))
+ currentMax = first;
+ }
+ return currentMax;
+ }
+ return first;
+ }
+
+
+ #if EASTL_MINMAX_ENABLED
+
+ /// min
+ ///
+ /// Min returns the lesser of its two arguments; it returns the first
+ /// argument if neither is less than the other. The two arguments are
+ /// compared with operator <.
+ ///
+ /// This min and our other min implementations are defined as returning:
+ /// b < a ? b : a
+ /// which for example may in practice result in something different than:
+ /// b <= a ? b : a
+ /// in the case where b is different from a (though they compare as equal).
+ /// We choose the specific ordering here because that's the ordering
+ /// done by other STL implementations.
+ ///
+ /// Some compilers (e.g. VS20003 - VS2013) generate poor code for the case of
+ /// scalars returned by reference, so we provide a specialization for those cases.
+ /// The specialization returns T by value instead of reference, which is
+ /// not that the Standard specifies. The Standard allows you to use
+ /// an expression like &max(x, y), which would be impossible in this case.
+ /// However, we have found no actual code that uses min or max like this and
+ /// this specialization causes no problems in practice. Microsoft has acknowledged
+ /// the problem and may fix it for a future VS version.
+ ///
+ template <typename T>
+ inline EA_CONSTEXPR typename eastl::enable_if<eastl::is_scalar<T>::value, T>::type
+ min(T a, T b)
+ {
+ return b < a ? b : a;
+ }
+
+ template <typename T>
+ inline EA_CONSTEXPR typename eastl::enable_if<!eastl::is_scalar<T>::value, const T&>::type
+ min(const T& a, const T& b)
+ {
+ return b < a ? b : a;
+ }
+
+ inline EA_CONSTEXPR float min(float a, float b) { return b < a ? b : a; }
+ inline EA_CONSTEXPR double min(double a, double b) { return b < a ? b : a; }
+ inline EA_CONSTEXPR long double min(long double a, long double b) { return b < a ? b : a; }
+
+ #endif // EASTL_MINMAX_ENABLED
+
+
+ /// min_alt
+ ///
+ /// This is an alternative version of min that avoids any possible
+ /// collisions with Microsoft #defines of min and max.
+ ///
+ /// See min(a, b) for detailed specifications.
+ ///
+ template <typename T>
+ inline EA_CONSTEXPR typename eastl::enable_if<eastl::is_scalar<T>::value, T>::type
+ min_alt(T a, T b)
+ {
+ return b < a ? b : a;
+ }
+
+ template <typename T>
+ inline typename eastl::enable_if<!eastl::is_scalar<T>::value, const T&>::type
+ min_alt(const T& a, const T& b)
+ {
+ return b < a ? b : a;
+ }
+
+ inline EA_CONSTEXPR float min_alt(float a, float b) { return b < a ? b : a; }
+ inline EA_CONSTEXPR double min_alt(double a, double b) { return b < a ? b : a; }
+ inline EA_CONSTEXPR long double min_alt(long double a, long double b) { return b < a ? b : a; }
+
+
+ #if EASTL_MINMAX_ENABLED
+
+ /// min
+ ///
+ /// Min returns the lesser of its two arguments; it returns the first
+ /// argument if neither is less than the other. The two arguments are
+ /// compared with the Compare function (or function object), which
+ /// takes two arguments and returns true if the first is less than
+ /// the second.
+ ///
+ /// See min(a, b) for detailed specifications.
+ ///
+ /// Example usage:
+ /// struct A{ int a; };
+ /// struct Struct{ bool operator()(const A& a1, const A& a2){ return a1.a < a2.a; } };
+ ///
+ /// A a1, a2, a3;
+ /// a3 = min(a1, a2, Struct());
+ ///
+ /// Example usage:
+ /// struct B{ int b; };
+ /// inline bool Function(const B& b1, const B& b2){ return b1.b < b2.b; }
+ ///
+ /// B b1, b2, b3;
+ /// b3 = min(b1, b2, Function);
+ ///
+ template <typename T, typename Compare>
+ inline const T&
+ min(const T& a, const T& b, Compare compare)
+ {
+ return compare(b, a) ? b : a;
+ }
+
+ #endif // EASTL_MINMAX_ENABLED
+
+
+ /// min_alt
+ ///
+ /// This is an alternative version of min that avoids any possible
+ /// collisions with Microsoft #defines of min and max.
+ ///
+ /// See min(a, b) for detailed specifications.
+ ///
+ template <typename T, typename Compare>
+ inline const T&
+ min_alt(const T& a, const T& b, Compare compare)
+ {
+ return compare(b, a) ? b : a;
+ }
+
+
+ #if EASTL_MINMAX_ENABLED
+
+ /// max
+ ///
+ /// Max returns the greater of its two arguments; it returns the first
+ /// argument if neither is greater than the other. The two arguments are
+ /// compared with operator < (and not operator >).
+ ///
+ /// This min and our other min implementations are defined as returning:
+ /// a < b ? b : a
+ /// which for example may in practice result in something different than:
+ /// a <= b ? b : a
+ /// in the case where b is different from a (though they compare as equal).
+ /// We choose the specific ordering here because that's the ordering
+ /// done by other STL implementations.
+ ///
+ template <typename T>
+ inline EA_CONSTEXPR typename eastl::enable_if<eastl::is_scalar<T>::value, T>::type
+ max(T a, T b)
+ {
+ return a < b ? b : a;
+ }
+
+ template <typename T>
+ inline EA_CONSTEXPR typename eastl::enable_if<!eastl::is_scalar<T>::value, const T&>::type
+ max(const T& a, const T& b)
+ {
+ return a < b ? b : a;
+ }
+
+ inline EA_CONSTEXPR float max(float a, float b) { return a < b ? b : a; }
+ inline EA_CONSTEXPR double max(double a, double b) { return a < b ? b : a; }
+ inline EA_CONSTEXPR long double max(long double a, long double b) { return a < b ? b : a; }
+
+ #endif // EASTL_MINMAX_ENABLED
+
+
+ /// max_alt
+ ///
+ /// This is an alternative version of max that avoids any possible
+ /// collisions with Microsoft #defines of min and max.
+ ///
+ template <typename T>
+ inline EA_CONSTEXPR typename eastl::enable_if<eastl::is_scalar<T>::value, T>::type
+ max_alt(T a, T b)
+ {
+ return a < b ? b : a;
+ }
+
+ template <typename T>
+ inline EA_CONSTEXPR typename eastl::enable_if<!eastl::is_scalar<T>::value, const T&>::type
+ max_alt(const T& a, const T& b)
+ {
+ return a < b ? b : a;
+ }
+
+ inline EA_CONSTEXPR float max_alt(float a, float b) { return a < b ? b : a; }
+ inline EA_CONSTEXPR double max_alt(double a, double b) { return a < b ? b : a; }
+ inline EA_CONSTEXPR long double max_alt(long double a, long double b) { return a < b ? b : a; }
+
+
+ #if EASTL_MINMAX_ENABLED
+ /// max
+ ///
+ /// Min returns the lesser of its two arguments; it returns the first
+ /// argument if neither is less than the other. The two arguments are
+ /// compared with the Compare function (or function object), which
+ /// takes two arguments and returns true if the first is less than
+ /// the second.
+ ///
+ template <typename T, typename Compare>
+ inline const T&
+ max(const T& a, const T& b, Compare compare)
+ {
+ return compare(a, b) ? b : a;
+ }
+ #endif
+
+
+ /// max_alt
+ ///
+ /// This is an alternative version of max that avoids any possible
+ /// collisions with Microsoft #defines of min and max.
+ ///
+ template <typename T, typename Compare>
+ inline const T&
+ max_alt(const T& a, const T& b, Compare compare)
+ {
+ return compare(a, b) ? b : a;
+ }
+
+
+ /// min(std::initializer_list)
+ ///
+ template <typename T >
+ T min(std::initializer_list<T> ilist)
+ {
+ return *eastl::min_element(ilist.begin(), ilist.end());
+ }
+
+ /// min(std::initializer_list, Compare)
+ ///
+ template <typename T, typename Compare>
+ T min(std::initializer_list<T> ilist, Compare compare)
+ {
+ return *eastl::min_element(ilist.begin(), ilist.end(), compare);
+ }
+
+
+ /// max(std::initializer_list)
+ ///
+ template <typename T >
+ T max(std::initializer_list<T> ilist)
+ {
+ return *eastl::max_element(ilist.begin(), ilist.end());
+ }
+
+ /// max(std::initializer_list, Compare)
+ ///
+ template <typename T, typename Compare>
+ T max(std::initializer_list<T> ilist, Compare compare)
+ {
+ return *eastl::max_element(ilist.begin(), ilist.end(), compare);
+ }
+
+
+ /// minmax_element
+ ///
+ /// Returns: make_pair(first, first) if [first, last) is empty, otherwise make_pair(m, M),
+ /// where m is the first iterator in [first,last) such that no iterator in the range
+ /// refers to a smaller element, and where M is the last iterator in [first,last) such
+ /// that no iterator in the range refers to a larger element.
+ ///
+ /// Complexity: At most max([(3/2)*(N - 1)], 0) applications of the corresponding predicate,
+ /// where N is distance(first, last).
+ ///
+ template <typename ForwardIterator, typename Compare>
+ eastl::pair<ForwardIterator, ForwardIterator>
+ minmax_element(ForwardIterator first, ForwardIterator last, Compare compare)
+ {
+ eastl::pair<ForwardIterator, ForwardIterator> result(first, first);
+
+ if(!(first == last) && !(++first == last))
+ {
+ if(compare(*first, *result.first))
+ {
+ result.second = result.first;
+ result.first = first;
+ }
+ else
+ result.second = first;
+
+ while(++first != last)
+ {
+ ForwardIterator i = first;
+
+ if(++first == last)
+ {
+ if(compare(*i, *result.first))
+ result.first = i;
+ else if(!compare(*i, *result.second))
+ result.second = i;
+ break;
+ }
+ else
+ {
+ if(compare(*first, *i))
+ {
+ if(compare(*first, *result.first))
+ result.first = first;
+
+ if(!compare(*i, *result.second))
+ result.second = i;
+ }
+ else
+ {
+ if(compare(*i, *result.first))
+ result.first = i;
+
+ if(!compare(*first, *result.second))
+ result.second = first;
+ }
+ }
+ }
+ }
+
+ return result;
+ }
+
+
+ template <typename ForwardIterator>
+ eastl::pair<ForwardIterator, ForwardIterator>
+ minmax_element(ForwardIterator first, ForwardIterator last)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+ return eastl::minmax_element(first, last, eastl::less<value_type>());
+ }
+
+
+
+ /// minmax
+ ///
+ /// Requires: Type T shall be LessThanComparable.
+ /// Returns: pair<const T&, const T&>(b, a) if b is smaller than a, and pair<const T&, const T&>(a, b) otherwise.
+ /// Remarks: Returns pair<const T&, const T&>(a, b) when the arguments are equivalent.
+ /// Complexity: Exactly one comparison.
+ ///
+
+ // The following optimization is a problem because it changes the return value in a way that would break
+ // users unless they used auto (e.g. auto result = minmax(17, 33); )
+ //
+ // template <typename T>
+ // inline EA_CONSTEXPR typename eastl::enable_if<eastl::is_scalar<T>::value, eastl::pair<T, T> >::type
+ // minmax(T a, T b)
+ // {
+ // return (b < a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b);
+ // }
+ //
+ // template <typename T>
+ // inline typename eastl::enable_if<!eastl::is_scalar<T>::value, eastl::pair<const T&, const T&> >::type
+ // minmax(const T& a, const T& b)
+ // {
+ // return (b < a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b);
+ // }
+
+ // It turns out that the following conforming definition of minmax generates a warning when used with VC++ up
+ // to at least VS2012. The VS2012 version of minmax is a broken and non-conforming definition, and we don't
+ // want to do that. We could do it for scalars alone, though we'd have to decide if we are going to do that
+ // for all compilers, because it changes the return value from a pair of references to a pair of values.
+ template <typename T>
+ inline eastl::pair<const T&, const T&>
+ minmax(const T& a, const T& b)
+ {
+ return (b < a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b);
+ }
+
+
+ template <typename T, typename Compare>
+ eastl::pair<const T&, const T&>
+ minmax(const T& a, const T& b, Compare compare)
+ {
+ return compare(b, a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b);
+ }
+
+
+
+ template <typename T>
+ eastl::pair<T, T>
+ minmax(std::initializer_list<T> ilist)
+ {
+ typedef typename std::initializer_list<T>::iterator iterator_type;
+ eastl::pair<iterator_type, iterator_type> iteratorPair = eastl::minmax_element(ilist.begin(), ilist.end());
+ return eastl::make_pair(*iteratorPair.first, *iteratorPair.second);
+ }
+
+ template <typename T, class Compare>
+ eastl::pair<T, T>
+ minmax(std::initializer_list<T> ilist, Compare compare)
+ {
+ typedef typename std::initializer_list<T>::iterator iterator_type;
+ eastl::pair<iterator_type, iterator_type> iteratorPair = eastl::minmax_element(ilist.begin(), ilist.end(), compare);
+ return eastl::make_pair(*iteratorPair.first, *iteratorPair.second);
+ }
+
+ template <typename T>
+ inline T&& median_impl(T&& a, T&& b, T&& c)
+ {
+ if(eastl::less<T>()(a, b))
+ {
+ if(eastl::less<T>()(b, c))
+ return eastl::forward<T>(b);
+ else if(eastl::less<T>()(a, c))
+ return eastl::forward<T>(c);
+ else
+ return eastl::forward<T>(a);
+ }
+ else if(eastl::less<T>()(a, c))
+ return eastl::forward<T>(a);
+ else if(eastl::less<T>()(b, c))
+ return eastl::forward<T>(c);
+ return eastl::forward<T>(b);
+ }
+
+ /// median
+ ///
+ /// median finds which element of three (a, b, d) is in-between the other two.
+ /// If two or more elements are equal, the first (e.g. a before b) is chosen.
+ ///
+ /// Complexity: Either two or three comparisons will be required, depending
+ /// on the values.
+ ///
+ template <typename T>
+ inline const T& median(const T& a, const T& b, const T& c)
+ {
+ return median_impl(a, b, c);
+ }
+
+ /// median
+ ///
+ /// median finds which element of three (a, b, d) is in-between the other two.
+ /// If two or more elements are equal, the first (e.g. a before b) is chosen.
+ ///
+ /// Complexity: Either two or three comparisons will be required, depending
+ /// on the values.
+ ///
+ template <typename T>
+ inline T&& median(T&& a, T&& b, T&& c)
+ {
+ return eastl::forward<T>(median_impl(eastl::forward<T>(a), eastl::forward<T>(b), eastl::forward<T>(c)));
+ }
+
+
+ template <typename T, typename Compare>
+ inline T&& median_impl(T&& a, T&& b, T&& c, Compare compare)
+ {
+ if(compare(a, b))
+ {
+ if(compare(b, c))
+ return eastl::forward<T>(b);
+ else if(compare(a, c))
+ return eastl::forward<T>(c);
+ else
+ return eastl::forward<T>(a);
+ }
+ else if(compare(a, c))
+ return eastl::forward<T>(a);
+ else if(compare(b, c))
+ return eastl::forward<T>(c);
+ return eastl::forward<T>(b);
+ }
+
+
+ /// median
+ ///
+ /// median finds which element of three (a, b, d) is in-between the other two.
+ /// If two or more elements are equal, the first (e.g. a before b) is chosen.
+ ///
+ /// Complexity: Either two or three comparisons will be required, depending
+ /// on the values.
+ ///
+ template <typename T, typename Compare>
+ inline const T& median(const T& a, const T& b, const T& c, Compare compare)
+ {
+ return median_impl<const T&, Compare>(a, b, c, compare);
+ }
+
+ /// median
+ ///
+ /// median finds which element of three (a, b, d) is in-between the other two.
+ /// If two or more elements are equal, the first (e.g. a before b) is chosen.
+ ///
+ /// Complexity: Either two or three comparisons will be required, depending
+ /// on the values.
+ ///
+ template <typename T, typename Compare>
+ inline T&& median(T&& a, T&& b, T&& c, Compare compare)
+ {
+ return eastl::forward<T>(median_impl<T&&, Compare>(eastl::forward<T>(a), eastl::forward<T>(b), eastl::forward<T>(c), compare));
+ }
+
+
+
+
+ /// all_of
+ ///
+ /// Returns: true if the unary predicate p returns true for all elements in the range [first, last)
+ ///
+ template <typename InputIterator, typename Predicate>
+ inline bool all_of(InputIterator first, InputIterator last, Predicate p)
+ {
+ for(; first != last; ++first)
+ {
+ if(!p(*first))
+ return false;
+ }
+ return true;
+ }
+
+
+ /// any_of
+ ///
+ /// Returns: true if the unary predicate p returns true for any of the elements in the range [first, last)
+ ///
+ template <typename InputIterator, typename Predicate>
+ inline bool any_of(InputIterator first, InputIterator last, Predicate p)
+ {
+ for(; first != last; ++first)
+ {
+ if(p(*first))
+ return true;
+ }
+ return false;
+ }
+
+
+ /// none_of
+ ///
+ /// Returns: true if the unary predicate p returns true for none of the elements in the range [first, last)
+ ///
+ template <typename InputIterator, typename Predicate>
+ inline bool none_of(InputIterator first, InputIterator last, Predicate p)
+ {
+ for(; first != last; ++first)
+ {
+ if(p(*first))
+ return false;
+ }
+ return true;
+ }
+
+
+ /// adjacent_find
+ ///
+ /// Returns: The first iterator i such that both i and i + 1 are in the range
+ /// [first, last) for which the following corresponding conditions hold: *i == *(i + 1).
+ /// Returns last if no such iterator is found.
+ ///
+ /// Complexity: Exactly 'find(first, last, value) - first' applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator>
+ inline ForwardIterator
+ adjacent_find(ForwardIterator first, ForwardIterator last)
+ {
+ if(first != last)
+ {
+ ForwardIterator i = first;
+
+ for(++i; i != last; ++i)
+ {
+ if(*first == *i)
+ return first;
+ first = i;
+ }
+ }
+ return last;
+ }
+
+
+
+ /// adjacent_find
+ ///
+ /// Returns: The first iterator i such that both i and i + 1 are in the range
+ /// [first, last) for which the following corresponding conditions hold: predicate(*i, *(i + 1)) != false.
+ /// Returns last if no such iterator is found.
+ ///
+ /// Complexity: Exactly 'find(first, last, value) - first' applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator, typename BinaryPredicate>
+ inline ForwardIterator
+ adjacent_find(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate)
+ {
+ if(first != last)
+ {
+ ForwardIterator i = first;
+
+ for(++i; i != last; ++i)
+ {
+ if(predicate(*first, *i))
+ return first;
+ first = i;
+ }
+ }
+ return last;
+ }
+
+
+ /// shuffle
+ ///
+ /// New for C++11
+ /// Randomizes a sequence of values via a user-supplied UniformRandomNumberGenerator.
+ /// The difference between this and the original random_shuffle function is that this uses the more
+ /// advanced and flexible UniformRandomNumberGenerator interface as opposed to the more
+ /// limited RandomNumberGenerator interface of random_shuffle.
+ ///
+ /// Effects: Shuffles the elements in the range [first, last) with uniform distribution.
+ ///
+ /// Complexity: Exactly '(last - first) - 1' swaps.
+ ///
+ /// Example usage:
+ /// struct Rand{ eastl_size_t operator()(eastl_size_t n) { return (eastl_size_t)(rand() % n); } }; // Note: The C rand function is poor and slow.
+ /// Rand randInstance;
+ /// shuffle(pArrayBegin, pArrayEnd, randInstance);
+ ///
+ // See the C++11 Standard, 26.5.1.3, Uniform random number generator requirements.
+ // Also http://en.cppreference.com/w/cpp/numeric/random/uniform_int_distribution
+
+ template <typename RandomAccessIterator, typename UniformRandomNumberGenerator>
+ void shuffle(RandomAccessIterator first, RandomAccessIterator last, UniformRandomNumberGenerator&& urng)
+ {
+ if(first != last)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::make_unsigned<difference_type>::type unsigned_difference_type;
+ typedef typename eastl::uniform_int_distribution<unsigned_difference_type> uniform_int_distribution;
+ typedef typename uniform_int_distribution::param_type uniform_int_distribution_param_type;
+
+ uniform_int_distribution uid;
+
+ for(RandomAccessIterator i = first + 1; i != last; ++i)
+ iter_swap(i, first + uid(urng, uniform_int_distribution_param_type(0, i - first)));
+ }
+ }
+
+
+ /// random_shuffle
+ ///
+ /// Randomizes a sequence of values.
+ ///
+ /// Effects: Shuffles the elements in the range [first, last) with uniform distribution.
+ ///
+ /// Complexity: Exactly '(last - first) - 1' swaps.
+ ///
+ /// Example usage:
+ /// eastl_size_t Rand(eastl_size_t n) { return (eastl_size_t)(rand() % n); } // Note: The C rand function is poor and slow.
+ /// pointer_to_unary_function<eastl_size_t, eastl_size_t> randInstance(Rand);
+ /// random_shuffle(pArrayBegin, pArrayEnd, randInstance);
+ ///
+ /// Example usage:
+ /// struct Rand{ eastl_size_t operator()(eastl_size_t n) { return (eastl_size_t)(rand() % n); } }; // Note: The C rand function is poor and slow.
+ /// Rand randInstance;
+ /// random_shuffle(pArrayBegin, pArrayEnd, randInstance);
+ ///
+ template <typename RandomAccessIterator, typename RandomNumberGenerator>
+ inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last, RandomNumberGenerator&& rng)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+
+ // We must do 'rand((i - first) + 1)' here and cannot do 'rand(last - first)',
+ // as it turns out that the latter results in unequal distribution probabilities.
+ // http://www.cigital.com/papers/download/developer_gambling.php
+
+ for(RandomAccessIterator i = first + 1; i < last; ++i)
+ iter_swap(i, first + (difference_type)rng((eastl_size_t)((i - first) + 1)));
+ }
+
+
+ /// random_shuffle
+ ///
+ /// Randomizes a sequence of values.
+ ///
+ /// Effects: Shuffles the elements in the range [first, last) with uniform distribution.
+ ///
+ /// Complexity: Exactly '(last - first) - 1' swaps.
+ ///
+ /// Example usage:
+ /// random_shuffle(pArrayBegin, pArrayEnd);
+ ///
+ /// *** Disabled until we decide if we want to get into the business of writing random number generators. ***
+ ///
+ /// template <typename RandomAccessIterator>
+ /// inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last)
+ /// {
+ /// for(RandomAccessIterator i = first + 1; i < last; ++i)
+ /// iter_swap(i, first + SomeRangedRandomNumberGenerator((i - first) + 1));
+ /// }
+
+
+
+
+
+
+ /// move_n
+ ///
+ /// Same as move(InputIterator, InputIterator, OutputIterator) except based on count instead of iterator range.
+ ///
+ template <typename InputIterator, typename Size, typename OutputIterator>
+ inline OutputIterator
+ move_n_impl(InputIterator first, Size n, OutputIterator result, EASTL_ITC_NS::input_iterator_tag)
+ {
+ for(; n > 0; --n)
+ *result++ = eastl::move(*first++);
+ return result;
+ }
+
+ template <typename RandomAccessIterator, typename Size, typename OutputIterator>
+ inline OutputIterator
+ move_n_impl(RandomAccessIterator first, Size n, OutputIterator result, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ return eastl::move(first, first + n, result); // Take advantage of the optimizations present in the move algorithm.
+ }
+
+
+ template <typename InputIterator, typename Size, typename OutputIterator>
+ inline OutputIterator
+ move_n(InputIterator first, Size n, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ return eastl::move_n_impl(first, n, result, IC());
+ }
+
+
+
+ /// copy_n
+ ///
+ /// Same as copy(InputIterator, InputIterator, OutputIterator) except based on count instead of iterator range.
+ /// Effects: Copies exactly count values from the range beginning at first to the range beginning at result, if count > 0. Does nothing otherwise.
+ /// Returns: Iterator in the destination range, pointing past the last element copied if count>0 or first otherwise.
+ /// Complexity: Exactly count assignments, if count > 0.
+ ///
+ template <typename InputIterator, typename Size, typename OutputIterator>
+ inline OutputIterator
+ copy_n_impl(InputIterator first, Size n, OutputIterator result, EASTL_ITC_NS::input_iterator_tag)
+ {
+ for(; n > 0; --n)
+ *result++ = *first++;
+ return result;
+ }
+
+ template <typename RandomAccessIterator, typename Size, typename OutputIterator>
+ inline OutputIterator
+ copy_n_impl(RandomAccessIterator first, Size n, OutputIterator result, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ return eastl::copy(first, first + n, result); // Take advantage of the optimizations present in the copy algorithm.
+ }
+
+
+ template <typename InputIterator, typename Size, typename OutputIterator>
+ inline OutputIterator
+ copy_n(InputIterator first, Size n, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ return eastl::copy_n_impl(first, n, result, IC());
+ }
+
+
+ /// copy_if
+ ///
+ /// Effects: Assigns to the result iterator only if the predicate is true.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename Predicate>
+ inline OutputIterator
+ copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate)
+ {
+ // This implementation's performance could be improved by taking a more complicated approach like with the copy algorithm.
+ for(; first != last; ++first)
+ {
+ if(predicate(*first))
+ *result++ = *first;
+ }
+
+ return result;
+ }
+
+
+
+
+ // Implementation moving copying both trivial and non-trivial data via a lesser iterator than random-access.
+ template <typename /*BidirectionalIterator1Category*/, bool /*isMove*/, bool /*canMemmove*/>
+ struct move_and_copy_backward_helper
+ {
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ {
+ while(first != last)
+ *--resultEnd = *--last;
+ return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end.
+ }
+ };
+
+ // Specialization for moving non-trivial data via a lesser iterator than random-access.
+ template <typename BidirectionalIterator1Category>
+ struct move_and_copy_backward_helper<BidirectionalIterator1Category, true, false>
+ {
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ {
+ while(first != last)
+ *--resultEnd = eastl::move(*--last);
+ return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end.
+ }
+ };
+
+ // Specialization for moving non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when its a compile-time const.
+ template<>
+ struct move_and_copy_backward_helper<EASTL_ITC_NS::random_access_iterator_tag, true, false>
+ {
+ template<typename BidirectionalIterator1, typename BidirectionalIterator2>
+ static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator1>::difference_type difference_type;
+
+ for(difference_type n = (last - first); n > 0; --n)
+ *--resultEnd = eastl::move(*--last);
+ return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end.
+ }
+ };
+
+ // Specialization for copying non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when its a compile-time const.
+ // This specialization converts the random access BidirectionalIterator1 last-first to an integral type. There's simple way for us to take advantage of a random access output iterator,
+ // as the range is specified by the input instead of the output, and distance(first, last) for a non-random-access iterator is potentially slow.
+ template <>
+ struct move_and_copy_backward_helper<EASTL_ITC_NS::random_access_iterator_tag, false, false>
+ {
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator1>::difference_type difference_type;
+
+ for(difference_type n = (last - first); n > 0; --n)
+ *--resultEnd = *--last;
+ return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end.
+ }
+ };
+
+ // Specialization for when we can use memmove/memcpy. See the notes above for what conditions allow this.
+ template <bool isMove>
+ struct move_and_copy_backward_helper<EASTL_ITC_NS::random_access_iterator_tag, isMove, true>
+ {
+ template <typename T>
+ static T* move_or_copy_backward(const T* first, const T* last, T* resultEnd)
+ {
+ return (T*)memmove(resultEnd - (last - first), first, (size_t)((uintptr_t)last - (uintptr_t)first));
+ // We could use memcpy here if there's no range overlap, but memcpy is rarely much faster than memmove.
+ }
+ };
+
+ template <bool isMove, typename BidirectionalIterator1, typename BidirectionalIterator2>
+ inline BidirectionalIterator2 move_and_copy_backward_chooser(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator1>::iterator_category IIC;
+
+ const bool canBeMemmoved = internal::can_be_memmoved_helper<BidirectionalIterator1, BidirectionalIterator2>::value;
+
+ return eastl::move_and_copy_backward_helper<IIC, isMove, canBeMemmoved>::move_or_copy_backward(first, last, resultEnd); // Need to chose based on the input iterator tag and not the output iterator tag, because containers accept input ranges of iterator types different than self.
+ }
+
+
+ // We have a second layer of unwrap_iterator calls because the original iterator might be something like move_iterator<generic_iterator<int*> > (i.e. doubly-wrapped).
+ template <bool isMove, typename BidirectionalIterator1, typename BidirectionalIterator2>
+ inline BidirectionalIterator2 move_and_copy_backward_unwrapper(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ {
+ return BidirectionalIterator2(eastl::move_and_copy_backward_chooser<isMove>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), eastl::unwrap_iterator(resultEnd))); // Have to convert to BidirectionalIterator2 because result.base() could be a T*
+ }
+
+
+ /// move_backward
+ ///
+ /// The elements are moved in reverse order (the last element is moved first), but their relative order is preserved.
+ /// After this operation the elements in the moved-from range will still contain valid values of the
+ /// appropriate type, but not necessarily the same values as before the move.
+ /// Returns the beginning of the result range.
+ /// Note: When moving between containers, the dest range must be valid; this function doesn't resize containers.
+ /// Note: If result is within [first, last), move must be used instead of move_backward.
+ ///
+ /// Example usage:
+ /// eastl::move_backward(myArray.begin(), myArray.end(), myDestArray.end());
+ ///
+ /// Reference implementation:
+ /// template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ /// BidirectionalIterator2 move_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ /// {
+ /// while(last != first)
+ /// *--resultEnd = eastl::move(*--last);
+ /// return resultEnd;
+ /// }
+ ///
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ inline BidirectionalIterator2 move_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ {
+ return eastl::move_and_copy_backward_unwrapper<true>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), resultEnd);
+ }
+
+
+ /// copy_backward
+ ///
+ /// copies memory in the range of [first, last) to the range *ending* with result.
+ ///
+ /// Effects: Copies elements in the range [first, last) into the range
+ /// [result - (last - first), result) starting from last 1 and proceeding to first.
+ /// For each positive integer n <= (last - first), performs *(result n) = *(last - n).
+ ///
+ /// Requires: result shall not be in the range [first, last).
+ ///
+ /// Returns: result - (last - first). That is, returns the beginning of the result range.
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ inline BidirectionalIterator2 copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd)
+ {
+ const bool isMove = eastl::is_move_iterator<BidirectionalIterator1>::value; EA_UNUSED(isMove);
+
+ return eastl::move_and_copy_backward_unwrapper<isMove>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), resultEnd);
+ }
+
+
+ /// count
+ ///
+ /// Counts the number of items in the range of [first, last) which equal the input value.
+ ///
+ /// Effects: Returns the number of iterators i in the range [first, last) for which the
+ /// following corresponding conditions hold: *i == value.
+ ///
+ /// Complexity: At most 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of count is count_if and not another variation of count.
+ /// This is because both versions would have three parameters and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename T>
+ inline typename eastl::iterator_traits<InputIterator>::difference_type
+ count(InputIterator first, InputIterator last, const T& value)
+ {
+ typename eastl::iterator_traits<InputIterator>::difference_type result = 0;
+
+ for(; first != last; ++first)
+ {
+ if(*first == value)
+ ++result;
+ }
+ return result;
+ }
+
+
+ // C++ doesn't define a count with predicate, as it can effectively be synthesized via count_if
+ // with an appropriate predicate. However, it's often simpler to just have count with a predicate.
+ template <typename InputIterator, typename T, typename Predicate>
+ inline typename eastl::iterator_traits<InputIterator>::difference_type
+ count(InputIterator first, InputIterator last, const T& value, Predicate predicate)
+ {
+ typename eastl::iterator_traits<InputIterator>::difference_type result = 0;
+
+ for(; first != last; ++first)
+ {
+ if(predicate(*first, value))
+ ++result;
+ }
+ return result;
+ }
+
+
+ /// count_if
+ ///
+ /// Counts the number of items in the range of [first, last) which match
+ /// the input value as defined by the input predicate function.
+ ///
+ /// Effects: Returns the number of iterators i in the range [first, last) for which the
+ /// following corresponding conditions hold: predicate(*i) != false.
+ ///
+ /// Complexity: At most 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The non-predicate version of count_if is count and not another variation of count_if.
+ /// This is because both versions would have three parameters and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename Predicate>
+ inline typename eastl::iterator_traits<InputIterator>::difference_type
+ count_if(InputIterator first, InputIterator last, Predicate predicate)
+ {
+ typename eastl::iterator_traits<InputIterator>::difference_type result = 0;
+
+ for(; first != last; ++first)
+ {
+ if(predicate(*first))
+ ++result;
+ }
+ return result;
+ }
+
+
+ /// find
+ ///
+ /// finds the value within the unsorted range of [first, last).
+ ///
+ /// Returns: The first iterator i in the range [first, last) for which
+ /// the following corresponding conditions hold: *i == value.
+ /// Returns last if no such iterator is found.
+ ///
+ /// Complexity: At most 'last - first' applications of the corresponding predicate.
+ /// This is a linear search and not a binary one.
+ ///
+ /// Note: The predicate version of find is find_if and not another variation of find.
+ /// This is because both versions would have three parameters and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename T>
+ inline InputIterator
+ find(InputIterator first, InputIterator last, const T& value)
+ {
+ while((first != last) && !(*first == value)) // Note that we always express value comparisons in terms of < or ==.
+ ++first;
+ return first;
+ }
+
+
+ // C++ doesn't define a find with predicate, as it can effectively be synthesized via find_if
+ // with an appropriate predicate. However, it's often simpler to just have find with a predicate.
+ template <typename InputIterator, typename T, typename Predicate>
+ inline InputIterator
+ find(InputIterator first, InputIterator last, const T& value, Predicate predicate)
+ {
+ while((first != last) && !predicate(*first, value))
+ ++first;
+ return first;
+ }
+
+
+
+ /// find_if
+ ///
+ /// finds the value within the unsorted range of [first, last).
+ ///
+ /// Returns: The first iterator i in the range [first, last) for which
+ /// the following corresponding conditions hold: pred(*i) != false.
+ /// Returns last if no such iterator is found.
+ /// If the sequence of elements to search for (i.e. first2 - last2) is empty,
+ /// the find always fails and last1 will be returned.
+ ///
+ /// Complexity: At most 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The non-predicate version of find_if is find and not another variation of find_if.
+ /// This is because both versions would have three parameters and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename Predicate>
+ inline InputIterator
+ find_if(InputIterator first, InputIterator last, Predicate predicate)
+ {
+ while((first != last) && !predicate(*first))
+ ++first;
+ return first;
+ }
+
+
+
+ /// find_if_not
+ ///
+ /// find_if_not works the same as find_if except it tests for if the predicate
+ /// returns false for the elements instead of true.
+ ///
+ template <typename InputIterator, typename Predicate>
+ inline InputIterator
+ find_if_not(InputIterator first, InputIterator last, Predicate predicate)
+ {
+ for(; first != last; ++first)
+ {
+ if(!predicate(*first))
+ return first;
+ }
+ return last;
+ }
+
+
+
+
+ /// find_first_of
+ ///
+ /// find_first_of is similar to find in that it performs linear search through
+ /// a range of ForwardIterators. The difference is that while find searches
+ /// for one particular value, find_first_of searches for any of several values.
+ /// Specifically, find_first_of searches for the first occurrance in the
+ /// range [first1, last1) of any of the elements in [first2, last2).
+ /// This function is thus similar to the strpbrk standard C string function.
+ /// If the sequence of elements to search for (i.e. first2-last2) is empty,
+ /// the find always fails and last1 will be returned.
+ ///
+ /// Effects: Finds an element that matches one of a set of values.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1) such that for some
+ /// integer j in the range [first2, last2) the following conditions hold: *i == *j.
+ /// Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the
+ /// corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ ForwardIterator1
+ find_first_of(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ for(; first1 != last1; ++first1)
+ {
+ for(ForwardIterator2 i = first2; i != last2; ++i)
+ {
+ if(*first1 == *i)
+ return first1;
+ }
+ }
+ return last1;
+ }
+
+
+ /// find_first_of
+ ///
+ /// find_first_of is similar to find in that it performs linear search through
+ /// a range of ForwardIterators. The difference is that while find searches
+ /// for one particular value, find_first_of searches for any of several values.
+ /// Specifically, find_first_of searches for the first occurrance in the
+ /// range [first1, last1) of any of the elements in [first2, last2).
+ /// This function is thus similar to the strpbrk standard C string function.
+ ///
+ /// Effects: Finds an element that matches one of a set of values.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1) such that for some
+ /// integer j in the range [first2, last2) the following conditions hold: pred(*i, *j) != false.
+ /// Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the
+ /// corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ ForwardIterator1
+ find_first_of(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ for(; first1 != last1; ++first1)
+ {
+ for(ForwardIterator2 i = first2; i != last2; ++i)
+ {
+ if(predicate(*first1, *i))
+ return first1;
+ }
+ }
+ return last1;
+ }
+
+
+ /// find_first_not_of
+ ///
+ /// Searches through first range for the first element that does not belong the second input range.
+ /// This is very much like the C++ string find_first_not_of function.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1) such that for some
+ /// integer j in the range [first2, last2) the following conditions hold: !(*i == *j).
+ /// Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the
+ /// corresponding predicate.
+ ///
+ template <class ForwardIterator1, class ForwardIterator2>
+ ForwardIterator1
+ find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ for(; first1 != last1; ++first1)
+ {
+ if(eastl::find(first2, last2, *first1) == last2)
+ break;
+ }
+
+ return first1;
+ }
+
+
+
+ /// find_first_not_of
+ ///
+ /// Searches through first range for the first element that does not belong the second input range.
+ /// This is very much like the C++ string find_first_not_of function.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1) such that for some
+ /// integer j in the range [first2, last2) the following conditions hold: pred(*i, *j) == false.
+ /// Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the
+ /// corresponding predicate.
+ ///
+ template <class ForwardIterator1, class ForwardIterator2, class BinaryPredicate>
+ inline ForwardIterator1
+ find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::value_type value_type;
+
+ for(; first1 != last1; ++first1)
+ {
+ if(eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *first1)) == last2)
+ break;
+ }
+
+ return first1;
+ }
+
+
+ template <class BidirectionalIterator1, class ForwardIterator2>
+ inline BidirectionalIterator1
+ find_last_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ if((first1 != last1) && (first2 != last2))
+ {
+ BidirectionalIterator1 it1(last1);
+
+ while((--it1 != first1) && (eastl::find(first2, last2, *it1) == last2))
+ ; // Do nothing
+
+ if((it1 != first1) || (eastl::find(first2, last2, *it1) != last2))
+ return it1;
+ }
+
+ return last1;
+ }
+
+
+ template <class BidirectionalIterator1, class ForwardIterator2, class BinaryPredicate>
+ BidirectionalIterator1
+ find_last_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator1>::value_type value_type;
+
+ if((first1 != last1) && (first2 != last2))
+ {
+ BidirectionalIterator1 it1(last1);
+
+ while((--it1 != first1) && (eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *it1)) == last2))
+ ; // Do nothing
+
+ if((it1 != first1) || (eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *it1)) != last2))
+ return it1;
+ }
+
+ return last1;
+ }
+
+
+ template <class BidirectionalIterator1, class ForwardIterator2>
+ inline BidirectionalIterator1
+ find_last_not_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ if((first1 != last1) && (first2 != last2))
+ {
+ BidirectionalIterator1 it1(last1);
+
+ while((--it1 != first1) && (eastl::find(first2, last2, *it1) != last2))
+ ; // Do nothing
+
+ if((it1 != first1) || (eastl::find( first2, last2, *it1) == last2))
+ return it1;
+ }
+
+ return last1;
+ }
+
+
+ template <class BidirectionalIterator1, class ForwardIterator2, class BinaryPredicate>
+ inline BidirectionalIterator1
+ find_last_not_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator1>::value_type value_type;
+
+ if((first1 != last1) && (first2 != last2))
+ {
+ BidirectionalIterator1 it1(last1);
+
+ while((--it1 != first1) && (eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *it1)) != last2))
+ ; // Do nothing
+
+ if((it1 != first1) || (eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *it1))) != last2)
+ return it1;
+ }
+
+ return last1;
+ }
+
+
+
+
+ /// for_each
+ ///
+ /// Calls the Function function for each value in the range [first, last).
+ /// Function takes a single parameter: the current value.
+ ///
+ /// Effects: Applies function to the result of dereferencing every iterator in
+ /// the range [first, last), starting from first and proceeding to last 1.
+ ///
+ /// Returns: function.
+ ///
+ /// Complexity: Applies function exactly 'last - first' times.
+ ///
+ /// Note: If function returns a result, the result is ignored.
+ ///
+ template <typename InputIterator, typename Function>
+ inline Function
+ for_each(InputIterator first, InputIterator last, Function function)
+ {
+ for(; first != last; ++first)
+ function(*first);
+ return function;
+ }
+
+ /// for_each_n
+ ///
+ /// Calls the Function function for each value in the range [first, first + n).
+ /// Function takes a single parameter: the current value.
+ ///
+ /// Effects: Applies function to the result of dereferencing every iterator in
+ /// the range [first, first + n), starting from first and proceeding to last 1.
+ ///
+ /// Returns: first + n.
+ ///
+ /// Complexity: Applies function exactly 'first + n' times.
+ ///
+ /// Note:
+ //// * If function returns a result, the result is ignored.
+ //// * If n < 0, behaviour is undefined.
+ ///
+ template <typename InputIterator, typename Size, typename Function>
+ EA_CPP14_CONSTEXPR inline InputIterator
+ for_each_n(InputIterator first, Size n, Function function)
+ {
+ for (Size i = 0; i < n; ++first, i++)
+ function(*first);
+ return first;
+ }
+
+
+ /// generate
+ ///
+ /// Iterates the range of [first, last) and assigns to each element the
+ /// result of the function generator. Generator is a function which takes
+ /// no arguments.
+ ///
+ /// Complexity: Exactly 'last - first' invocations of generator and assignments.
+ ///
+ template <typename ForwardIterator, typename Generator>
+ inline void
+ generate(ForwardIterator first, ForwardIterator last, Generator generator)
+ {
+ for(; first != last; ++first) // We cannot call generate_n(first, last-first, generator)
+ *first = generator(); // because the 'last-first' might not be supported by the
+ } // given iterator.
+
+
+ /// generate_n
+ ///
+ /// Iterates an interator n times and assigns the result of generator
+ /// to each succeeding element. Generator is a function which takes
+ /// no arguments.
+ ///
+ /// Complexity: Exactly n invocations of generator and assignments.
+ ///
+ template <typename OutputIterator, typename Size, typename Generator>
+ inline OutputIterator
+ generate_n(OutputIterator first, Size n, Generator generator)
+ {
+ for(; n > 0; --n, ++first)
+ *first = generator();
+ return first;
+ }
+
+
+ /// transform
+ ///
+ /// Iterates the input range of [first, last) and the output iterator result
+ /// and assigns the result of unaryOperation(input) to result.
+ ///
+ /// Effects: Assigns through every iterator i in the range [result, result + (last1 - first1))
+ /// a new corresponding value equal to unaryOperation(*(first1 + (i - result)).
+ ///
+ /// Requires: op shall not have any side effects.
+ ///
+ /// Returns: result + (last1 - first1). That is, returns the end of the output range.
+ ///
+ /// Complexity: Exactly 'last1 - first1' applications of unaryOperation.
+ ///
+ /// Note: result may be equal to first.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename UnaryOperation>
+ inline OutputIterator
+ transform(InputIterator first, InputIterator last, OutputIterator result, UnaryOperation unaryOperation)
+ {
+ for(; first != last; ++first, ++result)
+ *result = unaryOperation(*first);
+ return result;
+ }
+
+
+ /// transform
+ ///
+ /// Iterates the input range of [first, last) and the output iterator result
+ /// and assigns the result of binaryOperation(input1, input2) to result.
+ ///
+ /// Effects: Assigns through every iterator i in the range [result, result + (last1 - first1))
+ /// a new corresponding value equal to binaryOperation(*(first1 + (i - result), *(first2 + (i - result))).
+ ///
+ /// Requires: binaryOperation shall not have any side effects.
+ ///
+ /// Returns: result + (last1 - first1). That is, returns the end of the output range.
+ ///
+ /// Complexity: Exactly 'last1 - first1' applications of binaryOperation.
+ ///
+ /// Note: result may be equal to first1 or first2.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename BinaryOperation>
+ inline OutputIterator
+ transform(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, OutputIterator result, BinaryOperation binaryOperation)
+ {
+ for(; first1 != last1; ++first1, ++first2, ++result)
+ *result = binaryOperation(*first1, *first2);
+ return result;
+ }
+
+
+ /// equal
+ ///
+ /// Returns: true if for every iterator i in the range [first1, last1) the
+ /// following corresponding conditions hold: predicate(*i, *(first2 + (i - first1))) != false.
+ /// Otherwise, returns false.
+ ///
+ /// Complexity: At most last1 first1 applications of the corresponding predicate.
+ ///
+ /// To consider: Make specializations of this for scalar types and random access
+ /// iterators that uses memcmp or some trick memory comparison function.
+ /// We should verify that such a thing results in an improvement.
+ ///
+ template <typename InputIterator1, typename InputIterator2>
+ EA_CPP14_CONSTEXPR inline bool equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2)
+ {
+ for(; first1 != last1; ++first1, ++first2)
+ {
+ if(!(*first1 == *first2)) // Note that we always express value comparisons in terms of < or ==.
+ return false;
+ }
+ return true;
+ }
+
+ /* Enable the following if there was shown to be some benefit. A glance and Microsoft VC++ memcmp
+ shows that it is not optimized in any way, much less one that would benefit us here.
+
+ inline bool equal(const bool* first1, const bool* last1, const bool* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const char* first1, const char* last1, const char* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const unsigned char* first1, const unsigned char* last1, const unsigned char* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const signed char* first1, const signed char* last1, const signed char* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ #ifndef EA_WCHAR_T_NON_NATIVE
+ inline bool equal(const wchar_t* first1, const wchar_t* last1, const wchar_t* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+ #endif
+
+ inline bool equal(const int16_t* first1, const int16_t* last1, const int16_t* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const uint16_t* first1, const uint16_t* last1, const uint16_t* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const int32_t* first1, const int32_t* last1, const int32_t* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const uint32_t* first1, const uint32_t* last1, const uint32_t* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const int64_t* first1, const int64_t* last1, const int64_t* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const uint64_t* first1, const uint64_t* last1, const uint64_t* first2)
+ { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+ */
+
+
+
+ /// equal
+ ///
+ /// Returns: true if for every iterator i in the range [first1, last1) the
+ /// following corresponding conditions hold: pred(*i, *(first2 + (i first1))) != false.
+ /// Otherwise, returns false.
+ ///
+ /// Complexity: At most last1 first1 applications of the corresponding predicate.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
+ inline bool
+ equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate predicate)
+ {
+ for(; first1 != last1; ++first1, ++first2)
+ {
+ if(!predicate(*first1, *first2))
+ return false;
+ }
+ return true;
+ }
+
+
+
+ /// identical
+ ///
+ /// Returns true if the two input ranges are equivalent.
+ /// There is a subtle difference between this algorithm and
+ /// the 'equal' algorithm. The equal algorithm assumes the
+ /// two ranges are of equal length. This algorithm efficiently
+ /// compares two ranges for both length equality and for
+ /// element equality. There is no other standard algorithm
+ /// that can do this.
+ ///
+ /// Returns: true if the sequence of elements defined by the range
+ /// [first1, last1) is of the same length as the sequence of
+ /// elements defined by the range of [first2, last2) and if
+ /// the elements in these ranges are equal as per the
+ /// equal algorithm.
+ ///
+ /// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications
+ /// of the corresponding comparison.
+ ///
+ template <typename InputIterator1, typename InputIterator2>
+ bool identical(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2)
+ {
+ while((first1 != last1) && (first2 != last2) && (*first1 == *first2))
+ {
+ ++first1;
+ ++first2;
+ }
+ return (first1 == last1) && (first2 == last2);
+ }
+
+
+ /// identical
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
+ bool identical(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2, BinaryPredicate predicate)
+ {
+ while((first1 != last1) && (first2 != last2) && predicate(*first1, *first2))
+ {
+ ++first1;
+ ++first2;
+ }
+ return (first1 == last1) && (first2 == last2);
+ }
+
+
+
+ /// lexicographical_compare
+ ///
+ /// Returns: true if the sequence of elements defined by the range
+ /// [first1, last1) is lexicographically less than the sequence of
+ /// elements defined by the range [first2, last2). Returns false otherwise.
+ ///
+ /// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications
+ /// of the corresponding comparison.
+ ///
+ /// Note: If two sequences have the same number of elements and their
+ /// corresponding elements are equivalent, then neither sequence is
+ /// lexicographically less than the other. If one sequence is a prefix
+ /// of the other, then the shorter sequence is lexicographically less
+ /// than the longer sequence. Otherwise, the lexicographical comparison
+ /// of the sequences yields the same result as the comparison of the first
+ /// corresponding pair of elements that are not equivalent.
+ ///
+ template <typename InputIterator1, typename InputIterator2>
+ inline bool
+ lexicographical_compare(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2)
+ {
+ for(; (first1 != last1) && (first2 != last2); ++first1, ++first2)
+ {
+ if(*first1 < *first2)
+ return true;
+ if(*first2 < *first1)
+ return false;
+ }
+ return (first1 == last1) && (first2 != last2);
+ }
+
+ inline bool // Specialization for const char*.
+ lexicographical_compare(const char* first1, const char* last1, const char* first2, const char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for char*.
+ lexicographical_compare(char* first1, char* last1, char* first2, char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for const unsigned char*.
+ lexicographical_compare(const unsigned char* first1, const unsigned char* last1, const unsigned char* first2, const unsigned char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for unsigned char*.
+ lexicographical_compare(unsigned char* first1, unsigned char* last1, unsigned char* first2, unsigned char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for const signed char*.
+ lexicographical_compare(const signed char* first1, const signed char* last1, const signed char* first2, const signed char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for signed char*.
+ lexicographical_compare(signed char* first1, signed char* last1, signed char* first2, signed char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ #if defined(_MSC_VER) // If using the VC++ compiler (and thus bool is known to be a single byte)...
+ //Not sure if this is a good idea.
+ //inline bool // Specialization for const bool*.
+ //lexicographical_compare(const bool* first1, const bool* last1, const bool* first2, const bool* last2)
+ //{
+ // const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ // const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ // return result ? (result < 0) : (n1 < n2);
+ //}
+ //
+ //inline bool // Specialization for bool*.
+ //lexicographical_compare(bool* first1, bool* last1, bool* first2, bool* last2)
+ //{
+ // const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ // const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ // return result ? (result < 0) : (n1 < n2);
+ //}
+ #endif
+
+
+
+ /// lexicographical_compare
+ ///
+ /// Returns: true if the sequence of elements defined by the range
+ /// [first1, last1) is lexicographically less than the sequence of
+ /// elements defined by the range [first2, last2). Returns false otherwise.
+ ///
+ /// Complexity: At most 'min((last1 -first1), (last2 - first2))' applications
+ /// of the corresponding comparison.
+ ///
+ /// Note: If two sequences have the same number of elements and their
+ /// corresponding elements are equivalent, then neither sequence is
+ /// lexicographically less than the other. If one sequence is a prefix
+ /// of the other, then the shorter sequence is lexicographically less
+ /// than the longer sequence. Otherwise, the lexicographical comparison
+ /// of the sequences yields the same result as the comparison of the first
+ /// corresponding pair of elements that are not equivalent.
+ ///
+ /// Note: False is always returned if range 1 is exhausted before range 2.
+ /// The result of this is that you can't do a successful reverse compare
+ /// (e.g. use greater<> as the comparison instead of less<>) unless the
+ /// two sequences are of identical length. What you want to do is reverse
+ /// the order of the arguments in order to get the desired effect.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename Compare>
+ inline bool
+ lexicographical_compare(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2, Compare compare)
+ {
+ for(; (first1 != last1) && (first2 != last2); ++first1, ++first2)
+ {
+ if(compare(*first1, *first2))
+ return true;
+ if(compare(*first2, *first1))
+ return false;
+ }
+ return (first1 == last1) && (first2 != last2);
+ }
+
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+
+ /// lexicographical_compare_three_way
+ ///
+ /// Returns: The comparison category ordering between both ranges. For the first non-equivalent pair in the ranges,
+ /// the comparison will be returned. Else if the first range is a subset (superset) of the second range, then the
+ /// less (greater) ordering will be returned.
+ ///
+ /// Complexity: At most N iterations, where N = min(last1-first1, last2-first2) of the applications
+ /// of the corresponding comparison.
+ ///
+ /// Note: If two sequences have the same number of elements and their
+ /// corresponding elements are equivalent, then neither sequence is
+ /// lexicographically less than the other. If one sequence is a prefix
+ /// of the other, then the shorter sequence is lexicographically less
+ /// than the longer sequence. Otherwise, the lexicographical comparison
+ /// of the sequences yields the same result as the comparison of the first
+ /// corresponding pair of elements that are not equivalent.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename Compare>
+ constexpr auto lexicographical_compare_three_way(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ Compare compare) -> decltype(compare(*first1, *first2))
+ {
+ for (; (first1 != last1) && (first2 != last2); ++first1, ++first2)
+ {
+ if (auto c = compare(*first1, *first2); c != 0)
+ return c;
+ }
+
+ return (first1 != last1) ? std::strong_ordering::greater :
+ (first2 != last2) ? std::strong_ordering::less :
+ std::strong_ordering::equal;
+ }
+#endif
+
+ /// mismatch
+ ///
+ /// Finds the first position where the two ranges [first1, last1) and
+ /// [first2, first2 + (last1 - first1)) differ. The two versions of
+ /// mismatch use different tests for whether elements differ.
+ ///
+ /// Returns: A pair of iterators i and j such that j == first2 + (i - first1)
+ /// and i is the first iterator in the range [first1, last1) for which the
+ /// following corresponding condition holds: !(*i == *(first2 + (i - first1))).
+ /// Returns the pair last1 and first2 + (last1 - first1) if such an iterator
+ /// i is not found.
+ ///
+ /// Complexity: At most last1 first1 applications of the corresponding predicate.
+ ///
+ template <class InputIterator1, class InputIterator2>
+ inline eastl::pair<InputIterator1, InputIterator2>
+ mismatch(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2) // , InputIterator2 last2)
+ {
+ while((first1 != last1) && (*first1 == *first2)) // && (first2 != last2) <- C++ standard mismatch function doesn't check first2/last2.
+ {
+ ++first1;
+ ++first2;
+ }
+
+ return eastl::pair<InputIterator1, InputIterator2>(first1, first2);
+ }
+
+
+ /// mismatch
+ ///
+ /// Finds the first position where the two ranges [first1, last1) and
+ /// [first2, first2 + (last1 - first1)) differ. The two versions of
+ /// mismatch use different tests for whether elements differ.
+ ///
+ /// Returns: A pair of iterators i and j such that j == first2 + (i - first1)
+ /// and i is the first iterator in the range [first1, last1) for which the
+ /// following corresponding condition holds: pred(*i, *(first2 + (i - first1))) == false.
+ /// Returns the pair last1 and first2 + (last1 - first1) if such an iterator
+ /// i is not found.
+ ///
+ /// Complexity: At most last1 first1 applications of the corresponding predicate.
+ ///
+ template <class InputIterator1, class InputIterator2, class BinaryPredicate>
+ inline eastl::pair<InputIterator1, InputIterator2>
+ mismatch(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, // InputIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ while((first1 != last1) && predicate(*first1, *first2)) // && (first2 != last2) <- C++ standard mismatch function doesn't check first2/last2.
+ {
+ ++first1;
+ ++first2;
+ }
+
+ return eastl::pair<InputIterator1, InputIterator2>(first1, first2);
+ }
+
+
+ /// lower_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a value
+ /// greater than or equivalent to a specified value.
+ ///
+ /// Effects: Finds the first position into which value can be inserted without
+ /// violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: *j < value.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ /// Optimizations: We have no need to specialize this implementation for random
+ /// access iterators (e.g. contiguous array), as the code below will already
+ /// take advantage of them.
+ ///
+ template <typename ForwardIterator, typename T>
+ ForwardIterator
+ lower_bound(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType d = eastl::distance(first, last); // This will be efficient for a random access iterator such as an array.
+
+ while(d > 0)
+ {
+ ForwardIterator i = first;
+ DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, d2); // This will be efficient for a random access iterator such as an array.
+
+ if(*i < value)
+ {
+ // Disabled because std::lower_bound doesn't specify (23.3.3.3, p3) this can be done: EASTL_VALIDATE_COMPARE(!(value < *i)); // Validate that the compare function is sane.
+ first = ++i;
+ d -= d2 + 1;
+ }
+ else
+ d = d2;
+ }
+ return first;
+ }
+
+
+ /// lower_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a value
+ /// greater than or equivalent to a specified value. The input Compare function
+ /// takes two arguments and returns true if the first argument is less than
+ /// the second argument.
+ ///
+ /// Effects: Finds the first position into which value can be inserted without
+ /// violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: compare(*j, value) != false.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ /// Optimizations: We have no need to specialize this implementation for random
+ /// access iterators (e.g. contiguous array), as the code below will already
+ /// take advantage of them.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ ForwardIterator
+ lower_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType d = eastl::distance(first, last); // This will be efficient for a random access iterator such as an array.
+
+ while(d > 0)
+ {
+ ForwardIterator i = first;
+ DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, d2); // This will be efficient for a random access iterator such as an array.
+
+ if(compare(*i, value))
+ {
+ // Disabled because std::lower_bound doesn't specify (23.3.3.1, p3) this can be done: EASTL_VALIDATE_COMPARE(!compare(value, *i)); // Validate that the compare function is sane.
+ first = ++i;
+ d -= d2 + 1;
+ }
+ else
+ d = d2;
+ }
+ return first;
+ }
+
+
+
+ /// upper_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a
+ /// value that is greater than a specified value.
+ ///
+ /// Effects: Finds the furthermost position into which value can be inserted
+ /// without violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: !(value < *j).
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ ForwardIterator
+ upper_bound(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType len = eastl::distance(first, last);
+
+ while(len > 0)
+ {
+ ForwardIterator i = first;
+ DifferenceType len2 = len >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, len2);
+
+ if(!(value < *i)) // Note that we always express value comparisons in terms of < or ==.
+ {
+ first = ++i;
+ len -= len2 + 1;
+ }
+ else
+ {
+ // Disabled because std::upper_bound doesn't specify (23.3.3.2, p3) this can be done: EASTL_VALIDATE_COMPARE(!(*i < value)); // Validate that the compare function is sane.
+ len = len2;
+ }
+ }
+ return first;
+ }
+
+
+ /// upper_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a
+ /// value that is greater than a specified value. The input Compare function
+ /// takes two arguments and returns true if the first argument is less than
+ /// the second argument.
+ ///
+ /// Effects: Finds the furthermost position into which value can be inserted
+ /// without violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: compare(value, *j) == false.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ ForwardIterator
+ upper_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType len = eastl::distance(first, last);
+
+ while(len > 0)
+ {
+ ForwardIterator i = first;
+ DifferenceType len2 = len >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, len2);
+
+ if(!compare(value, *i))
+ {
+ first = ++i;
+ len -= len2 + 1;
+ }
+ else
+ {
+ // Disabled because std::upper_bound doesn't specify (23.3.3.2, p3) this can be done: EASTL_VALIDATE_COMPARE(!compare(*i, value)); // Validate that the compare function is sane.
+ len = len2;
+ }
+ }
+ return first;
+ }
+
+
+ /// equal_range
+ ///
+ /// Effects: Finds the largest subrange [i, j) such that the value can be inserted
+ /// at any iterator k in it without violating the ordering. k satisfies the
+ /// corresponding conditions: !(*k < value) && !(value < *k).
+ ///
+ /// Complexity: At most '2 * log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ pair<ForwardIterator, ForwardIterator>
+ equal_range(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef pair<ForwardIterator, ForwardIterator> ResultType;
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType d = eastl::distance(first, last);
+
+ while(d > 0)
+ {
+ ForwardIterator i(first);
+ DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, d2);
+
+ if(*i < value)
+ {
+ EASTL_VALIDATE_COMPARE(!(value < *i)); // Validate that the compare function is sane.
+ first = ++i;
+ d -= d2 + 1;
+ }
+ else if(value < *i)
+ {
+ EASTL_VALIDATE_COMPARE(!(*i < value)); // Validate that the compare function is sane.
+ d = d2;
+ last = i;
+ }
+ else
+ {
+ ForwardIterator j(i);
+
+ return ResultType(eastl::lower_bound(first, i, value),
+ eastl::upper_bound(++j, last, value));
+ }
+ }
+ return ResultType(first, first);
+ }
+
+
+ /// equal_range
+ ///
+ /// Effects: Finds the largest subrange [i, j) such that the value can be inserted
+ /// at any iterator k in it without violating the ordering. k satisfies the
+ /// corresponding conditions: compare(*k, value) == false && compare(value, *k) == false.
+ ///
+ /// Complexity: At most '2 * log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ pair<ForwardIterator, ForwardIterator>
+ equal_range(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ typedef pair<ForwardIterator, ForwardIterator> ResultType;
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType d = eastl::distance(first, last);
+
+ while(d > 0)
+ {
+ ForwardIterator i(first);
+ DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, d2);
+
+ if(compare(*i, value))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(value, *i)); // Validate that the compare function is sane.
+ first = ++i;
+ d -= d2 + 1;
+ }
+ else if(compare(value, *i))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*i, value)); // Validate that the compare function is sane.
+ d = d2;
+ last = i;
+ }
+ else
+ {
+ ForwardIterator j(i);
+
+ return ResultType(eastl::lower_bound(first, i, value, compare),
+ eastl::upper_bound(++j, last, value, compare));
+ }
+ }
+ return ResultType(first, first);
+ }
+
+
+ /// replace
+ ///
+ /// Effects: Substitutes elements referred by the iterator i in the range [first, last)
+ /// with new_value, when the following corresponding conditions hold: *i == old_value.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace is replace_if and not another variation of replace.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline void
+ replace(ForwardIterator first, ForwardIterator last, const T& old_value, const T& new_value)
+ {
+ for(; first != last; ++first)
+ {
+ if(*first == old_value)
+ *first = new_value;
+ }
+ }
+
+
+ /// replace_if
+ ///
+ /// Effects: Substitutes elements referred by the iterator i in the range [first, last)
+ /// with new_value, when the following corresponding conditions hold: predicate(*i) != false.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace_if is replace and not another variation of replace_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename ForwardIterator, typename Predicate, typename T>
+ inline void
+ replace_if(ForwardIterator first, ForwardIterator last, Predicate predicate, const T& new_value)
+ {
+ for(; first != last; ++first)
+ {
+ if(predicate(*first))
+ *first = new_value;
+ }
+ }
+
+
+ /// remove_copy
+ ///
+ /// Effects: Copies all the elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition does not hold:
+ /// *i == value.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename T>
+ inline OutputIterator
+ remove_copy(InputIterator first, InputIterator last, OutputIterator result, const T& value)
+ {
+ for(; first != last; ++first)
+ {
+ if(!(*first == value)) // Note that we always express value comparisons in terms of < or ==.
+ {
+ *result = eastl::move(*first);
+ ++result;
+ }
+ }
+ return result;
+ }
+
+
+ /// remove_copy_if
+ ///
+ /// Effects: Copies all the elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition does not hold:
+ /// predicate(*i) != false.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename Predicate>
+ inline OutputIterator
+ remove_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate)
+ {
+ for(; first != last; ++first)
+ {
+ if(!predicate(*first))
+ {
+ *result = eastl::move(*first);
+ ++result;
+ }
+ }
+ return result;
+ }
+
+
+ /// remove
+ ///
+ /// Effects: Eliminates all the elements referred to by iterator i in the
+ /// range [first, last) for which the following corresponding condition
+ /// holds: *i == value.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of remove is remove_if and not another variation of remove.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ /// Note: Since this function moves the element to the back of the heap and
+ /// doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container.
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// ...
+ /// intArray.erase(remove(intArray.begin(), intArray.end(), 4), intArray.end()); // Erase all elements of value 4.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline ForwardIterator
+ remove(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ first = eastl::find(first, last, value);
+ if(first != last)
+ {
+ ForwardIterator i(first);
+ return eastl::remove_copy(++i, last, first, value);
+ }
+ return first;
+ }
+
+
+ /// remove_if
+ ///
+ /// Effects: Eliminates all the elements referred to by iterator i in the
+ /// range [first, last) for which the following corresponding condition
+ /// holds: predicate(*i) != false.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of remove_if is remove and not another variation of remove_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ /// Note: Since this function moves the element to the back of the heap and
+ /// doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container.
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// ...
+ /// intArray.erase(remove(intArray.begin(), intArray.end(), bind2nd(less<int>(), (int)3)), intArray.end()); // Erase all elements less than 3.
+ ///
+ template <typename ForwardIterator, typename Predicate>
+ inline ForwardIterator
+ remove_if(ForwardIterator first, ForwardIterator last, Predicate predicate)
+ {
+ first = eastl::find_if(first, last, predicate);
+ if(first != last)
+ {
+ ForwardIterator i(first);
+ return eastl::remove_copy_if<ForwardIterator, ForwardIterator, Predicate>(++i, last, first, predicate);
+ }
+ return first;
+ }
+
+
+ /// apply_and_remove_if
+ ///
+ /// Calls the Function function for all elements referred to my iterator i in the range
+ /// [first, last) for which the following corresponding condition holds:
+ /// predicate(*i) == true
+ /// and then left shift moves potential non-matching elements over it.
+ ///
+ /// Returns: a past-the-end iterator for the new end of the range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate + applies
+ /// function once for every time the condition holds.
+ ///
+ /// Note: Since removing is done by shifting (by means of copy move assignment) the elements
+ /// in the range in such a way that the elements that are not to be removed appear in the
+ /// beginning of the range doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container. I.e. in the same they as for remove_if the excess elements
+ /// are left in a valid but possibly moved from state.
+ ///
+ template <typename ForwardIterator, typename Function, typename Predicate>
+ inline ForwardIterator apply_and_remove_if(ForwardIterator first,
+ ForwardIterator last,
+ Function function,
+ Predicate predicate)
+ {
+ first = eastl::find_if(first, last, predicate);
+ if (first != last)
+ {
+ function(*first);
+ for (auto i = next(first); i != last; ++i)
+ {
+ if (predicate(*i))
+ {
+ function(*i);
+ continue;
+ }
+ *first = eastl::move(*i);
+ ++first;
+ }
+ }
+ return first;
+ }
+
+
+ /// apply_and_remove
+ ///
+ /// Calls the Function function for all elements referred to my iterator i in the range
+ /// [first, last) for which the following corresponding condition holds:
+ /// value == *i
+ /// and then left shift moves potential non-matching elements over it.
+ ///
+ /// Returns: a past-the-end iterator for the new end of the range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding equality test
+ /// + applies function once for every time the condition holds.
+ ///
+ /// Note: Since removing is done by shifting (by means of copy move assignment) the elements
+ /// in the range in such a way that the elements that are not to be removed appear in the
+ /// beginning of the range doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container. I.e. in the same they as for remove_if the excess elements
+ /// are left in a valid but possibly moved from state.
+ ///
+ template <typename ForwardIterator, typename Function, typename T>
+ inline ForwardIterator apply_and_remove(ForwardIterator first,
+ ForwardIterator last,
+ Function function,
+ const T& value)
+ {
+ first = eastl::find(first, last, value);
+ if (first != last)
+ {
+ function(*first);
+ for (auto i = next(first); i != last; ++i)
+ {
+ if (value == *i)
+ {
+ function(*i);
+ continue;
+ }
+ *first = eastl::move(*i);
+ ++first;
+ }
+ }
+ return first;
+ }
+
+
+ /// replace_copy
+ ///
+ /// Effects: Assigns to every iterator i in the range [result, result + (last - first))
+ /// either new_value or *(first + (i - result)) depending on whether the following
+ /// corresponding conditions hold: *(first + (i - result)) == old_value.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: result + (last - first).
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace_copy is replace_copy_if and not another variation of replace_copy.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename T>
+ inline OutputIterator
+ replace_copy(InputIterator first, InputIterator last, OutputIterator result, const T& old_value, const T& new_value)
+ {
+ for(; first != last; ++first, ++result)
+ *result = (*first == old_value) ? new_value : *first;
+ return result;
+ }
+
+
+ /// replace_copy_if
+ ///
+ /// Effects: Assigns to every iterator i in the range [result, result + (last - first))
+ /// either new_value or *(first + (i - result)) depending on whether the following
+ /// corresponding conditions hold: predicate(*(first + (i - result))) != false.
+ ///
+ /// Requires: The ranges [first, last) and [result, result+(lastfirst)) shall not overlap.
+ ///
+ /// Returns: result + (last - first).
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace_copy_if is replace_copy and not another variation of replace_copy_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename Predicate, typename T>
+ inline OutputIterator
+ replace_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate, const T& new_value)
+ {
+ for(; first != last; ++first, ++result)
+ *result = predicate(*first) ? new_value : *first;
+ return result;
+ }
+
+
+
+
+ // reverse
+ //
+ // We provide helper functions which allow reverse to be implemented more
+ // efficiently for some types of iterators and types.
+ //
+ template <typename BidirectionalIterator>
+ inline void reverse_impl(BidirectionalIterator first, BidirectionalIterator last, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+ for(; (first != last) && (first != --last); ++first) // We are not allowed to use operator <, <=, >, >= with a
+ eastl::iter_swap(first, last); // generic (bidirectional or otherwise) iterator.
+ }
+
+ template <typename RandomAccessIterator>
+ inline void reverse_impl(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ if(first != last)
+ {
+ for(; first < --last; ++first) // With a random access iterator, we can use operator < to more efficiently implement
+ eastl::iter_swap(first, last); // this algorithm. A generic iterator doesn't necessarily have an operator < defined.
+ }
+ }
+
+ /// reverse
+ ///
+ /// Reverses the values within the range [first, last).
+ ///
+ /// Effects: For each nonnegative integer i <= (last - first) / 2,
+ /// applies swap to all pairs of iterators first + i, (last i) - 1.
+ ///
+ /// Complexity: Exactly '(last - first) / 2' swaps.
+ ///
+ template <typename BidirectionalIterator>
+ inline void reverse(BidirectionalIterator first, BidirectionalIterator last)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator>::iterator_category IC;
+ eastl::reverse_impl(first, last, IC());
+ }
+
+
+
+ /// reverse_copy
+ ///
+ /// Copies the range [first, last) in reverse order to the result.
+ ///
+ /// Effects: Copies the range [first, last) to the range
+ /// [result, result + (last - first)) such that for any nonnegative
+ /// integer i < (last - first) the following assignment takes place:
+ /// *(result + (last - first) - i) = *(first + i)
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first))
+ /// shall not overlap.
+ ///
+ /// Returns: result + (last - first). That is, returns the end of the output range.
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ template <typename BidirectionalIterator, typename OutputIterator>
+ inline OutputIterator
+ reverse_copy(BidirectionalIterator first, BidirectionalIterator last, OutputIterator result)
+ {
+ for(; first != last; ++result)
+ *result = *--last;
+ return result;
+ }
+
+
+
+ /// search
+ ///
+ /// Search finds a subsequence within the range [first1, last1) that is identical to [first2, last2)
+ /// when compared element-by-element. It returns an iterator pointing to the beginning of that
+ /// subsequence, or else last1 if no such subsequence exists. As such, it is very much like
+ /// the C strstr function, with the primary difference being that strstr uses 0-terminated strings
+ /// whereas search uses an end iterator to specify the end of a string.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1 - (last2 - first2)) such that for
+ /// any nonnegative integer n less than 'last2 - first2' the following corresponding condition holds:
+ /// *(i + n) == *(first2 + n). Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most (last1 first1) * (last2 first2) applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ ForwardIterator1
+ search(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ if(first2 != last2) // If there is anything to search for...
+ {
+ // We need to make a special case for a pattern of one element,
+ // as the logic below prevents one element patterns from working.
+ ForwardIterator2 temp2(first2);
+ ++temp2;
+
+ if(temp2 != last2) // If what we are searching for has a length > 1...
+ {
+ ForwardIterator1 cur1(first1);
+ ForwardIterator2 p2;
+
+ while(first1 != last1)
+ {
+ // The following loop is the equivalent of eastl::find(first1, last1, *first2)
+ while((first1 != last1) && !(*first1 == *first2))
+ ++first1;
+
+ if(first1 != last1)
+ {
+ p2 = temp2;
+ cur1 = first1;
+
+ if(++cur1 != last1)
+ {
+ while(*cur1 == *p2)
+ {
+ if(++p2 == last2)
+ return first1;
+
+ if(++cur1 == last1)
+ return last1;
+ }
+
+ ++first1;
+ continue;
+ }
+ }
+ return last1;
+ }
+
+ // Fall through to the end.
+ }
+ else
+ return eastl::find(first1, last1, *first2);
+ }
+
+ return first1;
+
+
+ #if 0
+ /* Another implementation which is a little more simpler but executes a little slower on average.
+ typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type_1;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::difference_type difference_type_2;
+
+ const difference_type_2 d2 = eastl::distance(first2, last2);
+
+ for(difference_type_1 d1 = eastl::distance(first1, last1); d1 >= d2; ++first1, --d1)
+ {
+ ForwardIterator1 temp1 = first1;
+
+ for(ForwardIterator2 temp2 = first2; ; ++temp1, ++temp2)
+ {
+ if(temp2 == last2)
+ return first1;
+ if(!(*temp1 == *temp2))
+ break;
+ }
+ }
+
+ return last1;
+ */
+ #endif
+ }
+
+
+ /// search
+ ///
+ /// Search finds a subsequence within the range [first1, last1) that is identical to [first2, last2)
+ /// when compared element-by-element. It returns an iterator pointing to the beginning of that
+ /// subsequence, or else last1 if no such subsequence exists. As such, it is very much like
+ /// the C strstr function, with the only difference being that strstr uses 0-terminated strings
+ /// whereas search uses an end iterator to specify the end of a string.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1 - (last2 - first2)) such that for
+ /// any nonnegative integer n less than 'last2 - first2' the following corresponding condition holds:
+ /// predicate(*(i + n), *(first2 + n)) != false. Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most (last1 first1) * (last2 first2) applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ ForwardIterator1
+ search(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type_1;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::difference_type difference_type_2;
+
+ difference_type_2 d2 = eastl::distance(first2, last2);
+
+ if(d2 != 0)
+ {
+ ForwardIterator1 i(first1);
+ eastl::advance(i, d2);
+
+ for(difference_type_1 d1 = eastl::distance(first1, last1); d1 >= d2; --d1)
+ {
+ if(eastl::equal<ForwardIterator1, ForwardIterator2, BinaryPredicate>(first1, i, first2, predicate))
+ return first1;
+ if(d1 > d2) // To do: Find a way to make the algorithm more elegant.
+ {
+ ++first1;
+ ++i;
+ }
+ }
+ return last1;
+ }
+ return first1; // Just like with strstr, we return first1 if the match string is empty.
+ }
+
+
+
+ // search_n helper functions
+ //
+ template <typename ForwardIterator, typename Size, typename T>
+ ForwardIterator // Generic implementation.
+ search_n_impl(ForwardIterator first, ForwardIterator last, Size count, const T& value, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ if(count <= 0)
+ return first;
+
+ Size d1 = (Size)eastl::distance(first, last); // Should d1 be of type Size, ptrdiff_t, or iterator_traits<ForwardIterator>::difference_type?
+ // The problem with using iterator_traits<ForwardIterator>::difference_type is that
+ if(count > d1) // ForwardIterator may not be a true iterator but instead something like a pointer.
+ return last;
+
+ for(; d1 >= count; ++first, --d1)
+ {
+ ForwardIterator i(first);
+
+ for(Size n = 0; n < count; ++n, ++i, --d1)
+ {
+ if(!(*i == value)) // Note that we always express value comparisons in terms of < or ==.
+ goto not_found;
+ }
+ return first;
+
+ not_found:
+ first = i;
+ }
+ return last;
+ }
+
+ template <typename RandomAccessIterator, typename Size, typename T> inline
+ RandomAccessIterator // Random access iterator implementation. Much faster than generic implementation.
+ search_n_impl(RandomAccessIterator first, RandomAccessIterator last, Size count, const T& value, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ if(count <= 0)
+ return first;
+ else if(count == 1)
+ return eastl::find(first, last, value);
+ else if(last > first)
+ {
+ RandomAccessIterator lookAhead;
+ RandomAccessIterator backTrack;
+
+ Size skipOffset = (count - 1);
+ Size tailSize = (Size)(last - first);
+ Size remainder;
+ Size prevRemainder;
+
+ for(lookAhead = first + skipOffset; tailSize >= count; lookAhead += count)
+ {
+ tailSize -= count;
+
+ if(*lookAhead == value)
+ {
+ remainder = skipOffset;
+
+ for(backTrack = lookAhead - 1; *backTrack == value; --backTrack)
+ {
+ if(--remainder == 0)
+ return (lookAhead - skipOffset); // success
+ }
+
+ if(remainder <= tailSize)
+ {
+ prevRemainder = remainder;
+
+ while(*(++lookAhead) == value)
+ {
+ if(--remainder == 0)
+ return (backTrack + 1); // success
+ }
+ tailSize -= (prevRemainder - remainder);
+ }
+ else
+ return last; // failure
+ }
+
+ // lookAhead here is always pointing to the element of the last mismatch.
+ }
+ }
+
+ return last; // failure
+ }
+
+
+ /// search_n
+ ///
+ /// Returns: The first iterator i in the range [first, last count) such that
+ /// for any nonnegative integer n less than count the following corresponding
+ /// conditions hold: *(i + n) == value, pred(*(i + n),value) != false.
+ /// Returns last if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * count' applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator, typename Size, typename T>
+ ForwardIterator
+ search_n(ForwardIterator first, ForwardIterator last, Size count, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+ return eastl::search_n_impl(first, last, count, value, IC());
+ }
+
+
+ /// binary_search
+ ///
+ /// Returns: true if there is an iterator i in the range [first last) that
+ /// satisfies the corresponding conditions: !(*i < value) && !(value < *i).
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ /// Note: The reason binary_search returns bool instead of an iterator is
+ /// that search_n, lower_bound, or equal_range already return an iterator.
+ /// However, there are arguments that binary_search should return an iterator.
+ /// Note that we provide binary_search_i (STL extension) to return an iterator.
+ ///
+ /// To use search_n to find an item, do this:
+ /// iterator i = search_n(begin, end, 1, value);
+ /// To use lower_bound to find an item, do this:
+ /// iterator i = lower_bound(begin, end, value);
+ /// if((i != last) && !(value < *i))
+ /// <use the iterator>
+ /// It turns out that the above lower_bound method is as fast as binary_search
+ /// would be if it returned an iterator.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline bool
+ binary_search(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ // To do: This can be made slightly faster by not using lower_bound.
+ ForwardIterator i(eastl::lower_bound<ForwardIterator, T>(first, last, value));
+ return ((i != last) && !(value < *i)); // Note that we always express value comparisons in terms of < or ==.
+ }
+
+
+ /// binary_search
+ ///
+ /// Returns: true if there is an iterator i in the range [first last) that
+ /// satisfies the corresponding conditions: compare(*i, value) == false &&
+ /// compare(value, *i) == false.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ /// Note: See comments above regarding the bool return value of binary_search.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ inline bool
+ binary_search(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ // To do: This can be made slightly faster by not using lower_bound.
+ ForwardIterator i(eastl::lower_bound<ForwardIterator, T, Compare>(first, last, value, compare));
+ return ((i != last) && !compare(value, *i));
+ }
+
+
+ /// binary_search_i
+ ///
+ /// Returns: iterator if there is an iterator i in the range [first last) that
+ /// satisfies the corresponding conditions: !(*i < value) && !(value < *i).
+ /// Returns last if the value is not found.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline ForwardIterator
+ binary_search_i(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ // To do: This can be made slightly faster by not using lower_bound.
+ ForwardIterator i(eastl::lower_bound<ForwardIterator, T>(first, last, value));
+ if((i != last) && !(value < *i)) // Note that we always express value comparisons in terms of < or ==.
+ return i;
+ return last;
+ }
+
+
+ /// binary_search_i
+ ///
+ /// Returns: iterator if there is an iterator i in the range [first last) that
+ /// satisfies the corresponding conditions: !(*i < value) && !(value < *i).
+ /// Returns last if the value is not found.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ inline ForwardIterator
+ binary_search_i(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ // To do: This can be made slightly faster by not using lower_bound.
+ ForwardIterator i(eastl::lower_bound<ForwardIterator, T, Compare>(first, last, value, compare));
+ if((i != last) && !compare(value, *i))
+ return i;
+ return last;
+ }
+
+
+ /// unique
+ ///
+ /// Given a sorted range, this function removes duplicated items.
+ /// Note that if you have a container then you will probably want
+ /// to call erase on the container with the return value if your
+ /// goal is to remove the duplicated items from the container.
+ ///
+ /// Effects: Eliminates all but the first element from every consecutive
+ /// group of equal elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition holds:
+ /// *i == *(i - 1).
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: If the range (last - first) is not empty, exactly (last - first)
+ /// applications of the corresponding predicate, otherwise no applications of the predicate.
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// ...
+ /// intArray.erase(unique(intArray.begin(), intArray.end()), intArray.end());
+ ///
+ template <typename ForwardIterator>
+ ForwardIterator unique(ForwardIterator first, ForwardIterator last)
+ {
+ first = eastl::adjacent_find<ForwardIterator>(first, last);
+
+ if(first != last) // We expect that there are duplicated items, else the user wouldn't be calling this function.
+ {
+ ForwardIterator dest(first);
+
+ for(++first; first != last; ++first)
+ {
+ if(!(*dest == *first)) // Note that we always express value comparisons in terms of < or ==.
+ *++dest = *first;
+ }
+ return ++dest;
+ }
+ return last;
+ }
+
+
+ /// unique
+ ///
+ /// Given a sorted range, this function removes duplicated items.
+ /// Note that if you have a container then you will probably want
+ /// to call erase on the container with the return value if your
+ /// goal is to remove the duplicated items from the container.
+ ///
+ /// Effects: Eliminates all but the first element from every consecutive
+ /// group of equal elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition holds:
+ /// predicate(*i, *(i - 1)) != false.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: If the range (last - first) is not empty, exactly (last - first)
+ /// applications of the corresponding predicate, otherwise no applications of the predicate.
+ ///
+ template <typename ForwardIterator, typename BinaryPredicate>
+ ForwardIterator unique(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate)
+ {
+ first = eastl::adjacent_find<ForwardIterator, BinaryPredicate>(first, last, predicate);
+
+ if(first != last) // We expect that there are duplicated items, else the user wouldn't be calling this function.
+ {
+ ForwardIterator dest(first);
+
+ for(++first; first != last; ++first)
+ {
+ if(!predicate(*dest, *first))
+ *++dest = *first;
+ }
+ return ++dest;
+ }
+ return last;
+ }
+
+
+
+ // find_end
+ //
+ // We provide two versions here, one for a bidirectional iterators and one for
+ // regular forward iterators. Given that we are searching backward, it's a bit
+ // more efficient if we can use backwards iteration to implement our search,
+ // though this requires an iterator that can be reversed.
+ //
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ ForwardIterator1
+ find_end_impl(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ EASTL_ITC_NS::forward_iterator_tag, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ if(first2 != last2) // We have to do this check because the search algorithm below will return first1 (and not last1) if the first2/last2 range is empty.
+ {
+ for(ForwardIterator1 result(last1); ; )
+ {
+ const ForwardIterator1 resultNext(eastl::search(first1, last1, first2, last2));
+
+ if(resultNext != last1) // If another sequence was found...
+ {
+ first1 = result = resultNext;
+ ++first1;
+ }
+ else
+ return result;
+ }
+ }
+ return last1;
+ }
+
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ BidirectionalIterator1
+ find_end_impl(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ BidirectionalIterator2 first2, BidirectionalIterator2 last2,
+ EASTL_ITC_NS::bidirectional_iterator_tag, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+ typedef eastl::reverse_iterator<BidirectionalIterator1> reverse_iterator1;
+ typedef eastl::reverse_iterator<BidirectionalIterator2> reverse_iterator2;
+
+ reverse_iterator1 rresult(eastl::search(reverse_iterator1(last1), reverse_iterator1(first1),
+ reverse_iterator2(last2), reverse_iterator2(first2)));
+ if(rresult.base() != first1) // If we found something...
+ {
+ BidirectionalIterator1 result(rresult.base());
+
+ eastl::advance(result, -eastl::distance(first2, last2)); // We have an opportunity to optimize this, as the
+ return result; // search function already calculates this distance.
+ }
+ return last1;
+ }
+
+ /// find_end
+ ///
+ /// Finds the last occurrence of the second sequence in the first sequence.
+ /// As such, this function is much like the C string function strrstr and it
+ /// is also the same as a reversed version of 'search'. It is called find_end
+ /// instead of the possibly more consistent search_end simply because the C++
+ /// standard algorithms have such naming.
+ ///
+ /// Returns an iterator between first1 and last1 if the sequence is found.
+ /// returns last1 (the end of the first seqence) if the sequence is not found.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ inline ForwardIterator1
+ find_end(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::iterator_category IC1;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::iterator_category IC2;
+
+ return eastl::find_end_impl(first1, last1, first2, last2, IC1(), IC2());
+ }
+
+
+
+
+ // To consider: Fold the predicate and non-predicate versions of
+ // this algorithm into a single function.
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ ForwardIterator1
+ find_end_impl(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate,
+ EASTL_ITC_NS::forward_iterator_tag, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ if(first2 != last2) // We have to do this check because the search algorithm below will return first1 (and not last1) if the first2/last2 range is empty.
+ {
+ for(ForwardIterator1 result = last1; ; )
+ {
+ const ForwardIterator1 resultNext(eastl::search<ForwardIterator1, ForwardIterator2, BinaryPredicate>(first1, last1, first2, last2, predicate));
+
+ if(resultNext != last1) // If another sequence was found...
+ {
+ first1 = result = resultNext;
+ ++first1;
+ }
+ else
+ return result;
+ }
+ }
+ return last1;
+ }
+
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2, typename BinaryPredicate>
+ BidirectionalIterator1
+ find_end_impl(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ BidirectionalIterator2 first2, BidirectionalIterator2 last2,
+ BinaryPredicate predicate,
+ EASTL_ITC_NS::bidirectional_iterator_tag, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+ typedef eastl::reverse_iterator<BidirectionalIterator1> reverse_iterator1;
+ typedef eastl::reverse_iterator<BidirectionalIterator2> reverse_iterator2;
+
+ reverse_iterator1 rresult(eastl::search<reverse_iterator1, reverse_iterator2, BinaryPredicate>
+ (reverse_iterator1(last1), reverse_iterator1(first1),
+ reverse_iterator2(last2), reverse_iterator2(first2),
+ predicate));
+ if(rresult.base() != first1) // If we found something...
+ {
+ BidirectionalIterator1 result(rresult.base());
+ eastl::advance(result, -eastl::distance(first2, last2));
+ return result;
+ }
+ return last1;
+ }
+
+
+ /// find_end
+ ///
+ /// Effects: Finds a subsequence of equal values in a sequence.
+ ///
+ /// Returns: The last iterator i in the range [first1, last1 - (last2 - first2))
+ /// such that for any nonnegative integer n < (last2 - first2), the following
+ /// corresponding conditions hold: pred(*(i+n),*(first2+n)) != false. Returns
+ /// last1 if no such iterator is found.
+ ///
+ /// Complexity: At most (last2 - first2) * (last1 - first1 - (last2 - first2) + 1)
+ /// applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ inline ForwardIterator1
+ find_end(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::iterator_category IC1;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::iterator_category IC2;
+
+ return eastl::find_end_impl<ForwardIterator1, ForwardIterator2, BinaryPredicate>
+ (first1, last1, first2, last2, predicate, IC1(), IC2());
+ }
+
+
+ /// set_difference
+ ///
+ /// set_difference iterates over both input ranges and copies elements present
+ /// in the first range but not the second to the output range.
+ ///
+ /// Effects: Copies the elements of the range [first1, last1) which are not
+ /// present in the range [first2, last2) to the range beginning at result.
+ /// The elements in the constructed range are sorted.
+ ///
+ /// Requires: The input ranges must be sorted.
+ /// Requires: The output range shall not overlap with either of the original ranges.
+ ///
+ /// Returns: The end of the output range.
+ ///
+ /// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+ OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(*first1 < *first2)
+ {
+ *result = *first1;
+ ++first1;
+ ++result;
+ }
+ else if(*first2 < *first1)
+ ++first2;
+ else
+ {
+ ++first1;
+ ++first2;
+ }
+ }
+
+ return eastl::copy(first1, last1, result);
+ }
+
+
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+ OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result, Compare compare)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(compare(*first1, *first2))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+ *result = *first1;
+ ++first1;
+ ++result;
+ }
+ else if(compare(*first2, *first1))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+ ++first2;
+ }
+ else
+ {
+ ++first1;
+ ++first2;
+ }
+ }
+
+ return eastl::copy(first1, last1, result);
+ }
+
+
+ /// set_difference_2
+ ///
+ /// set_difference_2 iterates over both input ranges and copies elements present
+ /// in the first range but not the second to the first output range and copies
+ /// elements present in the second range but not in the first to the second output
+ /// range.
+ ///
+ /// Effects: Copies the elements of the range [first1, last1) which are not
+ /// present in the range [first2, last2) to the first output range beginning at
+ /// result1 AND copies the element of range [first2, last2) which are not present
+ /// in the range [first1, last) to the second output range beginning at result2.
+ /// The elements in the constructed range are sorted.
+ ///
+ /// Requires: The input ranges must be sorted.
+ /// Requires: The output ranges shall not overlap with either of the original ranges.
+ ///
+ /// Returns: Nothing.
+ ///
+ /// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+ void set_difference_2(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result1, OutputIterator result2, Compare compare)
+ {
+ while ((first1 != last1) && (first2 != last2))
+ {
+ if (compare(*first1, *first2))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+ *result1++ = *first1++;
+ }
+ else if (compare(*first2, *first1))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+ *result2++ = *first2++;
+ }
+ else
+ {
+ ++first1;
+ ++first2;
+ }
+ }
+
+ eastl::copy(first2, last2, result2);
+ eastl::copy(first1, last1, result1);
+ }
+
+ /// set_difference_2
+ ///
+ /// set_difference_2 with the default comparison object is eastl::less<>.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+ void set_difference_2(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result1, OutputIterator result2)
+ {
+ eastl::set_difference_2(first1, last1, first2, last2, result1, result2, eastl::less<>{});
+ }
+
+
+ /// set_symmetric_difference
+ ///
+ /// set_difference iterates over both input ranges and copies elements present
+ /// in the either range but not the other to the output range.
+ ///
+ /// Effects: Copies the elements of the range [first1, last1) which are not
+ /// present in the range [first2, last2), and the elements of the range [first2, last2)
+ /// which are not present in the range [first1, last1) to the range beginning at result.
+ /// The elements in the constructed range are sorted.
+ ///
+ /// Requires: The input ranges must be sorted.
+ /// Requires: The resulting range shall not overlap with either of the original ranges.
+ ///
+ /// Returns: The end of the constructed range.
+ ///
+ /// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+ OutputIterator set_symmetric_difference(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(*first1 < *first2)
+ {
+ *result = *first1;
+ ++first1;
+ ++result;
+ }
+ else if(*first2 < *first1)
+ {
+ *result = *first2;
+ ++first2;
+ ++result;
+ }
+ else
+ {
+ ++first1;
+ ++first2;
+ }
+ }
+
+ return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+ }
+
+
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+ OutputIterator set_symmetric_difference(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result, Compare compare)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(compare(*first1, *first2))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+ *result = *first1;
+ ++first1;
+ ++result;
+ }
+ else if(compare(*first2, *first1))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+ *result = *first2;
+ ++first2;
+ ++result;
+ }
+ else
+ {
+ ++first1;
+ ++first2;
+ }
+ }
+
+ return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+ }
+
+
+ /// set_intersection
+ ///
+ /// set_intersection over both ranges and copies elements present in
+ /// both ranges to the output range.
+ ///
+ /// Effects: Constructs a sorted intersection of the elements from the
+ /// two ranges; that is, the set of elements that are present in both of the ranges.
+ ///
+ /// Requires: The input ranges must be sorted.
+ /// Requires: The resulting range shall not overlap with either of the original ranges.
+ ///
+ /// Returns: The end of the constructed range.
+ ///
+ /// Complexity: At most 2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+ ///
+ /// Note: The copying operation is stable; if an element is present in both ranges,
+ /// the one from the first range is copied.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+ OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(*first1 < *first2)
+ ++first1;
+ else if(*first2 < *first1)
+ ++first2;
+ else
+ {
+ *result = *first1;
+ ++first1;
+ ++first2;
+ ++result;
+ }
+ }
+
+ return result;
+ }
+
+
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+ OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result, Compare compare)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(compare(*first1, *first2))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+ ++first1;
+ }
+ else if(compare(*first2, *first1))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+ ++first2;
+ }
+ else
+ {
+ *result = *first1;
+ ++first1;
+ ++first2;
+ ++result;
+ }
+ }
+
+ return result;
+ }
+
+
+
+ /// set_union
+ ///
+ /// set_union iterates over both ranges and copies elements present in
+ /// both ranges to the output range.
+ ///
+ /// Effects: Constructs a sorted union of the elements from the two ranges;
+ /// that is, the set of elements that are present in one or both of the ranges.
+ ///
+ /// Requires: The input ranges must be sorted.
+ /// Requires: The resulting range shall not overlap with either of the original ranges.
+ ///
+ /// Returns: The end of the constructed range.
+ ///
+ /// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+ ///
+ /// Note: The copying operation is stable; if an element is present in both ranges,
+ /// the one from the first range is copied.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+ OutputIterator set_union(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(*first1 < *first2)
+ {
+ *result = *first1;
+ ++first1;
+ }
+ else if(*first2 < *first1)
+ {
+ *result = *first2;
+ ++first2;
+ }
+ else
+ {
+ *result = *first1;
+ ++first1;
+ ++first2;
+ }
+ ++result;
+ }
+
+ return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+ }
+
+
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+ OutputIterator set_union(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator result, Compare compare)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(compare(*first1, *first2))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+ *result = *first1;
+ ++first1;
+ }
+ else if(compare(*first2, *first1))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+ *result = *first2;
+ ++first2;
+ }
+ else
+ {
+ *result = *first1;
+ ++first1;
+ ++first2;
+ }
+ ++result;
+ }
+
+ return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+ }
+
+
+ /// set_decomposition
+ ///
+ /// set_decomposition iterates over both ranges and copies elements to one of the three
+ /// categories of output ranges.
+ ///
+ /// Effects: Constructs three sorted containers of the elements from the two ranges.
+ /// * OutputIterator1 is elements only in Container1.
+ /// * OutputIterator2 is elements only in Container2.
+ /// * OutputIterator3 is elements that are in both Container1 and Container2.
+ ///
+ /// Requires: The input ranges must be sorted.
+ /// Requires: The resulting ranges shall not overlap with either of the original ranges.
+ ///
+ /// Returns: The end of the constructed range of elements in both Container1 and Container2.
+ ///
+ /// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+ ///
+ template <typename InputIterator1, typename InputIterator2,
+ typename OutputIterator1, typename OutputIterator2, typename OutputIterator3, typename Compare>
+ OutputIterator3 set_decomposition(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ OutputIterator1 result1, OutputIterator2 result2, OutputIterator3 result3, Compare compare)
+ {
+ while ((first1 != last1) && (first2 != last2))
+ {
+ if (compare(*first1, *first2))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+ *result1++ = *first1++;
+ }
+ else if (compare(*first2, *first1))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+ *result2++ = *first2++;
+ }
+ else
+ {
+ *result3++ = *first1++;
+ ++first2;
+ }
+ }
+
+ eastl::copy(first1, last1, result1);
+ eastl::copy(first2, last2, result2);
+
+ return result3;
+ }
+
+ /// set_decomposition
+ ///
+ /// set_decomposition with the default comparison object is eastl::less<>.
+ ///
+ template <typename InputIterator1, typename InputIterator2,
+ typename OutputIterator1, typename OutputIterator2, typename OutputIterator3>
+ OutputIterator3 set_decomposition(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2,
+ OutputIterator1 result1, OutputIterator2 result2, OutputIterator3 result3)
+ {
+ return eastl::set_decomposition(first1, last1, first2, last2, result1, result2, result3, eastl::less<>{});
+ }
+
+
+ /// is_permutation
+ ///
+ template<typename ForwardIterator1, typename ForwardIterator2>
+ bool is_permutation(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type;
+
+ // Skip past any equivalent initial elements.
+ while((first1 != last1) && (*first1 == *first2))
+ {
+ ++first1;
+ ++first2;
+ }
+
+ if(first1 != last1)
+ {
+ const difference_type first1Size = eastl::distance(first1, last1);
+ ForwardIterator2 last2 = first2;
+ eastl::advance(last2, first1Size);
+
+ for(ForwardIterator1 i = first1; i != last1; ++i)
+ {
+ if(i == eastl::find(first1, i, *i))
+ {
+ const difference_type c = eastl::count(first2, last2, *i);
+
+ if((c == 0) || (c != eastl::count(i, last1, *i)))
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ /// is_permutation
+ ///
+ template<typename ForwardIterator1, typename ForwardIterator2, class BinaryPredicate>
+ bool is_permutation(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type;
+
+ // Skip past any equivalent initial elements.
+ while((first1 != last1) && predicate(*first1, *first2))
+ {
+ ++first1;
+ ++first2;
+ }
+
+ if(first1 != last1)
+ {
+ const difference_type first1Size = eastl::distance(first1, last1);
+ ForwardIterator2 last2 = first2;
+ eastl::advance(last2, first1Size);
+
+ for(ForwardIterator1 i = first1; i != last1; ++i)
+ {
+ if(i == eastl::find(first1, i, *i, predicate))
+ {
+ const difference_type c = eastl::count(first2, last2, *i, predicate);
+
+ if((c == 0) || (c != eastl::count(i, last1, *i, predicate)))
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+
+ /// next_permutation
+ ///
+ /// mutates the range [first, last) to the next permutation. Returns true if the
+ /// new range is not the final permutation (sorted like the starting permutation).
+ /// Permutations start with a sorted range, and false is returned when next_permutation
+ /// results in the initial sorted range, or if the range has <= 1 element.
+ /// Note that elements are compared by operator < (as usual) and that elements deemed
+ /// equal via this are not rearranged.
+ ///
+ /// http://marknelson.us/2002/03/01/next-permutation/
+ /// Basically we start with an ordered range and reverse it's order one specifically
+ /// chosen swap and reverse at a time. It happens that this require going through every
+ /// permutation of the range. We use the same variable names as the document above.
+ ///
+ /// To consider: Significantly improved permutation/combination functionality:
+ /// http://home.roadrunner.com/~hinnant/combinations.html
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// // <populate intArray>
+ /// sort(intArray.begin(), intArray.end());
+ /// do {
+ /// // <do something with intArray>
+ /// } while(next_permutation(intArray.begin(), intArray.end()));
+ ///
+
+ template<typename BidirectionalIterator, typename Compare>
+ bool next_permutation(BidirectionalIterator first, BidirectionalIterator last, Compare compare)
+ {
+ if(first != last) // If there is anything in the range...
+ {
+ BidirectionalIterator i = last;
+
+ if(first != --i) // If the range has more than one item...
+ {
+ for(;;)
+ {
+ BidirectionalIterator ii(i), j;
+
+ if(compare(*--i, *ii)) // Find two consecutive values where the first is less than the second.
+ {
+ j = last;
+ while(!compare(*i, *--j)) // Find the final value that's greater than the first (it may be equal to the second).
+ {}
+ eastl::iter_swap(i, j); // Swap the first and the final.
+ eastl::reverse(ii, last); // Reverse the ranget from second to last.
+ return true;
+ }
+
+ if(i == first) // There are no two consecutive values where the first is less than the second, meaning the range is in reverse order. The reverse ordered range is always the last permutation.
+ {
+ eastl::reverse(first, last);
+ break; // We are done.
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+ template<typename BidirectionalIterator>
+ bool next_permutation(BidirectionalIterator first, BidirectionalIterator last)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator>::value_type value_type;
+
+ return eastl::next_permutation(first, last, eastl::less<value_type>());
+ }
+
+
+
+ /// rotate
+ ///
+ /// Effects: For each non-negative integer i < (last - first), places the element from the
+ /// position first + i into position first + (i + (last - middle)) % (last - first).
+ ///
+ /// Returns: first + (last - middle). That is, returns where first went to.
+ ///
+ /// Remarks: This is a left rotate.
+ ///
+ /// Requires: [first,middle) and [middle,last) shall be valid ranges. ForwardIterator shall
+ /// satisfy the requirements of ValueSwappable (17.6.3.2). The type of *first shall satisfy
+ /// the requirements of MoveConstructible (Table 20) and the requirements of MoveAssignable.
+ ///
+ /// Complexity: At most last - first swaps.
+ ///
+ /// Note: While rotate works on ForwardIterators (e.g. slist) and BidirectionalIterators (e.g. list),
+ /// you can get much better performance (O(1) instead of O(n)) with slist and list rotation by
+ /// doing splice operations on those lists instead of calling this rotate function.
+ ///
+ /// http://www.cs.bell-labs.com/cm/cs/pearls/s02b.pdf / http://books.google.com/books?id=kse_7qbWbjsC&pg=PA14&lpg=PA14&dq=Programming+Pearls+flipping+hands
+ /// http://books.google.com/books?id=tjOlkl7ecVQC&pg=PA189&lpg=PA189&dq=stepanov+Elements+of+Programming+rotate
+ /// http://stackoverflow.com/questions/21160875/why-is-stdrotate-so-fast
+ ///
+ /// Strategy:
+ /// - We handle the special case of (middle == first) and (middle == last) no-ops
+ /// up front in the main rotate entry point.
+ /// - There's a basic ForwardIterator implementation (rotate_general_impl) which is
+ /// a fallback implementation that's not as fast as others but works for all cases.
+ /// - There's a slightly better BidirectionalIterator implementation.
+ /// - We have specialized versions for rotating elements that are is_trivially_move_assignable.
+ /// These versions will use memmove for when we have a RandomAccessIterator.
+ /// - We have a specialized version for rotating by only a single position, as that allows us
+ /// (with any iterator type) to avoid a lot of logic involved with algorithms like "flipping hands"
+ /// and achieve near optimal O(n) behavior. it turns out that rotate-by-one is a common use
+ /// case in practice.
+ ///
+ namespace Internal
+ {
+ template<typename ForwardIterator>
+ ForwardIterator rotate_general_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last)
+ {
+ using eastl::swap;
+
+ ForwardIterator current = middle;
+
+ do {
+ swap(*first++, *current++);
+
+ if(first == middle)
+ middle = current;
+ } while(current != last);
+
+ ForwardIterator result = first;
+ current = middle;
+
+ while(current != last)
+ {
+ swap(*first++, *current++);
+
+ if(first == middle)
+ middle = current;
+ else if(current == last)
+ current = middle;
+ }
+
+ return result; // result points to first + (last - middle).
+ }
+
+
+ template <typename ForwardIterator>
+ ForwardIterator move_rotate_left_by_one(ForwardIterator first, ForwardIterator last)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+ value_type temp(eastl::move(*first));
+ ForwardIterator result = eastl::move(eastl::next(first), last, first); // Note that while our template type is BidirectionalIterator, if the actual
+ *result = eastl::move(temp); // iterator is a RandomAccessIterator then this move will be a memmove for trivial types.
+
+ return result; // result points to the final element in the range.
+ }
+
+
+ template <typename BidirectionalIterator>
+ BidirectionalIterator move_rotate_right_by_one(BidirectionalIterator first, BidirectionalIterator last)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator>::value_type value_type;
+
+ BidirectionalIterator beforeLast = eastl::prev(last);
+ value_type temp(eastl::move(*beforeLast));
+ BidirectionalIterator result = eastl::move_backward(first, beforeLast, last); // Note that while our template type is BidirectionalIterator, if the actual
+ *first = eastl::move(temp); // iterator is a RandomAccessIterator then this move will be a memmove for trivial types.
+
+ return result; // result points to the first element in the range.
+ }
+
+ template <typename /*IteratorCategory*/, bool /*is_trivially_move_assignable*/>
+ struct rotate_helper
+ {
+ template <typename ForwardIterator>
+ static ForwardIterator rotate_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last)
+ { return Internal::rotate_general_impl(first, middle, last); }
+ };
+
+ template <>
+ struct rotate_helper<EASTL_ITC_NS::forward_iterator_tag, true>
+ {
+ template <typename ForwardIterator>
+ static ForwardIterator rotate_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last)
+ {
+ if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case.
+ return Internal::move_rotate_left_by_one(first, last);
+ return Internal::rotate_general_impl(first, middle, last);
+ }
+ };
+
+ template <>
+ struct rotate_helper<EASTL_ITC_NS::bidirectional_iterator_tag, false>
+ {
+ template <typename BidirectionalIterator>
+ static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last)
+ { return Internal::rotate_general_impl(first, middle, last); } // rotate_general_impl outperforms the flipping hands algorithm.
+
+ /*
+ // Simplest "flipping hands" implementation. Disabled because it's slower on average than rotate_general_impl.
+ template <typename BidirectionalIterator>
+ static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last)
+ {
+ eastl::reverse(first, middle);
+ eastl::reverse(middle, last);
+ eastl::reverse(first, last);
+ return first + (last - middle); // This can be slow for large ranges because operator + and - are O(n).
+ }
+
+ // Smarter "flipping hands" implementation, but still disabled because benchmarks are showing it to be slower than rotate_general_impl.
+ template <typename BidirectionalIterator>
+ static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last)
+ {
+ // This is the "flipping hands" algorithm.
+ eastl::reverse_impl(first, middle, EASTL_ITC_NS::bidirectional_iterator_tag()); // Reverse the left side.
+ eastl::reverse_impl(middle, last, EASTL_ITC_NS::bidirectional_iterator_tag()); // Reverse the right side.
+
+ // Reverse the entire range.
+ while((first != middle) && (middle != last))
+ {
+ eastl::iter_swap(first, --last);
+ ++first;
+ }
+
+ if(first == middle) // Finish reversing the entire range.
+ {
+ eastl::reverse_impl(middle, last, bidirectional_iterator_tag());
+ return last;
+ }
+ else
+ {
+ eastl::reverse_impl(first, middle, bidirectional_iterator_tag());
+ return first;
+ }
+ }
+ */
+ };
+
+ template <>
+ struct rotate_helper<EASTL_ITC_NS::bidirectional_iterator_tag, true>
+ {
+ template <typename BidirectionalIterator>
+ static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last)
+ {
+ if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case.
+ return Internal::move_rotate_left_by_one(first, last);
+ if(eastl::next(middle) == last)
+ return Internal::move_rotate_right_by_one(first, last);
+ return Internal::rotate_general_impl(first, middle, last);
+ }
+ };
+
+ template <typename Integer>
+ inline Integer greatest_common_divisor(Integer x, Integer y)
+ {
+ do {
+ Integer t = (x % y);
+ x = y;
+ y = t;
+ } while(y);
+
+ return x;
+ }
+
+ template <>
+ struct rotate_helper<EASTL_ITC_NS::random_access_iterator_tag, false>
+ {
+ // This is the juggling algorithm, using move operations.
+ // In practice this implementation is about 25% faster than rotate_general_impl. We may want to
+ // consider sticking with just rotate_general_impl and avoid the code generation of this function.
+ template <typename RandomAccessIterator>
+ static RandomAccessIterator rotate_impl(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const difference_type m1 = (middle - first);
+ const difference_type m2 = (last - middle);
+ const difference_type g = Internal::greatest_common_divisor(m1, m2);
+ value_type temp;
+
+ for(RandomAccessIterator p = first + g; p != first;)
+ {
+ temp = eastl::move(*--p);
+ RandomAccessIterator p1 = p;
+ RandomAccessIterator p2 = p + m1;
+ do
+ {
+ *p1 = eastl::move(*p2);
+ p1 = p2;
+ const difference_type d = (last - p2);
+
+ if(m1 < d)
+ p2 += m1;
+ else
+ p2 = first + (m1 - d);
+ } while(p2 != p);
+
+ *p1 = eastl::move(temp);
+ }
+
+ return first + m2;
+ }
+ };
+
+ template <>
+ struct rotate_helper<EASTL_ITC_NS::random_access_iterator_tag, true>
+ {
+ // Experiments were done which tested the performance of using an intermediate buffer
+ // to do memcpy's to as opposed to executing a swapping algorithm. It turns out this is
+ // actually slower than even rotate_general_impl, partly because the average case involves
+ // memcpy'ing a quarter of the element range twice. Experiments were done with various kinds
+ // of PODs with various element counts.
+
+ template <typename RandomAccessIterator>
+ static RandomAccessIterator rotate_impl(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last)
+ {
+ if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case.
+ return Internal::move_rotate_left_by_one(first, last);
+ if(eastl::next(middle) == last)
+ return Internal::move_rotate_right_by_one(first, last);
+ if((last - first) < 32) // For small ranges rotate_general_impl is faster.
+ return Internal::rotate_general_impl(first, middle, last);
+ return Internal::rotate_helper<EASTL_ITC_NS::random_access_iterator_tag, false>::rotate_impl(first, middle, last);
+ }
+ };
+
+ } // namespace Internal
+
+
+ template <typename ForwardIterator>
+ ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator last)
+ {
+ if(middle != first)
+ {
+ if(middle != last)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+ return Internal::rotate_helper<IC, eastl::is_trivially_move_assignable<value_type>::value || // This is the best way of telling if we can move types via memmove, but without a conforming C++11 compiler it usually returns false.
+ eastl::is_pod<value_type>::value || // This is a more conservative way of telling if we can move types via memmove, and most compilers support it, but it doesn't have as full of coverage as is_trivially_move_assignable.
+ eastl::is_scalar<value_type>::value> // This is the most conservative means and works with all compilers, but works only for scalars.
+ ::rotate_impl(first, middle, last);
+ }
+
+ return first;
+ }
+
+ return last;
+ }
+
+
+
+ /// rotate_copy
+ ///
+ /// Similar to rotate except writes the output to the OutputIterator and
+ /// returns an OutputIterator to the element past the last element copied
+ /// (i.e. result + (last - first))
+ ///
+ template <typename ForwardIterator, typename OutputIterator>
+ OutputIterator rotate_copy(ForwardIterator first, ForwardIterator middle, ForwardIterator last, OutputIterator result)
+ {
+ return eastl::copy(first, middle, eastl::copy(middle, last, result));
+ }
+
+
+
+ /// clamp
+ ///
+ /// Returns a reference to a clamped value within the range of [lo, hi].
+ ///
+ /// http://en.cppreference.com/w/cpp/algorithm/clamp
+ ///
+ template <class T, class Compare>
+ EA_CONSTEXPR const T& clamp(const T& v, const T& lo, const T& hi, Compare comp)
+ {
+ EASTL_ASSERT(!comp(hi, lo));
+ return comp(v, lo) ? lo : comp(hi, v) ? hi : v;
+ }
+
+ template <class T>
+ EA_CONSTEXPR const T& clamp(const T& v, const T& lo, const T& hi)
+ {
+ return eastl::clamp(v, lo, hi, eastl::less<>());
+ }
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/allocator.h b/EASTL/include/EASTL/allocator.h
new file mode 100644
index 0000000..d645466
--- /dev/null
+++ b/EASTL/include/EASTL/allocator.h
@@ -0,0 +1,397 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ALLOCATOR_H
+#define EASTL_ALLOCATOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EABase/nullptr.h>
+#include <stddef.h>
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// alloc_flags
+ ///
+ /// Defines allocation flags.
+ ///
+ enum alloc_flags
+ {
+ MEM_TEMP = 0, // Low memory, not necessarily actually temporary.
+ MEM_PERM = 1 // High memory, for things that won't be unloaded.
+ };
+
+
+ /// allocator
+ ///
+ /// In this allocator class, note that it is not templated on any type and
+ /// instead it simply allocates blocks of memory much like the C malloc and
+ /// free functions. It can be thought of as similar to C++ std::allocator<char>.
+ /// The flags parameter has meaning that is specific to the allocation
+ ///
+ /// C++11's std::allocator (20.6.9) doesn't have a move constructor or assignment
+ /// operator. This is possibly because std::allocators are associated with types
+ /// instead of as instances. The potential non-equivalance of C++ std::allocator
+ /// instances has been a source of some acknowledged design problems.
+ /// We don't implement support for move construction or assignment in eastl::allocator,
+ /// but users can define their own allocators which do have move functions and
+ /// the eastl containers are compatible with such allocators (i.e. nothing unexpected
+ /// will happen).
+ ///
+ class EASTL_API allocator
+ {
+ public:
+ EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME));
+ allocator(const allocator& x);
+ allocator(const allocator& x, const char* pName);
+
+ allocator& operator=(const allocator& x);
+
+ void* allocate(size_t n, int flags = 0);
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+ void deallocate(void* p, size_t n);
+
+ const char* get_name() const;
+ void set_name(const char* pName);
+
+ protected:
+ #if EASTL_NAME_ENABLED
+ const char* mpName; // Debug name, used to track memory.
+ #endif
+ };
+
+ bool operator==(const allocator& a, const allocator& b);
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ bool operator!=(const allocator& a, const allocator& b);
+#endif
+
+
+ /// dummy_allocator
+ ///
+ /// Defines an allocator which does nothing. It returns NULL from allocate calls.
+ ///
+ class EASTL_API dummy_allocator
+ {
+ public:
+ EASTL_ALLOCATOR_EXPLICIT dummy_allocator(const char* = NULL) { }
+ dummy_allocator(const dummy_allocator&) { }
+ dummy_allocator(const dummy_allocator&, const char*) { }
+
+ dummy_allocator& operator=(const dummy_allocator&) { return *this; }
+
+ void* allocate(size_t, int = 0) { return NULL; }
+ void* allocate(size_t, size_t, size_t, int = 0) { return NULL; }
+ void deallocate(void*, size_t) { }
+
+ const char* get_name() const { return ""; }
+ void set_name(const char*) { }
+ };
+
+ inline bool operator==(const dummy_allocator&, const dummy_allocator&) { return true; }
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ inline bool operator!=(const dummy_allocator&, const dummy_allocator&) { return false; }
+#endif
+
+
+ /// Defines a static default allocator which is constant across all types.
+ /// This is different from get_default_allocator, which is is bound at
+ /// compile-time and expected to differ per allocator type.
+ /// Currently this Default Allocator applies only to CoreAllocatorAdapter.
+ /// To consider: This naming of this function is too similar to get_default_allocator
+ /// and instead should be named something like GetStaticDefaultAllocator.
+ EASTL_API allocator* GetDefaultAllocator();
+ EASTL_API allocator* SetDefaultAllocator(allocator* pAllocator);
+
+
+ /// get_default_allocator
+ ///
+ /// This templated function allows the user to implement a default allocator
+ /// retrieval function that any part of EASTL can use. EASTL containers take
+ /// an Allocator parameter which identifies an Allocator class to use. But
+ /// different kinds of allocators have different mechanisms for retrieving
+ /// a default allocator instance, and some don't even intrinsically support
+ /// such functionality. The user can override this get_default_allocator
+ /// function in order to provide the glue between EASTL and whatever their
+ /// system's default allocator happens to be.
+ ///
+ /// Example usage:
+ /// MyAllocatorType* gpSystemAllocator;
+ ///
+ /// MyAllocatorType* get_default_allocator(const MyAllocatorType*)
+ /// { return gpSystemAllocator; }
+ ///
+ template <typename Allocator>
+ Allocator* get_default_allocator(const Allocator*);
+
+ EASTLAllocatorType* get_default_allocator(const EASTLAllocatorType*);
+
+
+ /// default_allocfreemethod
+ ///
+ /// Implements a default allocfreemethod which uses the default global allocator.
+ /// This version supports only default alignment.
+ ///
+ void* default_allocfreemethod(size_t n, void* pBuffer, void* /*pContext*/);
+
+
+ /// allocate_memory
+ ///
+ /// This is a memory allocation dispatching function.
+ /// To do: Make aligned and unaligned specializations.
+ /// Note that to do this we will need to use a class with a static
+ /// function instead of a standalone function like below.
+ ///
+ template <typename Allocator>
+ void* allocate_memory(Allocator& a, size_t n, size_t alignment, size_t alignmentOffset);
+
+
+} // namespace eastl
+
+
+
+
+
+
+#ifndef EASTL_USER_DEFINED_ALLOCATOR // If the user hasn't declared that he has defined a different allocator implementation elsewhere...
+
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <new>
+ EA_RESTORE_ALL_VC_WARNINGS()
+
+ #if !EASTL_DLL // If building a regular library and not building EASTL as a DLL...
+ // It is expected that the application define the following
+ // versions of operator new for the application. Either that or the
+ // user needs to override the implementation of the allocator class.
+ void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+ void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+ #endif
+
+ namespace eastl
+ {
+ inline allocator::allocator(const char* EASTL_NAME(pName))
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+ }
+
+
+ inline allocator::allocator(const allocator& EASTL_NAME(alloc))
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = alloc.mpName;
+ #endif
+ }
+
+
+ inline allocator::allocator(const allocator&, const char* EASTL_NAME(pName))
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+ }
+
+
+ inline allocator& allocator::operator=(const allocator& EASTL_NAME(alloc))
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = alloc.mpName;
+ #endif
+ return *this;
+ }
+
+
+ inline const char* allocator::get_name() const
+ {
+ #if EASTL_NAME_ENABLED
+ return mpName;
+ #else
+ return EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+ }
+
+
+ inline void allocator::set_name(const char* EASTL_NAME(pName))
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName;
+ #endif
+ }
+
+
+ inline void* allocator::allocate(size_t n, int flags)
+ {
+ #if EASTL_NAME_ENABLED
+ #define pName mpName
+ #else
+ #define pName EASTL_ALLOCATOR_DEFAULT_NAME
+ #endif
+
+ #if EASTL_DLL
+ return allocate(n, EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT, 0, flags);
+ #elif (EASTL_DEBUGPARAMS_LEVEL <= 0)
+ return ::new((char*)0, flags, 0, (char*)0, 0) char[n];
+ #elif (EASTL_DEBUGPARAMS_LEVEL == 1)
+ return ::new( pName, flags, 0, (char*)0, 0) char[n];
+ #else
+ return ::new( pName, flags, 0, __FILE__, __LINE__) char[n];
+ #endif
+ }
+
+
+ inline void* allocator::allocate(size_t n, size_t alignment, size_t offset, int flags)
+ {
+ #if EASTL_DLL
+ // We currently have no support for implementing flags when
+ // using the C runtime library operator new function. The user
+ // can use SetDefaultAllocator to override the default allocator.
+ EA_UNUSED(offset); EA_UNUSED(flags);
+
+ size_t adjustedAlignment = (alignment > EA_PLATFORM_PTR_SIZE) ? alignment : EA_PLATFORM_PTR_SIZE;
+
+ void* p = new char[n + adjustedAlignment + EA_PLATFORM_PTR_SIZE];
+ void* pPlusPointerSize = (void*)((uintptr_t)p + EA_PLATFORM_PTR_SIZE);
+ void* pAligned = (void*)(((uintptr_t)pPlusPointerSize + adjustedAlignment - 1) & ~(adjustedAlignment - 1));
+
+ void** pStoredPtr = (void**)pAligned - 1;
+ EASTL_ASSERT(pStoredPtr >= p);
+ *(pStoredPtr) = p;
+
+ EASTL_ASSERT(((size_t)pAligned & ~(alignment - 1)) == (size_t)pAligned);
+
+ return pAligned;
+ #elif (EASTL_DEBUGPARAMS_LEVEL <= 0)
+ return ::new(alignment, offset, (char*)0, flags, 0, (char*)0, 0) char[n];
+ #elif (EASTL_DEBUGPARAMS_LEVEL == 1)
+ return ::new(alignment, offset, pName, flags, 0, (char*)0, 0) char[n];
+ #else
+ return ::new(alignment, offset, pName, flags, 0, __FILE__, __LINE__) char[n];
+ #endif
+
+ #undef pName // See above for the definition of this.
+ }
+
+
+ inline void allocator::deallocate(void* p, size_t)
+ {
+ #if EASTL_DLL
+ if (p != nullptr)
+ {
+ void* pOriginalAllocation = *((void**)p - 1);
+ delete[](char*)pOriginalAllocation;
+ }
+ #else
+ delete[](char*)p;
+ #endif
+ }
+
+
+ inline bool operator==(const allocator&, const allocator&)
+ {
+ return true; // All allocators are considered equal, as they merely use global new/delete.
+ }
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ inline bool operator!=(const allocator&, const allocator&)
+ {
+ return false; // All allocators are considered equal, as they merely use global new/delete.
+ }
+#endif
+
+ } // namespace eastl
+
+
+#endif // EASTL_USER_DEFINED_ALLOCATOR
+
+
+
+namespace eastl
+{
+
+ template <typename Allocator>
+ inline Allocator* get_default_allocator(const Allocator*)
+ {
+ return NULL; // By default we return NULL; the user must make specialization of this function in order to provide their own implementation.
+ }
+
+
+ inline EASTLAllocatorType* get_default_allocator(const EASTLAllocatorType*)
+ {
+ return EASTLAllocatorDefault(); // For the built-in allocator EASTLAllocatorType, we happen to already have a function for returning the default allocator instance, so we provide it.
+ }
+
+
+ inline void* default_allocfreemethod(size_t n, void* pBuffer, void* /*pContext*/)
+ {
+ EASTLAllocatorType* const pAllocator = EASTLAllocatorDefault();
+
+ if(pBuffer) // If freeing...
+ {
+ EASTLFree(*pAllocator, pBuffer, n);
+ return NULL; // The return value is meaningless for the free.
+ }
+ else // allocating
+ return EASTLAlloc(*pAllocator, n);
+ }
+
+
+ /// allocate_memory
+ ///
+ /// This is a memory allocation dispatching function.
+ /// To do: Make aligned and unaligned specializations.
+ /// Note that to do this we will need to use a class with a static
+ /// function instead of a standalone function like below.
+ ///
+ template <typename Allocator>
+ inline void* allocate_memory(Allocator& a, size_t n, size_t alignment, size_t alignmentOffset)
+ {
+ void *result;
+ if (alignment <= EASTL_ALLOCATOR_MIN_ALIGNMENT)
+ {
+ result = EASTLAlloc(a, n);
+ // Ensure the result is correctly aligned. An assertion likely indicates a mismatch between EASTL_ALLOCATOR_MIN_ALIGNMENT and the minimum alignment
+ // of EASTLAlloc. If there is a mismatch it may be necessary to define EASTL_ALLOCATOR_MIN_ALIGNMENT to be the minimum alignment of EASTLAlloc, or
+ // to increase the alignment of EASTLAlloc to match EASTL_ALLOCATOR_MIN_ALIGNMENT.
+ EASTL_ASSERT((reinterpret_cast<size_t>(result)& ~(alignment - 1)) == reinterpret_cast<size_t>(result));
+ }
+ else
+ {
+ result = EASTLAllocAligned(a, n, alignment, alignmentOffset);
+ // Ensure the result is correctly aligned. An assertion here may indicate a bug in the allocator.
+ auto resultMinusOffset = (char*)result - alignmentOffset;
+ EA_UNUSED(resultMinusOffset);
+ EASTL_ASSERT((reinterpret_cast<size_t>(resultMinusOffset)& ~(alignment - 1)) == reinterpret_cast<size_t>(resultMinusOffset));
+ }
+ return result;
+ }
+
+}
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/allocator_malloc.h b/EASTL/include/EASTL/allocator_malloc.h
new file mode 100644
index 0000000..78f4f69
--- /dev/null
+++ b/EASTL/include/EASTL/allocator_malloc.h
@@ -0,0 +1,130 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ALLOCATOR_MALLOC_H
+#define EASTL_ALLOCATOR_MALLOC_H
+
+
+#include <EABase/eahave.h>
+#include <EASTL/allocator.h>
+#include <stddef.h>
+
+
+// EASTL_ALIGNED_MALLOC_AVAILABLE
+//
+// Identifies if the standard library provides a built-in aligned version of malloc.
+// Defined as 0 or 1, depending on the standard library or platform availability.
+// None of the viable C functions provides for an aligned malloc with offset, so we
+// don't consider that supported in any case.
+//
+// Options for aligned allocations:
+// C11 aligned_alloc http://linux.die.net/man/3/aligned_alloc
+// glibc memalign http://linux.die.net/man/3/posix_memalign
+// Posix posix_memalign http://pubs.opengroup.org/onlinepubs/000095399/functions/posix_memalign.html
+// VC++ _aligned_malloc http://msdn.microsoft.com/en-us/library/8z34s9c6%28VS.80%29.aspx This is not suitable, since it has a limitation that you need to free via _aligned_free.
+//
+#if !defined EASTL_ALIGNED_MALLOC_AVAILABLE
+ #if defined(EA_PLATFORM_POSIX) && !defined(EA_PLATFORM_APPLE)
+ // memalign is more consistently available than posix_memalign, though its location isn't consistent across
+ // platforms and compiler libraries. Typically it's declared in one of three headers: stdlib.h, malloc.h, or malloc/malloc.h
+ #include <stdlib.h> // memalign, posix_memalign.
+ #define EASTL_ALIGNED_MALLOC_AVAILABLE 1
+
+ #if EA_HAS_INCLUDE_AVAILABLE
+ #if EA_HAS_INCLUDE(<malloc/malloc.h>)
+ #include <malloc/malloc.h>
+ #elif EA_HAS_INCLUDE(<malloc.h>)
+ #include <malloc.h>
+ #endif
+ #elif defined(EA_PLATFORM_BSD)
+ #include <malloc/malloc.h>
+ #elif defined(__clang__)
+ #if __has_include(<malloc/malloc.h>)
+ #include <malloc/malloc.h>
+ #elif __has_include(<malloc.h>)
+ #include <malloc.h>
+ #endif
+ #else
+ #include <malloc.h>
+ #endif
+ #else
+ #define EASTL_ALIGNED_MALLOC_AVAILABLE 0
+ #endif
+#endif
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // allocator_malloc
+ //
+ // Implements an EASTL allocator that uses malloc/free as opposed to
+ // new/delete or PPMalloc Malloc/Free.
+ //
+ // Example usage:
+ // vector<int, allocator_malloc> intVector;
+ //
+ class allocator_malloc
+ {
+ public:
+ allocator_malloc(const char* = NULL)
+ { }
+
+ allocator_malloc(const allocator_malloc&)
+ { }
+
+ allocator_malloc(const allocator_malloc&, const char*)
+ { }
+
+ allocator_malloc& operator=(const allocator_malloc&)
+ { return *this; }
+
+ bool operator==(const allocator_malloc&)
+ { return true; }
+
+ bool operator!=(const allocator_malloc&)
+ { return false; }
+
+ void* allocate(size_t n, int /*flags*/ = 0)
+ { return malloc(n); }
+
+ void* allocate(size_t n, size_t alignment, size_t alignmentOffset, int /*flags*/ = 0)
+ {
+ #if EASTL_ALIGNED_MALLOC_AVAILABLE
+ if((alignmentOffset % alignment) == 0) // We check for (offset % alignmnent == 0) instead of (offset == 0) because any block which is aligned on e.g. 64 also is aligned at an offset of 64 by definition.
+ return memalign(alignment, n); // memalign is more consistently available than posix_memalign.
+ #else
+ if((alignment <= EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT) && ((alignmentOffset % alignment) == 0))
+ return malloc(n);
+ #endif
+ return NULL;
+ }
+
+ void deallocate(void* p, size_t /*n*/)
+ { free(p); }
+
+ const char* get_name() const
+ { return "allocator_malloc"; }
+
+ void set_name(const char*)
+ { }
+ };
+
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/any.h b/EASTL/include/EASTL/any.h
new file mode 100644
index 0000000..c2ef638
--- /dev/null
+++ b/EASTL/include/EASTL/any.h
@@ -0,0 +1,652 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the eastl::any which is part of the C++ standard STL
+// library specification.
+//
+// eastl::any is a type-safe container for single values of any type. Our
+// implementation makes use of the "small local buffer" optimization to avoid
+// unnecessary dynamic memory allocation if the specified type is eligible to
+// be stored in its local buffer. The user type must satisfy the size
+// requirements and must be no-throw move-constructible to qualify for the local
+// buffer optimization.
+//
+// To consider: Implement a fixed_any<SIZE> variant to allow users to customize
+// the size of the "small local buffer" optimization.
+//
+// http://en.cppreference.com/w/cpp/utility/any
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ANY_H
+#define EASTL_ANY_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/in_place_t.h>
+#if EASTL_RTTI_ENABLED
+ #include <typeinfo>
+#endif
+#if EASTL_EXCEPTIONS_ENABLED
+ #include <exception>
+#endif
+
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////////
+ // bad_any_cast
+ //
+ // The type thrown by any_cast on failure.
+ //
+ // http://en.cppreference.com/w/cpp/utility/any/bad_any_cast
+ //
+ #if EASTL_EXCEPTIONS_ENABLED
+ struct bad_cast : std::exception
+ {
+ const char* what() const EA_NOEXCEPT EA_OVERRIDE
+ { return "bad cast"; }
+ };
+
+ struct bad_any_cast : public bad_cast
+ {
+ const char* what() const EA_NOEXCEPT EA_OVERRIDE
+ { return "bad_any_cast"; }
+ };
+ #endif
+
+ namespace Internal
+ {
+ // utility to switch between exceptions and asserts
+ inline void DoBadAnyCast()
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw bad_any_cast();
+ #else
+ EASTL_ASSERT_MSG(false, "bad_any_cast\n");
+
+ // NOTE(rparolin): CRASH!
+ // You crashed here because you requested a type that was not contained in the object.
+ // We choose to intentionally crash here instead of returning invalid data to the calling
+ // code which could cause hard to track down bugs.
+ *((volatile int*)0) = 0xDEADC0DE;
+ #endif
+ }
+
+ template<typename T, typename... Args>
+ void* DefaultConstruct(Args&&... args)
+ {
+ auto* pMem = EASTLAllocatorDefault()->allocate(sizeof(T), alignof(T), 0);
+
+ return ::new(pMem) T(eastl::forward<Args>(args)...);
+ }
+
+ template<typename T>
+ void DefaultDestroy(T* p)
+ {
+ p->~T();
+
+ EASTLAllocatorDefault()->deallocate(static_cast<void*>(p), sizeof(T));
+ }
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.7.3, class any
+ //
+ class any
+ {
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // storage_operation
+ //
+ // operations supported by the storage handler
+ //
+ enum class storage_operation
+ {
+ GET,
+ DESTROY,
+ COPY,
+ MOVE,
+ TYPE_INFO
+ };
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // storage
+ //
+ // the underlying storage type which enables the switching between objects stored in
+ // the heap and objects stored within the any type.
+ //
+ union storage
+ {
+ typedef aligned_storage_t<4 * sizeof(void*), alignment_of<void*>::value> internal_storage_t;
+
+ void* external_storage = nullptr;
+ internal_storage_t internal_storage;
+ };
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // use_internal_storage
+ //
+ // determines when the "local buffer optimization" is used
+ //
+ template <typename T>
+ using use_internal_storage = bool_constant
+ <
+ is_nothrow_move_constructible<T>::value
+ && (sizeof(T) <= sizeof(storage)) &&
+ (alignment_of<storage>::value % alignment_of<T>::value == 0)
+ >;
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // non-member friend functions
+ //
+ template <class ValueType> friend const ValueType* any_cast(const any* pAny) EA_NOEXCEPT;
+ template <class ValueType> friend ValueType* any_cast(any* pAny) EA_NOEXCEPT;
+ template <class ValueType> friend ValueType any_cast(const any& operand);
+ template <class ValueType> friend ValueType any_cast(any& operand);
+ template <class ValueType> friend ValueType any_cast(any&& operand);
+
+ //Adding Unsafe any cast operations
+ template <class ValueType> friend const ValueType* unsafe_any_cast(const any* pAny) EA_NOEXCEPT;
+ template <class ValueType> friend ValueType* unsafe_any_cast(any* pAny) EA_NOEXCEPT;
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // internal storage handler
+ //
+ template <typename T>
+ struct storage_handler_internal
+ {
+ template <typename V>
+ static void construct(storage& s, V&& v)
+ {
+ ::new(&s.internal_storage) T(eastl::forward<V>(v));
+ }
+
+ template <typename... Args>
+ static void construct_inplace(storage& s, Args... args)
+ {
+ ::new(&s.internal_storage) T(eastl::forward<Args>(args)...);
+ }
+
+ template <class NT, class U, class... Args>
+ static void construct_inplace(storage& s, std::initializer_list<U> il, Args&&... args)
+ {
+ ::new(&s.internal_storage) NT(il, eastl::forward<Args>(args)...);
+ }
+
+ static inline void destroy(any& refAny)
+ {
+ T& t = *static_cast<T*>(static_cast<void*>(&refAny.m_storage.internal_storage));
+ EA_UNUSED(t);
+ t.~T();
+
+ refAny.m_handler = nullptr;
+ }
+
+ static void* handler_func(storage_operation op, const any* pThis, any* pOther)
+ {
+ switch (op)
+ {
+ case storage_operation::GET:
+ {
+ EASTL_ASSERT(pThis);
+ return (void*)(&pThis->m_storage.internal_storage);
+ }
+ break;
+
+ case storage_operation::DESTROY:
+ {
+ EASTL_ASSERT(pThis);
+ destroy(const_cast<any&>(*pThis));
+ }
+ break;
+
+ case storage_operation::COPY:
+ {
+ EASTL_ASSERT(pThis);
+ EASTL_ASSERT(pOther);
+ construct(pOther->m_storage, *(T*)(&pThis->m_storage.internal_storage));
+ }
+ break;
+
+ case storage_operation::MOVE:
+ {
+ EASTL_ASSERT(pThis);
+ EASTL_ASSERT(pOther);
+ construct(pOther->m_storage, eastl::move(*(T*)(&pThis->m_storage.internal_storage)));
+ destroy(const_cast<any&>(*pThis));
+ }
+ break;
+
+ case storage_operation::TYPE_INFO:
+ {
+ #if EASTL_RTTI_ENABLED
+ return (void*)&typeid(T);
+ #endif
+ }
+ break;
+
+ default:
+ {
+ EASTL_ASSERT_MSG(false, "unknown storage operation\n");
+ }
+ break;
+ };
+
+ return nullptr;
+ }
+ };
+
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // external storage handler
+ //
+ template <typename T>
+ struct storage_handler_external
+ {
+ template <typename V>
+ static inline void construct(storage& s, V&& v)
+ {
+ s.external_storage = Internal::DefaultConstruct<T>(eastl::forward<V>(v));
+ }
+
+ template <typename... Args>
+ static inline void construct_inplace(storage& s, Args... args)
+ {
+ s.external_storage = Internal::DefaultConstruct<T>(eastl::forward<Args>(args)...);
+ }
+
+ template <class NT, class U, class... Args>
+ static inline void construct_inplace(storage& s, std::initializer_list<U> il, Args&&... args)
+ {
+ s.external_storage = Internal::DefaultConstruct<NT>(il, eastl::forward<Args>(args)...);
+ }
+
+ static inline void destroy(any& refAny)
+ {
+ Internal::DefaultDestroy(static_cast<T*>(refAny.m_storage.external_storage));
+
+ refAny.m_handler = nullptr;
+ }
+
+ static void* handler_func(storage_operation op, const any* pThis, any* pOther)
+ {
+ switch (op)
+ {
+ case storage_operation::GET:
+ {
+ EASTL_ASSERT(pThis);
+ EASTL_ASSERT(pThis->m_storage.external_storage);
+ return static_cast<void*>(pThis->m_storage.external_storage);
+ }
+ break;
+
+ case storage_operation::DESTROY:
+ {
+ EASTL_ASSERT(pThis);
+ destroy(*const_cast<any*>(pThis));
+ }
+ break;
+
+ case storage_operation::COPY:
+ {
+ EASTL_ASSERT(pThis);
+ EASTL_ASSERT(pOther);
+ construct(pOther->m_storage, *static_cast<T*>(pThis->m_storage.external_storage));
+ }
+ break;
+
+ case storage_operation::MOVE:
+ {
+ EASTL_ASSERT(pThis);
+ EASTL_ASSERT(pOther);
+ construct(pOther->m_storage, eastl::move(*(T*)(pThis->m_storage.external_storage)));
+ destroy(const_cast<any&>(*pThis));
+ }
+ break;
+
+ case storage_operation::TYPE_INFO:
+ {
+ #if EASTL_RTTI_ENABLED
+ return (void*)&typeid(T);
+ #endif
+ }
+ break;
+
+ default:
+ {
+ EASTL_ASSERT_MSG(false, "unknown storage operation\n");
+ }
+ break;
+ };
+
+ return nullptr;
+ }
+ };
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // storage_handler_ptr
+ //
+ // defines the function signature of the storage handler that both the internal and
+ // external storage handlers must implement to retrieve the underlying type of the any
+ // object.
+ //
+ using storage_handler_ptr = void* (*)(storage_operation, const any*, any*);
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // storage_handler
+ //
+ // based on the specified type T we select the appropriate underlying storage handler
+ // based on the 'use_internal_storage' trait.
+ //
+ template <typename T>
+ using storage_handler = typename conditional<use_internal_storage<T>::value,
+ storage_handler_internal<T>,
+ storage_handler_external<T>>::type;
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // data layout
+ //
+ storage m_storage;
+ storage_handler_ptr m_handler;
+
+ public:
+ #ifndef EA_COMPILER_GNUC
+ // TODO(rparolin): renable constexpr for GCC
+ EA_CONSTEXPR
+ #endif
+ any() EA_NOEXCEPT
+ : m_storage(), m_handler(nullptr) {}
+
+ any(const any& other) : m_handler(nullptr)
+ {
+ if (other.m_handler)
+ {
+ // NOTE(rparolin): You can not simply copy the underlying
+ // storage because it could hold a pointer to an object on the
+ // heap which breaks the copy semantics of the language.
+ other.m_handler(storage_operation::COPY, &other, this);
+ m_handler = other.m_handler;
+ }
+ }
+
+ any(any&& other) EA_NOEXCEPT : m_handler(nullptr)
+ {
+ if(other.m_handler)
+ {
+ // NOTE(rparolin): You can not simply move the underlying
+ // storage because because the storage class has effectively
+ // type erased user type so we have to defer to the handler
+ // function to get the type back and pass on the move request.
+ m_handler = eastl::move(other.m_handler);
+ other.m_handler(storage_operation::MOVE, &other, this);
+ }
+ }
+
+ ~any() { reset(); }
+
+ template <class ValueType>
+ any(ValueType&& value,
+ typename eastl::enable_if<!eastl::is_same<typename eastl::decay<ValueType>::type, any>::value>::type* = 0)
+ {
+ typedef decay_t<ValueType> DecayedValueType;
+ static_assert(is_copy_constructible<DecayedValueType>::value, "ValueType must be copy-constructible");
+ storage_handler<DecayedValueType>::construct(m_storage, eastl::forward<ValueType>(value));
+ m_handler = &storage_handler<DecayedValueType>::handler_func;
+ }
+
+ template <class T, class... Args>
+ explicit any(in_place_type_t<T>, Args&&... args)
+ {
+ typedef storage_handler<decay_t<T>> StorageHandlerT;
+ static_assert(eastl::is_constructible<T, Args...>::value, "T must be constructible with Args...");
+
+ StorageHandlerT::construct_inplace(m_storage, eastl::forward<Args>(args)...);
+ m_handler = &StorageHandlerT::handler_func;
+ }
+
+ template <class T, class U, class... Args>
+ explicit any(in_place_type_t<T>,
+ std::initializer_list<U> il,
+ Args&&... args,
+ typename eastl::enable_if<eastl::is_constructible<T, std::initializer_list<U>&, Args...>::value,
+ void>::type* = 0)
+ {
+ typedef storage_handler<decay_t<T>> StorageHandlerT;
+
+ StorageHandlerT::construct_inplace(m_storage, il, eastl::forward<Args>(args)...);
+ m_handler = &StorageHandlerT::handler_func;
+ }
+
+ // 20.7.3.2, assignments
+ template <class ValueType>
+ any& operator=(ValueType&& value)
+ {
+ static_assert(is_copy_constructible<decay_t<ValueType>>::value, "ValueType must be copy-constructible");
+ any(eastl::forward<ValueType>(value)).swap(*this);
+ return *this;
+ }
+
+ any& operator=(const any& other)
+ {
+ any(other).swap(*this);
+ return *this;
+ }
+
+ any& operator=(any&& other) EA_NOEXCEPT
+ {
+ any(eastl::move(other)).swap(*this);
+ return *this;
+ }
+
+ // 20.7.3.3, modifiers
+ #if EASTL_VARIADIC_TEMPLATES_ENABLED
+ template <class T, class... Args>
+ void emplace(Args&&... args)
+ {
+ typedef storage_handler<decay_t<T>> StorageHandlerT;
+ static_assert(eastl::is_constructible<T, Args...>::value, "T must be constructible with Args...");
+
+ reset();
+ StorageHandlerT::construct_inplace(m_storage, eastl::forward<Args>(args)...);
+ m_handler = &StorageHandlerT::handler_func;
+ }
+
+ template <class NT, class U, class... Args>
+ typename eastl::enable_if<eastl::is_constructible<NT, std::initializer_list<U>&, Args...>::value, void>::type
+ emplace(std::initializer_list<U> il, Args&&... args)
+ {
+ typedef storage_handler<decay_t<NT>> StorageHandlerT;
+
+ reset();
+ StorageHandlerT::construct_inplace(m_storage, il, eastl::forward<Args>(args)...);
+ m_handler = &StorageHandlerT::handler_func;
+ }
+ #endif
+
+ void reset() EA_NOEXCEPT
+ {
+ if(m_handler)
+ m_handler(storage_operation::DESTROY, this, nullptr);
+ }
+
+ void swap(any& other) EA_NOEXCEPT
+ {
+ if(this == &other)
+ return;
+
+ if(m_handler && other.m_handler)
+ {
+ any tmp;
+ tmp.m_handler = other.m_handler;
+ other.m_handler(storage_operation::MOVE, &other, &tmp);
+
+ other.m_handler = m_handler;
+ m_handler(storage_operation::MOVE, this, &other);
+
+ m_handler = tmp.m_handler;
+ tmp.m_handler(storage_operation::MOVE, &tmp, this);
+ }
+ else if (m_handler == nullptr && other.m_handler)
+ {
+ eastl::swap(m_handler, other.m_handler);
+ m_handler(storage_operation::MOVE, &other, this);
+ }
+ else if(m_handler && other.m_handler == nullptr)
+ {
+ eastl::swap(m_handler, other.m_handler);
+ other.m_handler(storage_operation::MOVE, this, &other);
+ }
+ //else if (m_handler == nullptr && other.m_handler == nullptr)
+ //{
+ // // nothing to swap
+ //}
+ }
+
+ // 20.7.3.4, observers
+ bool has_value() const EA_NOEXCEPT { return m_handler != nullptr; }
+
+ #if EASTL_RTTI_ENABLED
+ inline const std::type_info& type() const EA_NOEXCEPT
+ {
+ if(m_handler)
+ {
+ auto* pTypeInfo = m_handler(storage_operation::TYPE_INFO, this, nullptr);
+ return *static_cast<const std::type_info*>(pTypeInfo);
+ }
+ else
+ {
+ return typeid(void);
+ }
+ }
+ #endif
+ };
+
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // 20.7.4, non-member functions
+ //
+ inline void swap(any& rhs, any& lhs) EA_NOEXCEPT { rhs.swap(lhs); }
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // 20.7.4, The non-member any_cast functions provide type-safe access to the contained object.
+ //
+ template <class ValueType>
+ inline ValueType any_cast(const any& operand)
+ {
+ static_assert(eastl::is_reference<ValueType>::value || eastl::is_copy_constructible<ValueType>::value,
+ "ValueType must be a reference or copy constructible");
+
+ auto* p = any_cast<typename add_const<typename remove_reference<ValueType>::type>::type>(&operand);
+
+ if(p == nullptr)
+ Internal::DoBadAnyCast();
+
+ return *p;
+ }
+
+ template <class ValueType>
+ inline ValueType any_cast(any& operand)
+ {
+ static_assert(eastl::is_reference<ValueType>::value || eastl::is_copy_constructible<ValueType>::value,
+ "ValueType must be a reference or copy constructible");
+
+ auto* p = any_cast<typename remove_reference<ValueType>::type>(&operand);
+
+ if(p == nullptr)
+ Internal::DoBadAnyCast();
+
+ return *p;
+ }
+
+ template <class ValueType>
+ inline ValueType any_cast(any&& operand)
+ {
+ static_assert(eastl::is_reference<ValueType>::value || eastl::is_copy_constructible<ValueType>::value,
+ "ValueType must be a reference or copy constructible");
+
+ auto* p = any_cast<typename remove_reference<ValueType>::type>(&operand);
+
+ if (p == nullptr)
+ Internal::DoBadAnyCast();
+
+ return *p;
+ }
+
+ // NOTE(rparolin): The runtime type check was commented out because in DLL builds the templated function pointer
+ // value will be different -- completely breaking the validation mechanism. Due to the fact that eastl::any uses
+ // type erasure we can't refresh (on copy/move) the cached function pointer to the internal handler function because
+ // we don't statically know the type.
+ template <class ValueType>
+ inline const ValueType* any_cast(const any* pAny) EA_NOEXCEPT
+ {
+ return (pAny && pAny->m_handler EASTL_IF_NOT_DLL(== &any::storage_handler<decay_t<ValueType>>::handler_func)
+ #if EASTL_RTTI_ENABLED
+ && pAny->type() == typeid(typename remove_reference<ValueType>::type)
+ #endif
+ ) ?
+ static_cast<const ValueType*>(pAny->m_handler(any::storage_operation::GET, pAny, nullptr)) :
+ nullptr;
+ }
+
+ template <class ValueType>
+ inline ValueType* any_cast(any* pAny) EA_NOEXCEPT
+ {
+ return (pAny && pAny->m_handler EASTL_IF_NOT_DLL(== &any::storage_handler<decay_t<ValueType>>::handler_func)
+ #if EASTL_RTTI_ENABLED
+ && pAny->type() == typeid(typename remove_reference<ValueType>::type)
+ #endif
+ ) ?
+ static_cast<ValueType*>(pAny->m_handler(any::storage_operation::GET, pAny, nullptr)) :
+ nullptr;
+ }
+
+ //Unsafe operations - use with caution
+ template <class ValueType>
+ inline const ValueType* unsafe_any_cast(const any* pAny) EA_NOEXCEPT
+ {
+ return unsafe_any_cast<ValueType>(const_cast<any*>(pAny));
+ }
+
+ template <class ValueType>
+ inline ValueType* unsafe_any_cast(any* pAny) EA_NOEXCEPT
+ {
+ return static_cast<ValueType*>(pAny->m_handler(any::storage_operation::GET, pAny, nullptr));
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////
+ // make_any
+ //
+ #if EASTL_VARIADIC_TEMPLATES_ENABLED
+ template <class T, class... Args>
+ inline any make_any(Args&&... args)
+ {
+ return any(eastl::in_place<T>, eastl::forward<Args>(args)...);
+ }
+
+ template <class T, class U, class... Args>
+ inline any make_any(std::initializer_list<U> il, Args&&... args)
+ {
+ return any(eastl::in_place<T>, il, eastl::forward<Args>(args)...);
+ }
+ #endif
+
+} // namespace eastl
+
+#endif // EASTL_ANY_H
diff --git a/EASTL/include/EASTL/array.h b/EASTL/include/EASTL/array.h
new file mode 100644
index 0000000..34ad07d
--- /dev/null
+++ b/EASTL/include/EASTL/array.h
@@ -0,0 +1,589 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements a templated array class as per the C++ standard TR1 (technical
+// report 1, which is a list of proposed C++ library amendments).
+// The primary distinctions between this array and TR1 array are:
+// - array::size_type is defined as eastl_size_t instead of size_t in order
+// to save memory and run faster on 64 bit systems.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ARRAY_H
+#define EASTL_ARRAY_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/utility.h>
+#include <stddef.h>
+
+#if EASTL_EXCEPTIONS_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <stdexcept> // std::out_of_range, std::length_error.
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ /// array
+ ///
+ /// Implements a templated array class as per the C++ standard TR1.
+ /// This class allows you to use a built-in C style array like an STL vector.
+ /// It does not let you change its size, as it is just like a C built-in array.
+ /// Our implementation here strives to remove function call nesting, as that
+ /// makes it hard for us to profile debug builds due to function call overhead.
+ /// Note that this is intentionally a struct with public data, as per the
+ /// C++ standard update proposal requirements.
+ ///
+ /// Example usage:
+ /// array<int, 5> a = { { 0, 1, 2, 3, 4 } }; // Strict compilers such as GCC require the double brackets.
+ /// a[2] = 4;
+ /// for(array<int, 5>::iterator i = a.begin(); i < a.end(); ++i)
+ /// *i = 0;
+ ///
+ template <typename T, size_t N = 1>
+ struct array
+ {
+ public:
+ typedef array<T, N> this_type;
+ typedef T value_type;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef value_type* iterator;
+ typedef const value_type* const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+
+ public:
+ enum
+ {
+ count = N
+ };
+
+ // Note that the member data is intentionally public.
+ // This allows for aggregate initialization of the
+ // object (e.g. array<int, 5> a = { 0, 3, 2, 4 }; )
+ value_type mValue[N ? N : 1];
+
+ public:
+ // We intentionally provide no constructor, destructor, or assignment operator.
+
+ void fill(const value_type& value);
+
+ // Unlike the swap function for other containers, array::swap takes linear time,
+ // may exit via an exception, and does not cause iterators to become associated with the other container.
+ void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value);
+
+ EA_CPP14_CONSTEXPR iterator begin() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_iterator begin() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_iterator cbegin() const EA_NOEXCEPT;
+
+ EA_CPP14_CONSTEXPR iterator end() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_iterator end() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_iterator cend() const EA_NOEXCEPT;
+
+ EA_CPP14_CONSTEXPR reverse_iterator rbegin() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ EA_CPP14_CONSTEXPR reverse_iterator rend() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_reverse_iterator rend() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ EA_CPP14_CONSTEXPR bool empty() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR size_type size() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR size_type max_size() const EA_NOEXCEPT;
+
+ EA_CPP14_CONSTEXPR T* data() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const T* data() const EA_NOEXCEPT;
+
+ EA_CPP14_CONSTEXPR reference operator[](size_type i);
+ EA_CPP14_CONSTEXPR const_reference operator[](size_type i) const;
+ EA_CPP14_CONSTEXPR const_reference at(size_type i) const;
+ EA_CPP14_CONSTEXPR reference at(size_type i);
+
+ EA_CPP14_CONSTEXPR reference front();
+ EA_CPP14_CONSTEXPR const_reference front() const;
+
+ EA_CPP14_CONSTEXPR reference back();
+ EA_CPP14_CONSTEXPR const_reference back() const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ }; // class array
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // template deduction guides
+ ///////////////////////////////////////////////////////////////////////////
+ #ifdef __cpp_deduction_guides
+ template <class T, class... U> array(T, U...) -> array<T, 1 + sizeof...(U)>;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // array
+ ///////////////////////////////////////////////////////////////////////
+
+
+ template <typename T, size_t N>
+ inline void array<T, N>::fill(const value_type& value)
+ {
+ eastl::fill_n(&mValue[0], N, value);
+ }
+
+
+ template <typename T, size_t N>
+ inline void array<T, N>::swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value)
+ {
+ eastl::swap_ranges(&mValue[0], &mValue[N], &x.mValue[0]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::iterator
+ array<T, N>::begin() EA_NOEXCEPT
+ {
+ return &mValue[0];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_iterator
+ array<T, N>::begin() const EA_NOEXCEPT
+ {
+ return &mValue[0];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_iterator
+ array<T, N>::cbegin() const EA_NOEXCEPT
+ {
+ return &mValue[0];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::iterator
+ array<T, N>::end() EA_NOEXCEPT
+ {
+ return &mValue[N];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_iterator
+ array<T, N>::end() const EA_NOEXCEPT
+ {
+ return &mValue[N];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_iterator
+ array<T, N>::cend() const EA_NOEXCEPT
+ {
+ return &mValue[N];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::reverse_iterator
+ array<T, N>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(&mValue[N]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reverse_iterator
+ array<T, N>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(&mValue[N]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reverse_iterator
+ array<T, N>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(&mValue[N]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::reverse_iterator
+ array<T, N>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(&mValue[0]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reverse_iterator
+ array<T, N>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(static_cast<const_iterator>(&mValue[0]));
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reverse_iterator
+ array<T, N>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(static_cast<const_iterator>(&mValue[0]));
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::size_type
+ array<T, N>::size() const EA_NOEXCEPT
+ {
+ return (size_type)N;
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::size_type
+ array<T, N>::max_size() const EA_NOEXCEPT
+ {
+ return (size_type)N;
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline bool array<T, N>::empty() const EA_NOEXCEPT
+ {
+ return (N == 0);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
+ array<T, N>::operator[](size_type i)
+ {
+ return mValue[i];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
+ array<T, N>::operator[](size_type i) const
+ {
+ return mValue[i];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
+ array<T, N>::front()
+ {
+ return mValue[0];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
+ array<T, N>::front() const
+ {
+ return mValue[0];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
+ array<T, N>::back()
+ {
+ return mValue[N - 1];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
+ array<T, N>::back() const
+ {
+ return mValue[N - 1];
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline T* array<T, N>::data() EA_NOEXCEPT
+ {
+ return mValue;
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline const T* array<T, N>::data() const EA_NOEXCEPT
+ {
+ return mValue;
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference array<T, N>::at(size_type i) const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(i >= N))
+ throw std::out_of_range("array::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(i >= N))
+ EASTL_FAIL_MSG("array::at -- out of range");
+ #endif
+
+ return static_cast<const_reference>(mValue[i]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline typename array<T, N>::reference array<T, N>::at(size_type i)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(i >= N))
+ throw std::out_of_range("array::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(i >= N))
+ EASTL_FAIL_MSG("array::at -- out of range");
+ #endif
+
+ return static_cast<reference>(mValue[i]);
+ }
+
+
+ template <typename T, size_t N>
+ inline bool array<T, N>::validate() const
+ {
+ return true; // There is nothing to do.
+ }
+
+
+ template <typename T, size_t N>
+ inline int array<T, N>::validate_iterator(const_iterator i) const
+ {
+ if(i >= mValue)
+ {
+ if(i < (mValue + N))
+ return (isf_valid | isf_current | isf_can_dereference);
+
+ if(i <= (mValue + N))
+ return (isf_valid | isf_current);
+ }
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline bool operator==(const array<T, N>& a, const array<T, N>& b)
+ {
+ return eastl::equal(&a.mValue[0], &a.mValue[N], &b.mValue[0]);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, size_t N>
+ inline synth_three_way_result<T> operator<=>(const array<T, N>& a, const array<T,N>& b)
+ {
+ return eastl::lexicographical_compare_three_way(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N], synth_three_way{});
+ }
+#else
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline bool operator<(const array<T, N>& a, const array<T, N>& b)
+ {
+ return eastl::lexicographical_compare(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline bool operator!=(const array<T, N>& a, const array<T, N>& b)
+ {
+ return !eastl::equal(&a.mValue[0], &a.mValue[N], &b.mValue[0]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline bool operator>(const array<T, N>& a, const array<T, N>& b)
+ {
+ return eastl::lexicographical_compare(&b.mValue[0], &b.mValue[N], &a.mValue[0], &a.mValue[N]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline bool operator<=(const array<T, N>& a, const array<T, N>& b)
+ {
+ return !eastl::lexicographical_compare(&b.mValue[0], &b.mValue[N], &a.mValue[0], &a.mValue[N]);
+ }
+
+
+ template <typename T, size_t N>
+ EA_CPP14_CONSTEXPR inline bool operator>=(const array<T, N>& a, const array<T, N>& b)
+ {
+ return !eastl::lexicographical_compare(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N]);
+ }
+#endif
+
+ template <typename T, size_t N>
+ inline void swap(array<T, N>& a, array<T, N>& b)
+ {
+ eastl::swap_ranges(&a.mValue[0], &a.mValue[N], &b.mValue[0]);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // to_array
+ ///////////////////////////////////////////////////////////////////////
+ namespace internal
+ {
+ template<class T, size_t N, size_t... I>
+ EA_CONSTEXPR auto to_array(T (&a)[N], index_sequence<I...>)
+ {
+ return eastl::array<eastl::remove_cv_t<T>, N>{{a[I]...}};
+ }
+
+ template<class T, size_t N, size_t... I>
+ EA_CONSTEXPR auto to_array(T (&&a)[N], index_sequence<I...>)
+ {
+ return eastl::array<eastl::remove_cv_t<T>, N>{{eastl::move(a[I])...}};
+ }
+ }
+
+ template<class T, size_t N>
+ EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&a)[N])
+ {
+ static_assert(eastl::is_constructible_v<T, T&>, "element type T must be copy-initializable");
+ static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed");
+ return internal::to_array(a, eastl::make_index_sequence<N>{});
+ }
+
+ template<class T, size_t N>
+ EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&&a)[N])
+ {
+ static_assert(eastl::is_move_constructible_v<T>, "element type T must be move-constructible");
+ static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed");
+ return internal::to_array(eastl::move(a), eastl::make_index_sequence<N>{});
+ }
+
+#if EASTL_TUPLE_ENABLED
+
+ template <typename T, size_t N>
+ class tuple_size<array<T, N>> : public integral_constant<size_t, N>
+ {
+ };
+
+ template <typename T, size_t N>
+ class tuple_size<const array<T, N>> : public integral_constant<size_t, N>
+ {
+ };
+
+ template <size_t I, typename T, size_t N>
+ class tuple_element<I, array<T, N>>
+ {
+ public:
+ using type = T;
+ };
+
+ template <size_t I, typename T, size_t N>
+ class tuple_element<I, const array<T, N>>
+ {
+ public:
+ using type = const T;
+ };
+
+ template <size_t I>
+ struct GetArray
+ {
+ template <typename T, size_t N>
+ static EA_CONSTEXPR T& getInternal(array<T, N>& a)
+ {
+ return a[I];
+ }
+
+ template <typename T, size_t N>
+ static EA_CONSTEXPR const T& getInternal(const array<T, N>& a)
+ {
+ return a[I];
+ }
+
+ template <typename T, size_t N>
+ static EA_CONSTEXPR T&& getInternal(array<T, N>&& a)
+ {
+ return eastl::forward<T>(a[I]);
+ }
+ };
+
+ template <size_t I, typename T, size_t N>
+ EA_CONSTEXPR tuple_element_t<I, array<T, N>>& get(array<T, N>& p)
+ {
+ return GetArray<I>::getInternal(p);
+ }
+
+ template <size_t I, typename T, size_t N>
+ EA_CONSTEXPR const tuple_element_t<I, array<T, N>>& get(const array<T, N>& p)
+ {
+ return GetArray<I>::getInternal(p);
+ }
+
+ template <size_t I, typename T, size_t N>
+ EA_CONSTEXPR tuple_element_t<I, array<T, N>>&& get(array<T, N>&& p)
+ {
+ return GetArray<I>::getInternal(eastl::move(p));
+ }
+
+#endif // EASTL_TUPLE_ENABLED
+
+
+} // namespace eastl
+
+///////////////////////////////////////////////////////////////
+// C++17 structured binding support for eastl::array
+//
+#ifndef EA_COMPILER_NO_STRUCTURED_BINDING
+ #include <tuple>
+
+ template <typename T, size_t N>
+ class std::tuple_size<::eastl::array<T, N>> : public ::eastl::integral_constant<size_t, N>
+ {
+ };
+
+ template <size_t I, typename T, size_t N>
+ struct std::tuple_element<I, ::eastl::array<T, N>>
+ {
+ static_assert(I < N, "index is out of bounds");
+ using type = T;
+ };
+#endif // EA_COMPILER_NO_STRUCTURED_BINDING
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/atomic.h b/EASTL/include/EASTL/atomic.h
new file mode 100644
index 0000000..27117e9
--- /dev/null
+++ b/EASTL/include/EASTL/atomic.h
@@ -0,0 +1,1772 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_H
+#define EASTL_ATOMIC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Below is the documentation of the API of the eastl::atomic<T> library.
+// This includes class and free functions.
+// Anything marked with a '+' in front of the name is an extension to the std API.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic<T> memory_order API
+//
+// See below for full explanations on the memory orders and their guarantees.
+//
+// - eastl::memory_order_relaxed
+// - eastl::memory_order_acquire
+// - eastl::memory_order_release
+// - eastl::memory_order_acq_rel
+// - eastl::memory_order_seq_cst
+// - +eastl::memory_order_read_depends
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic<T> class API
+//
+// All jargon and prerequisite knowledge is explained below.
+//
+// Unless otherwise specified all orders except read_depends is a valid order
+// on the given operation.
+// Unless otherwise specified all operations are valid on all types T.
+// If no order is provided, seq_cst memory ordering is used for the operation.
+//
+// - atomic() : Value-initializes the underlying object as T{}.
+//
+// - atomic(T) : Initializes the underlying object with a copy of T.
+//
+// - T operator=(T) : Atomically assigns T as store(T, seq_cst).
+//
+// - is_lock_free() : true if the operations are lockfree. Always true for eastl.
+//
+// - store(T, order) : Atomically stores T affecting memory according to order.
+// : Valid orders are relaxed, release, and seq_cst.
+//
+// - T load(order) : Atomically loads T affecting memory according to order.
+// : Valid orders are relaxed, acquire, and seq_cst.
+// : If T is a pointer type, read_depends is another valid order.
+//
+// - operator T() : Atomically loads T as load(T, seq_cst).
+//
+// - T exchange(T, order) : Atomically performs a RMW that replaces the current value with T.
+// : Memory is affected according to order.
+// : Returns the previous value stored before the RMW operation.
+//
+// - bool compare_exchange_weak(T&, T, successOrder, failOrder)
+// : Atomically compares the value stored with that of T& and if equal replaces it with T.
+// : This is a RMW operation.
+// : If the comparison fails, loads the observed value into T&. This is a load operation.
+// : Memory is affected in the RMW operation according to successOrder.
+// : Memory is affected in the load operation according to failOrder.
+// : failOrder cannot be a stronger order than successOrder.
+// : Returns true or false if the comparison succeeded and T was stored into the atomic object.
+// :
+// : The weak variant may fail even if the observed value of the atomic object equals T&.
+// : This can yield performance gains on platforms with ld/str exclusive pair instructions especially
+// : when the compare_exchange operation is done in a loop.
+// : Only the bool return value can be used to determine if the operation was successful.
+//
+// - bool compare_exchange_weak(T&, T, order)
+// : Same as the above except that order is used for both the RMW and the load operation.
+// : If order == acq_rel then the order of the load operation equals acquire.
+// : If order == release then the order of the load operation equals relaxed.
+//
+// - bool compare_exchange_strong(T&, T, successOrder, failOrder)
+// - bool compare_exchange_strong(T&, T, order)
+// : This operation is the same as the above weak variants
+// : expect that it will not fail spuriously if the value stored equals T&.
+//
+// The below operations are only valid for Integral types.
+//
+// - T fetch_add(T, order)
+// : Atomically performs a RMW that increments the value stored with T.
+// : Returns the previous value stored before the RMW operation.
+// - T fetch_sub(T, order)
+// : Atomically performs a RMW that decrements the value stored with T.
+// : Returns the previous value stored before the RMW operation.
+// - T fetch_and(T, order)
+// : Atomically performs a RMW that bit-wise and's the value stored with T.
+// : Returns the previous value stored before the RMW operation.
+// - T fetch_or(T, order)
+// : Atomically performs a RMW that bit-wise or's the value stored with T.
+// : Returns the previous value stored before the RMW operation.
+// - T fetch_xor(T, order)
+// : Atomically performs a RMW that bit-wise xor's the value stored with T.
+// : Returns the previous value stored before the RMW operation.
+//
+// - +T add_fetch(T, order)
+// : Atomically performs a RMW that increments the value stored with T.
+// : Returns the new updated value after the operation.
+// - +T sub_fetch(T, order)
+// : Atomically performs a RMW that decrements the value stored with T.
+// : Returns the new updated value after the operation.
+// - +T and_fetch(T, order)
+// : Atomically performs a RMW that bit-wise and's the value stored with T.
+// : Returns the new updated value after the operation.
+// - +T or_fetch(T, order)
+// : Atomically performs a RMW that bit-wise or's the value stored with T.
+// : Returns the new updated value after the operation.
+// - +T xor_fetch(T, order)
+// : Atomically performs a RMW that bit-wise xor's the value stored with T.
+// : Returns the new updated value after the operation.
+//
+// - T operator++/--()
+// : Atomically increments or decrements the atomic value by one.
+// : Returns the previous value stored before the RMW operation.
+// : Memory is affected according to seq_cst ordering.
+//
+// - T ++/--operator()
+// : Atomically increments or decrements the atomic value by one.
+// : Returns the new updated value after the RMW operation.
+// : Memory is affected according to seq_cst ordering.
+//
+// - T operator+=/-=/&=/|=/^=(T)
+// : Atomically adds, subtracts, bitwise and/or/xor the atomic object with T.
+// : Returns the new updated value after the operation.
+// : Memory is affected according to seq_cst ordering.
+//
+//
+// The below operations are only valid for Pointer types
+//
+// - T* fetch_add(ptrdiff_t val, order)
+// : Atomically performs a RMW that increments the value store with sizeof(T) * val
+// : Returns the previous value stored before the RMW operation.
+// - T* fetch_sub(ptrdiff_t val, order)
+// : Atomically performs a RMW that decrements the value store with sizeof(T) * val
+// : Returns the previous value stored before the RMW operation.
+//
+// - +T* add_fetch(ptrdiff_t val, order)
+// : Atomically performs a RMW that increments the value store with sizeof(T) * val
+// : Returns the new updated value after the operation.
+// - +T* sub_fetch(ptrdiff_t val, order)
+// : Atomically performs a RMW that decrements the value store with sizeof(T) * val
+// : Returns the new updated value after the operation.
+//
+// - T* operator++/--()
+// : Atomically increments or decrements the atomic value by sizeof(T) * 1.
+// : Returns the previous value stored before the RMW operation.
+// : Memory is affected according to seq_cst ordering.
+//
+// - T* ++/--operator()
+// : Atomically increments or decrements the atomic value by sizeof(T) * 1.
+// : Returns the new updated value after the RMW operation.
+// : Memory is affected according to seq_cst ordering.
+//
+//
+// - +EASTL_ATOMIC_HAS_[len]BIT Macro Definitions
+// These macros provide the ability to compile-time switch on the availability of support for the specific
+// bit width of an atomic object.
+// Example:
+//
+// #if defined(EASTL_ATOMIC_HAS_128BIT)
+// #endif
+//
+// Indicates the support for 128-bit atomic operations on an eastl::atomic<T> object.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic_flag class API
+//
+// Unless otherwise specified all orders except read_depends is a valid order
+// on the given operation.
+//
+// - atomic_flag() : Initializes the flag to false.
+//
+// - clear(order)
+// : Atomically stores the value false to the flag.
+// : Valid orders are relaxed, release, and seq_cst.
+//
+// - bool test_and_set(order)
+// : Atomically exchanges flag with true and returns the previous value that was held.
+//
+// - bool test(order)
+// : Atomically loads the flag value.
+// : Valid orders are relaxed, acquire, and seq_cst.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic standalone free function API
+//
+// All class methods have a standalone free function that takes a pointer to the
+// atomic object as the first argument. These functions just call the correct method
+// on the atomic object for the given operation.
+// These functions come in two variants, a non-explicit and an explicit variant
+// that take on the form atomic_op() and atomic_op_explicit() respectively.
+// The non-explicit variants take no order arguments and thus are all seq_cst.
+// The explicit variants take an order argument.
+// Only the standalone functions that do not have a class method equivalent pair will be
+// documented here which includes all new extensions to the std API.
+//
+// - +compiler_barrier()
+// : Read-Write Compiler Barrier.
+// - +compiler_barrier_data_dependency(const T&)
+// : Read-Write Compiler Barrier.
+// : Applies a fake input dependency on const T& so the compiler believes said variable is used.
+// : Useful for example when writing benchmark or testing code with local variables that must not get dead-store eliminated.
+// - +cpu_pause()
+// : Prevents speculative memory order violations in spin-wait loops.
+// : Allows giving up core resources, execution units, to other threads while in spin-wait loops.
+// - atomic_thread_fence(order)
+// : Read docs below.
+// - atomic_signal_fence(order)
+// : Prevents reordering with a signal handler.
+// - +atomic_load_cond(const eastl::atomic<T>*, Predicate)
+// : continuously loads the atomic object until Predicate is true
+// : will properly ensure the spin-wait loop is optimal
+// : very useful when needing to spin-wait for some condition to be true which is common is many lock-free algorithms
+// : Memory is affected according to seq_cst ordering.
+// - +atomic_load_cond_explicit(const eastl::atomic<T>*, Predicate, Order)
+// : Same as above but takes an order for how memory is affected
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Deviations from the standard. This does not include new features added:
+//
+// 1.
+// Description: Atomics are always lock free
+// Reasoning : We don't want people to fall into performance traps where implicit locking
+// is done. If your user defined type is large enough to not support atomic
+// instructions then your user code should do the locking.
+//
+// 2.
+// Description: Atomic objects can not be volatile
+// Reasoning : Volatile objects do not make sense in the context of eastl::atomic<T>.
+// Use the given memory orders to get the ordering you need.
+// Atomic objects have to become visible on the bus. See below for details.
+//
+// 3.
+// Description: Consume memory order is not supported
+// Reasoning : See below for the reasoning.
+//
+// 4.
+// Description: ATOMIC_INIT() macros and the ATOMIC_LOCK_FREE macros are not implemented
+// Reasoning : Use the is_lock_free() method instead of the macros.
+// ATOMIC_INIT() macros aren't needed since the default constructor value initializes.
+//
+// 5.
+// Description: compare_exchange failure memory order cannot be stronger than success memory order
+// Reasoning : Besides the argument that it ideologically does not make sense that a failure
+// of the atomic operation shouldn't have a stricter ordering guarantee than the
+// success of it; if that is required then just make the whole operation stronger.
+// This ability was added and allowed in C++17 only which makes supporting multiple
+// C++ versions harder when using the compiler provided intrinsics since their behaviour
+// is reliant on the C++ version being compiled. Also makes it harder to reason about code
+// using these atomic ops since C++ versions vary the behaviour. We have also noticed
+// that versions of compilers that say they support C++17 do not properly adhere to this
+// new requirement in their intrinsics. Thus we will not support this.
+//
+// 6.
+// Description: All memory orders are distinct types instead of enum values
+// Reasoning : This will not affect how the API is used in user code.
+// It allows us to statically assert on invalid memory orders since they are compile-time types
+// instead of potentially runtime enum values.
+// Allows for more efficient code gen without the use of switch statements or if-else conditionals
+// on the memory order enum values on compilers that do not provide intrinsics that take in a
+// memory order, such as MSVC, especially in debug and debug-opt builds.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// ******** DISCLAIMER ********
+//
+// This documentation is not meant to provide rigorous proofs on the memory models
+// of specific architectures or the C++ memory model introduced in C++11. It is not
+// meant to provide formal mathematical definitions and logic that shows that a given
+// implementation adheres to the C++ memory model. This isn't meant to be some infallible
+// oracle on memory models, barriers, observers, and architecture implementation details.
+// What I do hope a reader gets out of this is the following. An understanding of the C++
+// memory model and how that relates to implementations on various architectures. Various
+// phenomena and ways that compilers and architectures can steer away from a sequentially
+// consistent system. To provide examples on how to use this library with common patterns
+// that will be seen in many code bases. Lastly I would like to provide insight and
+// further readings into the lesser known topics that aren't shared outside people
+// who live in this space and why certain things are done the way they are
+// such as cumulativity of memory barriers as one example. Sometimes specifying barriers
+// as LDLD/LDST/STST/STLD doesn't actually cut it, and finer grain semantics are needed
+// to describe cumulativity of memory barriers.
+//
+// ******** Layout of the Documentation ********
+//
+// This document will first go through a variety of different hardware architectures with examples of the various kinds of
+// reordering that is allowed by these architectures. We will use the memory barriers provided by the hardware to "fix" these
+// examples.
+// Then we will introduce the C++ memory model and revisit the examples using the platform agnostic abstract memory model to "fix"
+// them.
+// The hope here is that we get a sense of the various types of architectures and weak memory consistency provided by them and thus
+// an appreciation for the design of the C++ abstract memory model.
+//
+// ******** REFERENCES ********
+// [1] Dekker's mutual exclusion algorithm made RW-safe
+// [2] Handling Memory Ordering in Multithreaded Applications with Oracle Solaris
+// [3] Evaluating the Cost of Atomic Operations on Modern Architectures
+// [4] A Tutorial Introduction to the ARM and POWER Relaxed Memory Models
+// [5] Memory Barriers: a Hardware View for Software Hackers
+// [6] Memory Model = Instruction Reordering + Store Atomicity
+// [7] ArMOR: Defending Against Memory Consistency Model Mismatches in Heterogeneous Architectures
+// [8] Weak Memory Models: Balancing Definitional Simplicity and Implementation Flexibility
+// [9] Repairing Sequential Consistency in C/C++11
+// [10] A high-level operational semantics for hardware weak memory models
+// [11] x86-TSO: A Rigorous and Usable Programmer's Model for x86 Multiprocessors
+// [12] Simplifying ARM Concurrency: Multicopy-Atomic Axiomatic and Operational Models for ARMv8
+// [13] Mixed-size Concurrency: ARM, POWER, C/C++11, and SC
+// [14] P0668R4: Revising the C++ memory model
+// [15] Constructing a Weak Memory Model
+// [16] The Superfluous Load Queue
+// [17] P0190R1: Proposal for New memory_order_consume Definition
+//
+// ******** What does it mean to be Atomic? ********
+//
+// The word atomic has been overloaded and can mean a lot of different things depending on the context,
+// so let's digest it.
+//
+// The first attribute for something to be atomic is that concurrent stores and loads
+// must not tear or shear. This means if two threads write 0x01 and 0x02 at the same time
+// then the only values that should ever be observed is 0x01 or 0x02. We can only see
+// the whole write of 0x01 or 0x02, not 0x03 as an example. Many algorithms rely on
+// this property; only very few such a Dekker's algorithm for mutual exclusion don't.
+// Well actually a recent paper, [1], showed that Dekker's isn't safe without atomic
+// loads and stores so this property is pretty fundamental and also hard to prove that
+// your algorithm is safe without this property on loads and stores.
+//
+// We need to ensure the compiler emits a single load instruction.
+// If we are doing 64-bit loads on a 32-bit platform, we need to ensure the load is one
+// instruction instead of 2 32-bit loads into two registers.
+// Another example is if we have this struct, struct { int32_t i; int32_t k; }, even on
+// a 64-bit system we have to ensure the compiler does one 64-bit load and not two
+// 32-bit loads for each individual member.
+//
+// We also need to ensure the correct instruction is emitted. A general load instruction
+// to do a 64-bit load on a 32-bit platform may perform a 64-bit load but it may not
+// be atomic, it may be turned into two 32-bit loads behind the scenes in the cpu.
+// For example on ARMv7 we would have to use ldrexd not ldrd for 64-bit loads
+// on a 32-bit ARMv7 core.
+//
+// An operation may be considered atomic if multiple sub-operations are done as one
+// transactional unit. This is commonly known as a Read-Modify-Write, RMW, operation.
+// Take a simple add operation; it is actually a load from memory into a register,
+// a modification of said register and then a store back to memory. If two threads
+// concurrently execute this add operation on the same memory location; any interleaving
+// of the 3 sub-operations is possible. It is possible that if the initial value is 0,
+// the result may be 1 because each thread executed in lockstep both loading 0, adding 1
+// and then storing 1. A RMW operation may be considered atomic if the whole sequence of
+// sub-operations are serialized as one transactional unit.
+//
+// Atomicity may also refer to the order in which memory operations are observed and the
+// dependencies between memory operations to different memory locations. As a quick example
+// into the very thing we will be deep diving into that is not very intuitive. If I do, [STORE(A, 2); STORE(B, 1);],
+// in one thread and another thread does, [r0 = LOAD(B); r1 = LOAD(A);]; if r0 == 1, thus we observed
+// the store to B, will we observe r1 == 2. Our intuition tells us that well A was stored
+// first and then B, so if I read the new value of B then I must also read the new value
+// of A since the store to A happened before B so if I can see B then I must be able to
+// see everything before B which includes A.
+// This highlights the ordering of memory operations and why memory barriers and memory
+// models are so heavily attached to atomic operations because one could classify something
+// is atomic if the dependency highlighted in the above example is allowed to be maintained.
+//
+// This is what people mean when you hear that volatile does NOT mean atomicity of the operation.
+// Usually people imply a lot of implicit assumptions when they mark a variable as volatile.
+// All volatile gives us is the ability to tell the compiler it may not assume anything
+// about the state of that memory location. This means the compiler must always emit a load
+// or store instruction, cannot perform constant folding, dead-store elimination, or
+// do any sort of code movement on volatile variables.
+//
+// ******** Preliminary Basics ********
+//
+// It is expected that the reader understands what a cache is, how it is organized and how data
+// is chunked into cachelines. It is helpful if the reader understands basic cache coherency
+// protocols such as MSI or MESI.
+// It is expected the reader understands alignment, especially natural alignment
+// of the processor and why alignment is important for data access.
+// The reader should have some understanding of how a processor executes instructions,
+// basics of what Out-of-Order execution means and basics of what speculative execution means.
+// It is expected that the reader has an understanding of threading, multi-threaded programming
+// and the use of concurrency primitives such as mutexes.
+// Memory Barrier, Barrier, Memory Fence and Fence are all interchangeable synonyms.
+//
+// Independent memory operations can be performed or observed, depending on your perspective,
+// in any order as long as the local cpu thinks its execution is happening in program order.
+// This can be a problem for inter-cpu communications and thus we need some way to enforce
+// that the compiler does not reorder instructions and that the cpu also does not reorder
+// instructions. This is what a barrier is, it is an enforcement of ordering on memory instructions,
+// so as the name suggests a barrier. Barriers can be one-sided or both-sided which means
+// the barrier enforces a partial order above or below or on both sides of said barrier.
+//
+// Processors will use tricks such as out-of-order execution, memory instruction buffering and
+// combining, speculative loads and speculative execution, branch prediction and many types of caching even
+// in various interconnects from the cpu to the memory itself. One key thing to note is that cpus
+// do not physically reorder the instruction stream. Instructions are dispatched and retired
+// in-order but executed out-of-order. Memory barriers will prevent these tricks from happening
+// by controlling the interaction of multiple cpus.
+//
+// Compilers will morph your code and physically move instructions around as long as the program
+// has the same observed behaviour. This is becoming increasingly true with more optimization techniques
+// such as Link Time Optimization becoming the norm where once people assumed compilers couldn't assume
+// something outside the given TU and now because they have the whole program view they know everything.
+// This means the compiler does indeed alter the instruction stream
+// and compiler barriers are a way to tell them to not move any memory instructions across the barrier.
+// This does not prevent a compiler from doing optimizations such as constant folding, merging of
+// overlapping loads, or even dead store elimination. Compiler barriers are also very cheap and
+// have zero impact on anything that the compiler knows isn't visible in memory such as local variables
+// whose addresses do not escape the function even if their address is taken. You can think of it
+// in terms of a sequence point as used with "volatile" qualified variables to denote a place in code where
+// things must be stable and the compiler doesn't cache any variables in registers or do any reordering.
+//
+// Memory Barriers come in many flavours that instill a partial or full ordering on memory operations.
+// Some memory operations themselves have implicit ordering guarantees already, for example
+// Total-Store Order, TSO, architectures like x86 guarantee that a store operation cannot be reordered with a
+// previous store operation thus a memory barrier that only orders stores is not needed
+// on this architecture other than ensuring the compiler doesn't do any shenanigans.
+// Considering we have 4 permutations of memory operations; a common way to describe an ordering
+// is via Load-Load/LDLD, Load-Store/LDST, Store-Store/STST or Store-Load/STLD notation. You read this
+// notation as follows; STLD memory barrier means a load cannot be reordered with a previous store.
+// For example, on TSO architecture we can say all stores provide a STST memory barrier,
+// since a store cannot be reordered with a previous store.
+//
+// Memory Barriers in itself are not a magic bullet, they come with caveats that must be known.
+// Each cpu architecture also has its own flavours and guarantees provided by said memory barriers.
+// There is no guarantee that memory instructions specified before a memory barrier will complete,
+// be written to memory or fully propagated throughout the rest of the system, when the memory barrier
+// instruction completes. The memory barrier creates a point in that local cpus queue of memory instructions
+// whereby they must not cross. There is no guarantee that using a memory barrier on one cpu will have
+// any effect at all on another remote cpu's observed view of memory. This also implies that executing
+// a memory barrier does not hinder, incur, stall or enforce any other cpus to serialize with each other cpu.
+// In order for a remote cpu to observe the correct effects it must also use a matching memory barrier.
+// This means code communicating in 2 threads through memory must both be employing the use of memory barriers.
+// For example, a store memory barrier that only orders stores, STST, in one thread must be paired with a load memory barrier
+// that only orders loads, LDLD, in the other thread trying to observe those stores in the correct order.
+//
+// ******** Memory Types && Devices ********
+//
+// eastl::atomic<T> and accompanying memory barriers ONLY ORDER MEMORY to cpu-to-cpu communication through whatever the
+// processor designates as normal cacheable memory. It does not order memory to devices. It does not provide any DMA ordering guarantees.
+// It does not order memory with other memory types such as Write Combining. It strictly orders memory only to shared memory that is used
+// to communicate between cpus only.
+//
+// ******** Sequentially Consistent Machine ********
+//
+// The most intuitive as well as the model people naturally expect a concurrent system to have is Sequential Consistency.
+// You may have or definitely have heard this term if you dealt with any type of distributed system. Lamport's definition
+// articulates this consistency model the best.
+// Leslie Lamport: "the result of any execution is the same as if the operations of all the processors were executed in some
+// sequential order, and the operations of each individual processor appear in this sequence in the order
+// specified by its program".
+//
+// A Sequentially Consistent machine is modelled as follows:
+//
+// ------------ ------------
+// | Thread 0 | ... | Thread N |
+// ------------ ------------
+// | | | |
+// | | | |
+// ----------------------------------------
+// | |
+// | Shared Memory |
+// | |
+// ----------------------------------------
+//
+// This is a sequentially consistent machine. Each thread is executing instructions in program order which does loads and stores
+// that are serialized in some order to the shared memory. This means all communication is done through the shared memory with one cpu
+// doing one access at a time. This system has a couple key properties.
+//
+// 1. There is no local cpu memory reordering. Each cpu executes instructions in program order and all loads and stores must complete,
+// be visible in the shared memory or be visible in a register before starting the next instruction.
+// 2. Each memory operation becomes visible to all cpus at the same time. If a store hits the shared memory, then all subsequent loads
+// from every other cpu will always see the latest store.
+//
+// A Sequentially Consistent machine has, Single-Copy Store Atomicity: All stores must become visible to all cores in the system at the same time.
+//
+// ******** Adding Caches ********
+//
+// Caches by nature implicitly add the potential for memory reordering. A centralized shared snoopy bus that we all learned in school
+// makes it easy to implement sequential consistency with caches. Writes and reads are all serialized in a total order via the cache bus transaction
+// ordering. Every modern day bus is not inorder, and most certainly not a shared centralized bus. Cache coherency guarantees that all memory operations
+// will be propagated eventually to all parties, but it doesn't guarantee in what order or in what time frame. Once you add
+// caches, various levels of caching and various interconnects between remote cpus, you inevitably run into the issue where
+// some cpus observe the effects of a store before other cpus. Obviously we have weakly-ordered and strongly-ordered cpus with
+// caches so why is that? The short answer is, where is the onus put, is it on the programmer or the hardware. Does the hardware
+// have dependency tracking, is it able to determine when a memory order violation occurs such as rolling back its speculative execution
+// and also how far along the chain of interconnects does the hardware wait before it determines that the memory operation has
+// been acknowledged or is considered to satisfy its memory ordering guarantees. Again this is a very high level view of the system
+// as a whole, but the takeaway is yes; caches do add the potential for reordering but other supporting hardware determines whether
+// that is observable by the programmer. There is also some debate whether weakly-ordered processors are actually more performant
+// than strongly-ordered cpus eluding to the fact that the hardware has a better picture of what is a violation versus the programmer
+// having to emit far more barriers on weakly-ordered architectures in multi-threaded code which may actually not be needed because the
+// hardware didn't commit a violation but it may have and we as the programmer cannot rely on may haves.
+//
+// ******** Store Buffers ********
+//
+// Obviously having all stores serialize results in unnecessary stalls. Store buffers alleviate this issue.
+// Store buffers are simple fixed size structures that sit between the cpu and the memory hierarchy. This allows
+// each cpu to record its write in the store buffer and then move onto the next instruction. The store buffer will
+// eventually be flushed to the resulting memory hierarchy in FIFO order. How and when this flushing occurs is irrelevant to the
+// understanding of a store buffer. A read from an address will grab the most recent write to the same address in the store buffer.
+//
+// The introduction of a store buffer is our first dive into weaker memory consistency. The addition of this hardware turns the consistency model weaker,
+// into one that is commonly known as TSO, Total-Store Order. This is the exact model used by x86 cpus and we will see what this means
+// and what new effects are observed with the addition of the store buffer. Below is a diagram of how the machine may now look.
+// This type of store buffer is known as a FIFO store buffer, FIFO write buffer, or Load/Store Queue in some literature. This type of
+// store buffer introduces STLD reordering but still prevents STST reordering. We will take a look at another type of store buffer later.
+// Even with this store buffer, stores to the same address can still be merged so that only the latest store is written to the cache assuming
+// no other intermediary stores happen. x86 cpus do write merging even for consecutive stores, i.e. storing to A and A+1 can be merged into one two-byte store.
+//
+// ------------ ------------
+// | Thread 0 | ... | Thread N |
+// ------------ ------------
+// | | | |
+// | | | |
+// | Store | | Store |
+// | Buffer | | Buffer |
+// | | | |
+// ----------------------------------------
+// | |
+// | Shared Memory |
+// | |
+// ----------------------------------------
+//
+// ---- Store-Buffering / Dekker's Example ----
+// This is a very common litmus test that showcases the introduction of STLD reordering. It is called Store-Buffering example because it is the only weaker
+// behaviour observed under TSO and also called Dekker's Example as it famously breaks Dekker's mutual exclusion algorithm.
+//
+// ---------------------------
+// Initial State:
+// x = 0; y = 0;
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// STORE(x, 1) | STORE(y, 1)
+// r0 = LOAD(y) | r1 = LOAD(x)
+// ---------------------------
+// Observed: r0 = 0 && r1 = 0
+// ---------------------------
+//
+// We would normally assume that any interleaving of the two threads cannot possibly end up with both loads reading 0. We assume that the observed outcome
+// of r0 = 0 && r1 = 0 to be impossible, clearly that is not the case. Let's start by understanding the example with no reordering possible. Both threads
+// run and their first instruction is to write the value 1 into either x or y, the next instruction then loads from the opposite variable. This means no
+// matter the interleaving, one of the loads always executes after the other thread's store to that variable.
+// We could observe r0 = 1 && r1 = 1 if both threads execute in lockstep.
+// We could observe r0 = 0 && r1 = 1 if thread 0 executes and then thread 1 executes.
+// We could observe r0 = 1 && r1 = 0 if thread 1 executes and then thread 0 executes.
+// Since the stores always execute before that load in the other thread, one thread must always at least observe a store, so let's see why store buffers break this.
+//
+// What will happen is that STORE(x, 1) is stored to the store buffer but not made globally visible yet.
+// STORE(y, 1) is written to the store buffer and also is not made globally visible yet.
+// Both loads now read the initial state of x and y which is 0. We got the r0 = 0 && r1 = 0 outcome and just observed a Store-Load reordering.
+// It has appeared as if the loads have been reordered with the previous stores and thus executed before the stores.
+// Notice even if we execute the instructions in order, a series of other hardware side effects made it appear as if the instructions have been reordered.
+// We can solve this by placing a Store-Load barrier after the store and before the load as follows.
+//
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// STORE(x, 1) | STORE(y, 1)
+// STLD BARRIER | STLD BARRIER
+// r0 = LOAD(y) | r1 = LOAD(x)
+// ---------------------------
+//
+// This STLD barrier effectively will flush the store buffer into the memory hierarchy ensuring all stores in the buffer are visible to all other cpus at the same time
+// before executing the load instruction. Again nothing prevents a potential hardware from speculatively executing the load even with the STLD barrier, the hardware will have to do
+// a proper rollback if it detected a memory order violation otherwise it can continue on with its speculative load. The barrier just delimits a stability point.
+//
+// Most hardware does not provide granular barrier semantics such as STLD. Most provide a write memory barrier which only orders stores, STST, a read memory barrier
+// which only orders loads, LDLD, and then a full memory barrier which is all 4 permutations. So on x86 we will have to use the mfence, memory fence, instruction
+// which is a full memory barrier to get our desired STLD requirements.
+//
+// TSO also has the property that we call, Multi-Copy Store Atomicity. This means a cpu sees its own stores before they become visible to other cpus,
+// by forwarding them from the store buffer, but a store becomes visible to all other cpus at the same time when flushed from the store buffer.
+//
+//
+// Let's look at a non-FIFO store buffer now as seen in ARM cpus as an example and we will use a standard Message Passing example to see how it manifests in even weaker consistency.
+// A store buffer on ARM as an example allows write merging even with adjacent stores, is not a FIFO queue, any stores in the small hardware hash table may be ejected at any point
+// due to a collision eviction or the availability of cachelines in the cache hierarchy meaning that stores may bypass the buffer entirely if that cacheline is already owned by that cpu.
+// There is no guarantee that stores will be completed in order as in the FIFO case.
+//
+// ---------------------------
+// Initial State:
+// x = 0; y = 0;
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// STORE(x, 1) | while(LOAD(y) == 0);
+// STORE(y, 1) | r0 = LOAD(x)
+// ---------------------------
+// Observed: r0 = 0
+// ---------------------------
+//
+// This is a classic Message Passing example that is very commonly used in production code. We store some values and then set a flag, STORE(y, 1) in this case.
+// The other thread waits until the flag is observed and then reads the value out of x. If we observed the flag then we should obviously see all stores before the flag was set.
+// Given our familiarity with TSO consistency above we know this definitely works on TSO and it is impossible to observe the load of x returning 0 under that consistency model.
+// Let's see how this breaks with a non-FIFO store buffer.
+//
+// Thread 0 executes the STORE(x, 1) but the cacheline for x is not in thread 0's cache so we write to the store buffer and wait for the cacheline.
+// Thread 1 executes the LOAD(y) and it also does not have y in its cacheline so it waits before completing the load.
+// Thread 0 moves on to STORE(y, 1). It owns this cacheline, hypothetically, so it may bypass the store buffer and store directly to the cache.
+// Thread 0 receives a message that Thread 1 needs y's cacheline, so it transfers the now modified cacheline to Thread 1.
+// Thread 1 completes the load with the updated value of y = 1 and branches out of the while loop since we saw the new value of y.
+// Thread 1 executes LOAD(x) which will return 0 since Thread 0 still hasn't flushed its store buffer waiting for x's cacheline.
+// Thread 0 receives x's cacheline and now flushes x = 1 to the cache. Thread 1 will also have invalidated its cacheline for x that it brought in via the previous load.
+//
+// We have now fallen victim to STST reordering, allowing Thread 1 to observe a load of x returning 0. Not only does this store buffer allow STLD reordering due to the nature of
+// buffering stores, but it also allows another reordering; that of Store-Store reordering. It was observed as if Thread 0 executed STORE(y, 1) before STORE(x, 1) which completely
+// broke our simple message passing scenario.
+//
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// STORE(x, 1) | while(LOAD(y) == 0);
+// STST BARRIER |
+// STORE(y, 1) | r0 = LOAD(x)
+// ---------------------------
+//
+// The STST memory barrier effectively ensures that the cpu will flush its store buffer before executing any subsequent stores. That is not entirely true, the cpu is still allowed
+// to continue and execute stores to the store buffer as long as it doesn't flush them to the cache before the previous stores are flushed to the cache. If nothing becomes
+// globally visible out of order then we are good.
+// The example above will change how the processor executes due to the STST memory barrier. Thread 0 will execute STORE(y, 1), write to the store buffer and mark all current entries. Even though it owns the cacheline
+// it cannot write the store to the cache until all marked entries, which are all the previous stores, are flushed to the cache. We have now fixed the message passing code by adding
+// a STST or write memory barrier and thus it is no longer possible to observe the load of x returning 0.
+//
+// ******** Invalidation Queues ********
+//
+// Due to the cache coherency protocol in play, a write to a cacheline will have to send invalidation messages to all other cpus that may have that cacheline as well.
+// Immediately executing and responding to invalidation messages can cause quite a stall especially if the cache is busy at the moment with other requests.
+// The longer we wait to invalidate the cacheline, the longer the remote cpu doing the write is stalled waiting on us. We don't like this very much.
+// Invalidation Queues are just that, we queue up the action of actually invalidating the cacheline but immediately respond to the request saying we did it anyway.
+// Now the remote cpu thinks we invalidated said cacheline but actually it may very well still be in our cache ready to be read from. We just got weaker again, let's
+// see how this manifests in code by starting from the end of our previous example.
+//
+// ---------------------------
+// Initial State:
+// x = 0; y = 0;
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// STORE(x, 1) | while(LOAD(y) == 0);
+// STST BARRIER |
+// STORE(y, 1) | r0 = LOAD(x)
+// ---------------------------
+// Observed: r0 = 0
+// ---------------------------
+//
+// Thread 1 receives the invalidate x's cacheline message and queues it because it is busy.
+// Thread 1 receives the invalidate y's cacheline message, but we don't have that cacheline so acknowledge immediately.
+// Thread 1 executes LOAD(y), loads in y's cacheline and branches out of the loop.
+// Thread 1 executes LOAD(x), and loads from the cache the old value of x because the invalidation message is still sitting in the invalidation queue.
+//
+// We have just again observed the load of x returning 0 but from a different type of reordering now on the reader side.
+// This is a form of LDLD, Load-Load, reordering as it appears as if LOAD(x) was executed before LOAD(y). This can be fixed as follows.
+//
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// STORE(x, 1) | while(LOAD(y) == 0);
+// STST BARRIER | LDLD BARRIER
+// STORE(y, 1) | r0 = LOAD(x)
+// ---------------------------
+//
+// The LDLD memory barrier essentially marks all entries currently in the invalidation queue. Any subsequent load must wait until all the marked entries have been
+// processed. This ensures once we observe y = 1, we process all entries that came before y and that way we observe all the stores that happened before y.
+// The insertion of the read memory barrier creates the required memory barrier pairing as discussed above and ensures that now our code executes as expected.
+//
+// It must be made clear that these are not the only hardware structure additions or ways that can relax STST, STLD and LDLD orderings. These are merely
+// 2 structures that are common and ones that I choose to use as examples of how hardware can reduce ordering guarantees. Knowing how the hardware does this
+// isn't always entirely clear but having a model that tells us what operations can be reordered is all we need to be able to reason about our code when executing on that hardware.
+//
+// ******** Load Buffering ********
+//
+// The analog of the Store Buffering example, this litmus test has two threads read from two different locations and then write to the other locations.
+// The outcome of having LDST reordering is allowed and observable on many processors such as ARM.
+//
+// ---------------------------
+// Initial State:
+// x = 0; y = 0;
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// r0 = LOAD(x) | r1 = LOAD(y)
+// STORE(y, 1) | STORE(x, 1)
+// ---------------------------
+// Observed: r0 = 1 && r1 = 1
+// ---------------------------
+//
+// This is possible because the processor does not have to wait for the other cpu's cacheline to arrive before storing into the cache.
+// Assume Thread 0 owns y's cacheline and Thread 1 owns x's cacheline.
+// The processor may execute the load and thus buffer the load waiting for the cacheline to arrive.
+// The processor may continue onto the store and since each cpu owns their respective cacheline, store the result into the cache.
+// The cpus now receive the cachelines for x and y with the now modified value.
+// We have just observed the loads returning 1 and thus observed LDST reordering.
+//
+// To forbid such outcome it suffices to add any full memory barrier to both threads or a local Read-After-Write/Read-To-Write dependency or a control dependency.
+//
+// -------------------------------
+// Thread 0 | Thread 1
+// -------------------------------
+// r0 = LOAD(x) | r1 = LOAD(y)
+// if (r0 == 1) | if (r1 == 1)
+// STORE(y, 1) | STORE(x, 1)
+// -------------------------------
+//
+// -----------------------------------------------------
+// Thread 0 | Thread 1
+// -----------------------------------------------------
+// r0 = LOAD(x) | r1 = LOAD(y)
+// STORE(&(y + r0 - r1), 1) | STORE(&(x + r1 - r1), 1)
+// -----------------------------------------------------
+//
+// Both fixes above ensure that both writes cannot be committed, made globally visible, until their program source code order preceding reads have been fully satisfied.
+//
+// ******** Compiler Barriers ********
+//
+// Compiler barriers are both-sided barriers that prevent loads and stores from moving down past the compiler barrier and
+// loads and stores from moving up above the compiler barrier. Here we will see the various ways our code may be subject
+// to compiler optimizations and why compiler barriers are needed. Note as stated above, compiler barriers may not
+// prevent all compiler optimizations or transformations. Compiler barriers are usually implemented by reloading all
+// variables that are currently cached in registers and flushing all stores in registers back to memory.
+// This list isn't exhaustive but will hopefully try to outline what compiler barriers protect against and what they don't.
+//
+// Compiler may reorder loads.
+// LOAD A; LOAD B; -> LOAD B; LOAD A;
+// LOAD A; operation on A; LOAD B; operation on B; -> LOAD A; LOAD B; operation on A; operation on B
+//
+// Insert a compiler barrier in between the two loads to guarantee that they are kept in order.
+// LOAD A; COMPILER_BARRIER; LOAD B;
+// LOAD A; operation on A; COMPILER_BARRIER; LOAD B; operation on B;
+//
+// The same with stores.
+// STORE(A, 1); STORE(B, 1); -> STORE(B, 1); STORE(A, 1);
+// operations and STORE result into A; operations and STORE result int B; -> all operations; STORE result into B; STORE result into A;
+//
+// Insert a compiler barrier in between the two stores to guarantee that they are kept in order.
+// It is not required that the multiple stores to A before the barrier are not merged into one final store.
+// It is not required that the store to B after the barrier be written to memory, it may be cached in a register for some indeterminate
+// amount of time as an example.
+// STORE(A, 1); COMPILER_BARRIER; STORE(B, 1);
+//
+// The compiler is allowed to merge overlapping loads and stores.
+// Inserting a compiler barrier here will not prevent the compiler from doing this optimization as doing one wider load/store is
+// technically still abiding by the guarantee that the loads/stores are not reordered with each other.
+// LOAD A[0]; LOAD A[1]; -> A single wider LOAD instruction
+// STORE(A[0], 1); STORE(A[1], 2); -> A single wider STORE instruction
+//
+// Compilers do not have to reload the values pointers point to. This is especially common with RISC architectures with lots
+// of general purpose registers or even compiler optimizations such as inlining or Link-Time Optimization.
+// int i = *ptr; Do bunch of operations; if (*ptr) { do more; }
+// It is entirely possible the compiler may remove the last if statement because it can keep the *ptr in a register
+// and it may infer from the operations done on i that i is never 0.
+//
+// int i = *ptr; Do bunch of operations; COMPILER_BARRIER; if (*ptr) { do more; }
+// Inserting a compiler barrier at that location will cause the compiler to have reload *ptr thus keeping the if statement assuming
+// no other optimizations take place, such as the compiler knowing that *ptr is always greater than 0.
+//
+// The compiler is within its rights to also merge and reload loads as much as it pleases.
+//
+// while (int tmp = LOAD(A))
+// process_tmp(tmp)
+//
+// Will be merged and transformed to
+//
+// if (int tmp = LOAD(A))
+// for (;;) process_tmp(tmp)
+//
+// Inserting a compiler barrier will ensure that LOAD(A) is always reloaded and thus the unwanted transformation is avoided.
+//
+// while (int tmp = LOAD(A))
+// {
+// process_tmp(tmp)
+// COMPILER_BARRIER
+// }
+//
+// Under heavy register pressure scenarios, say the loop body was larger, the compiler may reload A as follows.
+// Compiler barriers cannot prevent this from happening, even if we put it after process_tmp as above;
+// the compiler still kept those loads above the barrier so it satisfied its contract even though it reloaded
+// from A more than once.
+//
+// while (int tmp = LOAD(A))
+// process_tmp(LOAD(A))
+//
+// In the above transformation it is possible that another cpu stores 0 into A. When we reload A for process_tmp, we pass 0
+// to process_tmp() which it would actually never expect to observe. Because if we observed 0, the while loop condition
+// would never be satisfied. If the compiler under register pressure instead stored and loaded tmp from its stack slot, that is fine
+// because we are just storing and loading the original observed value from A. Obviously that is slower than just reloading from
+// A again so an optimizing compiler may not do the stack slot store. This is an unwanted transformation which eastl::atomic<T> prevents
+// even on relaxed loads.
+//
+// The compiler is allowed to do dead-store elimination if it knows that value has already been stored, or that only the last store
+// needs to be stored. The compiler does not assume or know that these variables are shared variables.
+//
+// STORE(A, 1); STORE(A, 1);
+// OPERATIONS; -> OPERATIONS;
+// STORE(A, 1);
+//
+// The compiler is well within its rights to omit the second store to A. Assuming we are doing some fancy lockfree communication
+// with another cpu and the last store is meant to ensure the ending value is 1 even if another cpu changed A in between; that
+// assumption will not be satisfied. A compiler barrier will not prevent the last store from being dead-store removed.
+//
+// STORE(A, 1);
+// OPERATIONS;
+// STORE(A, 2);
+//
+// Assuming these stores are meant to denote some state changes to communicate with a remote cpu. The compiler is allowed to
+// transform this as follows without a compiler barrier. Insert a compiler barrier between the two stores to prevent the transformation.
+// Something like this will also require memory barriers, but that is not the point of this section.
+//
+// STORE(A, 2);
+// OPERATIONS;
+//
+// The compiler is also allowed to invent stores as it may please.
+// First on many RISC architectures storing an immediate value either involves loading the immediate from the .data section
+// or combing a variety of load upper immediate and add or or immediate instructions to get our constant in a register and then
+// doing a single 32-bit store instruction from said register. Some ISAs have 16-bit stores with immediate value so that a store
+// may be broken into 2 16-bit store immediate values causing shearing. To reduce instruction dependencies it may also decide
+// to do two add immediates and then two 16-bit stores again causing shearing.
+//
+// lui $t0, 1 # t0 == 0x00010000
+// ori $a0, $t0, 8 # t0 == 0x00010008
+// strw $t0, 0($a1) # store t0 into address at a1
+// ->
+// ori $a0, $t0, 1 # t0 == 0x00000001
+// ori $a0, $t1, 8 # t0 == 0x00000008
+// strhw $t0, 0($a1) # store t0 lower half at a1
+// strhw $t1, 2($a1) # store t1 upper half at a1
+//
+// The above shows a potential transformation that a compiler barrier cannot solve for us.
+//
+// A compiler may also introduce stores to save on branching. Let's see.
+//
+// if (a)
+// STORE(X, 10);
+// else
+// STORE(X, 20);
+//
+// STORE(X, 20);
+// if (a)
+// STORE(X, 10);
+//
+// This is a very common optimization as it saves a potentially more expensive branch instruction but breaks multi-threaded code.
+// This is also another case where a compiler barrier doesn't give us the granularity we need.
+// The branches may even be completely removed with the compiler instead choosing to use conditional move operations which would
+// actually be compliant since there would be one store only done, an extra store wouldn't have been added.
+//
+// You are now probably thinking that compiler barriers are useful and are definitely needed to tell the compiler to calm down
+// and guarantee our hardware guarantees are valid because the code we wrote is the instructions that were emitted.
+// But there are definitely lots of caveats where compiler barriers do not at all provide the guarantees we still need.
+// This where eastl::atomic<T> comes into play, and under the relaxed memory ordering section it will be explained
+// what the standard guarantees and how we achieve those guarantees, like ensuring the compiler never does dead-store elimination or reloads.
+//
+// ******** Control Dependencies ********
+//
+// Control dependencies are implicit local cpu ordering of memory instructions due to branching instructions, specifically
+// only conditional branches. The problem is compilers do not understand control dependencies, and control dependencies
+// are incredibly hard to understand. This is meant to make the reader aware they exist and to never use them
+// because they shouldn't be needed at all with eastl::atomic<T>. Also control dependencies are categorized as LDLD or LDST,
+// store control dependencies inherently do not make sense since the conditional branch loads and compares two values.
+//
+// A LDLD control dependency is an anti-pattern since it is not guaranteed that any architecture will detect the memory-order violation.
+// r0 = LOAD(A);
+// if (r0)
+// r1 = LOAD(B)
+//
+// Given those sequence of instructions, it is entirely possible that a cpu attempts to speculatively predict and load the value of B
+// before the branch instruction has finished executing. It is entirely allowed that the cpu loads from B, assume B is in cache and A
+// is not in cache, before A. It is allowed, that even if the cpu was correct in it's prediction that it doesn't reload B and change the
+// fact that it speculatively got lucky.
+//
+// This is also what the x86 pause instruction inserted into spin wait loops is meant to solve.
+// LOOP:
+// r0 = LOAD(A);
+// if (!r0) pause; goto LOOP;
+//
+// In the above spin loop, after a couple of iterations the processor will fill the pipeline with speculated cmp and load instructions.
+// x86 will catch a memory order violation if it sees that an external store was done to A and thus must flush the entire
+// pipeline of all the speculated load A. Pause instruction tells the cpu to not do speculative loads so that the pipeline is not
+// filled with all said speculative load instructions. This ensures we do not incur the costly pipeline flushes from memory order
+// violations which are likely to occur in tight spin wait loops. This also allows other threads on the same physical core to use the
+// core's resources better since our speculative nature won't be hogging it all.
+//
+// A LDST control dependency is a true dependency in which the cpu cannot make a store visible to the system and other cpus until it
+// knows its prediction is correct. Thus a LDST ordering is guaranteed and can be always relied upon as in the following example.
+//
+// r0 = LOAD(A);
+// if (r0)
+// STORE(B, 1);
+//
+// The fun part comes in with how does the compiler actually break all of this.
+// First is that if the compiler can ensure that the value of A in the LDST example is always not zero, then it is always within its
+// rights to completely remove the if statement which would lend us with no control dependency.
+//
+// Things get more fun when we deal with conditionals with else and else if statements where the compiler might be able to employ
+// invariant code motion optimizations. Take this example.
+//
+// r0 = LOAD(A);
+// r1 = LOAD(B);
+// if (r0)
+// STORE(B, 1);
+// /* MORE CODE */
+// else if (r1)
+// STORE(B, 1);
+// /* MORE CODE */
+// else
+// STORE(B, 1);
+// /* MORE CODE */
+//
+// If we were trying to be smart and entirely rely on the control dependency to ensure order, ya well just don't the compiler
+// is always smarter. The compiler is well within its rights to move all the STORE(B, 1) up and above all the conditionals breaking
+// our reliance on the LDST control dependency.
+//
+// Things can get even more complicated especially in C++ when values may come from constexpr, inline, inline constexpr, static const, etc,
+// variables and thus the compiler will do all sorts of transformations to reduce, remove, augment and change all your conditional code since
+// it knows the values of the expressions or even parts of it at compile time. Even more aggressive optimizations like LTO might break code that was being cautious.
+// Even adding simple short circuiting logic or your classic likely/unlikely macros can alter conditionals in ways you didn't expect.
+// In short know enough about control dependencies to know not to ever use them.
+//
+// ******** Multi-Copy Store Atomicity && Barrier Cumulativity ********
+//
+// Single-Copy Store Atomicity: All stores must become visible to all cores in the system at the same time.
+//
+// Multi-Copy Store Atomicity : This means a cpu sees its own stores before they become visible to other cpus, by forwarding them from the store buffer,
+// but a store becomes visible to all other cpus at the same time when flushed from the store buffer.
+//
+// Non-Atomic Store Atomicity : A store becomes visible to different cpus at different times.
+//
+// Those are the above variations of Store Atomicity. Most processors have Non-Atomic Store Atomicity and thus you must program to that lowest common denominator.
+// We can use barriers, with some caveats, to restore Multi-Copy Store Atomicity to a Non-Atomic system though we need to define a new granular definition for
+// memory barriers to define this behaviour. Simple LDLD/LDST/STST/STLD definition is not enough to categorize memory barriers at this level. Let's start off
+// with a simple example that breaks under a Non-Atomic Store Atomicity system and what potential hardware features allow this behaviour to be observed.
+//
+// NOTE: For all the below examples we assume no compile reordering and that the processor also executes the instructions with no local reorderings to make the examples simpler,
+// to only show off the effects of Multi-Copy Store Atomicity. This is why we don't add any address dependencies, or mark explicit LDLD/LDST memory barriers.
+// Thus you may assume all LDLD and LDST pairs have an address dependency between them, so that they are not reordered by the compiler or the local cpu.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Write-To-Read Causality, WRC, Litmus Test
+// ---------------------------------------------------------------------------------------------------------
+// Initial State:
+// X = 0; Y = 0;
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD(X) | r1 = LOAD(Y)
+// | STORE(Y, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 1 && r2 = 0
+// ---------------------------------------------------------------------------------------------------------
+//
+// Let's go over this example in detail and whether the outcome shown above can be observed. In this example Thread 0 stores 1 into X. If Thread 1 observes the write to X,
+// it stores the observed value into Y. Thread 2 loads from Y then X. This means if the load from Y returns 1, then we intuitively know the global store order
+// was 1 to X and then 1 to Y. So is it possible then that the load from X in Thread 2 can return 0 in that case? Under a Multi-Copy Store Atomicity system, that would be
+// impossible because once 1 was stored to X all cpus see that store so if Thread 2 saw the store to Y which can only happen after the store to X was observed, then
+// Thread 2 must also have observed the store to X and return 1. As you may well have figured out, it is possible under a Non-Atomic Store Atomicity system to still
+// observe the load from X returning 0 even if the above load from Y returned 1 in Thread 2. This completely breaks our intuition of causality. Let's now understand what hardware may cause this.
+//
+// This is possible on cpus that have Simultaneous Multi-Threading, SMT or HyperThreading in Intel parlance, which share resources such as store buffers or L1 cache.
+// We are accustomed to the x86 way of SMT where each logical core shares Execution Units on the physical core but each logical core has their own statically partitioned
+// cache and store buffer that is not visible to the other cpus. It is possible on cpus like ARMv7 or POWER, POWER9 supports 4 and even 8 threads per physical core, so
+// to save on die space though yet enable this large number of threads per physical core it is common for these logical cores to all use the same store buffer or L1 cache
+// per physical core on these processors. Let's take the above example and rerun it with this knowledge to get the observed behaviour outlined above.
+//
+// Assume Thread 0, Thread 1, and Thread 2 run on cpu 0, cpu 1, and cpu 2 respectively. Assume that cpu 0 and cpu 1 are two logical cores on the same physical core so this processor
+// has an SMT value of 2. Thread 0 will store 1 into X. This store may be in the store buffer or in the L1 cache that cpu 1 also shares with cpu 0, thus cpu 1 has early access to cpu 0's stores.
+// Thread 1 loads X which it observed as 1 early and then stores 1 into Y. Thread 2 may see the load from Y returning 1 but now the load from X returning 0 all because cpu 1 got early
+// access to cpu 0 store due to sharing a L1 cache or store buffer.
+// We will come back on how to fix this example with the proper memory barriers for the Non-Atomic Store Atomicity systems, but we need to detour first.
+//
+// We need to take a deeper dive into memory barriers to understand how to restore Multi-Copy Store Atomicity from a Non-Atomic Store Atomicity system.
+// Let's start with a motivating example and we will be using the POWER architecture throughout this example because it encompasses all the possible observable behaviour.
+// ARMv7 technically allows Non-Atomic Store Atomicity behaviour but no consumer ARMv7 chip actually observes this behaviour.
+// ARMv8 reworked its model to specifically say it is a Multi-Copy Store Atomicity system.
+// POWER is one of the last few popular consumer architectures that are guaranteed to have Non-Atomic Store Atomicity observable behaviour, thus we will be using it for the following examples.
+//
+// To preface, POWER has two types of memory barriers called lwsync and sync. The following table lists the guarantees provided by TSO, x86, and the lwsync instruction.
+// The table gives a hint as to why using our previous definition of LDLD/LDST/STST/STLD isn't granular enough to categorize memory barrier instructions.
+//
+// TSO: | POWER lwsync memory barrier:
+// LDLD : YES | LDLD : YES
+// LDST : YES | LDST : YES
+// STST : YES | STST : YES
+// STLD : NO | STLD : NO
+// A cumulative : YES | A cumulative : YES
+// B cumulative : YES | B cumulative : YES
+// IRIW : YES | IRIW : NO
+//
+// The TSO memory model provided by x86 seems to be exactly the same as POWER if we add lwsync memory barrier instructions in between each of the memory instructions.
+// This provides us the exact same ordering guarantees as the TSO memory model. If we just looked at the 4 permutations of reorderings we would be inclined to assume that
+// TSO has the exact same ordering as sprinkling lwsync in our code in between every pair of memory instructions. That is not the case because memory barrier causality and cumulativity differ in subtle ways.
+// In this case they differ by the implicit guarantees from the TSO memory model versus those provided by the POWER lwsync memory barrier.
+// So the lwsync memory barrier prevents reordering with instructions that have causality but does not prevent reordering with instructions that are completely independent.
+// Let's dive into these concepts a bit more.
+//
+// Non-Atomic Store Atomicity architectures are prone to behaviours such as the non-causal outcome of the WRC test above. Architectures such as POWER defines memory barriers to enforce
+// ordering with respect to memory accesses in remote cpus other than the cpu actually issuing the memory barrier. This is known as memory barrier cumulativity.
+// How does the memory barrier issued on my cpu affect the view of memory accesses done by remote cpuss.
+//
+// Cumulative memory barriers are defined as follows - Take your time this part is very non-trivial:
+// A-Cumulative: We denote group A as the set of memory instructions in this cpu or other cpus that are ordered before the memory barrier in this cpu.
+// A-Cumulativity requires that memory instructions from any cpu that have performed prior to a memory load before the memory barrier on this cpu are also members of group A.
+// B-Cumulative: We denote group B as the set of memory instructions in this cpu or other cpus that are ordered after the memory barrier in this cpu.
+// B-Cumulativity requires that memory instructions from any cpu that perform after a load and including the load in that cpu that returns the value of a store in group B are
+// also members of group B.
+// IRIW : enforces a global ordering even for memory instructions that have no causality. The memory instructions are completely independent.
+//
+// ---------------------------------------------------------------------------------------------------------
+// WRC Litmus Test
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// {i} : STORE(X, 1) | {ii} : r0 = LOAD(X) | {v} : r1 = LOAD(Y)
+// | {iii} : lwsync |
+// | {iv} : STORE(Y, r0) | {vi} : r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+// Outcome: r0 = 1 && r1 = 1 && r2 = 1
+//
+// Group A of {iii} : {i} && {ii}
+//
+// Group B of {iii} : {iv} && {v} && {vi}
+// ---------------------------------------------------------------------------------------------------------
+//
+// Using the WRC test again and inserting a POWER lwsync, don't concern yourself with why the memory barrier was inserted at that spot right now, we now see the distinctions of group A and group B.
+// It demonstrates the A and B Cumulative nature of the lwsync instruction, {iii}. First group A, initially consists of {ii} and group B initially consists of {iv} from the local cpu that issued the lwsync.
+// Since {ii} reads from {i} and assume {i} happens before {ii}, by definition of A-Cumulativity {i} is included in group A.
+// Similarly {v} reads from {iv} and assume {iv} happens before {v}, then {v} is included in group B by definition of B-Cumulativity.
+// {vi} is also included in group B since it happens after {v} by definition of B-Cumulativity.
+//
+// WRC litmus test represents a scenario where only a A-Cumulative memory barrier is needed. The lwsync not only provides the needed local LDST memory barrier for the local thread but also ensures
+// that any write Thread 1 has read from before the memory barrier is kept in order with any write Thread 1 does after the memory barrier as far as any other thread observes.
+// In other words it ensures that any write that has propagated to Thread 1 before the memory barrier is propagated to any other thread before the second store after the memory barrier in Thread 1
+// can propagate to other threads in the system. This is exactly the definition of A-Cumulativity and what we need to ensure that causality is maintained in the WRC Litmus Test example.
+// With that lwsync in place it is now impossible to observe r0 = 1 && r1 = 1 && r2 = 0. The lwsync has restored causal ordering. Let's look at an example that requires B-Cumulativity.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Example 2 from POWER manual
+// ---------------------------------------------------------------------------------------------------------
+// Initial State:
+// X = 0; Y = 0; Z = 0
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD(Y) | r1 = LOAD(Z)
+// STORE(Y, 1) | STORE(Z, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 1 && r2 = 0
+// ---------------------------------------------------------------------------------------------------------
+//
+// This example is very similar to WRC except that we kinda extended the Message Passing through an additional shared variable instead.
+// Think of this as Thread 0 writing some data into X, setting flag Y, Thread 1 waiting for flag Y then writing flag Z, and finally Thread 2 waiting for flag Z before reading the data.
+// Take a minute to digest the above example and think about where a memory barrier, lwsync, should be placed. Don't peek at the solution below.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Example 2 from POWER manual
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD(Y) | r1 = LOAD(Z)
+// lwsync | |
+// STORE(Y, 1) | STORE(Z, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// First the lwsync provides the needed local STST memory barrier for the local thread, thus the lwsync here ensures that the store to X propagates to Thread 1 before the store to Y.
+// B-Cumulativity applied to all operations after the memory barrier ensure that the store to X is
+// kept in order with respect to the store to Z as far as all other threads participating in the dependency chain are concerned. This is the exact definition of B-Cumulativity.
+// With this one lwsync the outcome outlined above is impossible to observe. If r0 = 1 && r1 = 1 then r2 must be properly observed to be 1.
+//
+// We know that lwsync only provides A-Cumulativity and B-Cumulativity. Now we will look at examples that have no causality constraints thus we need to grab heavier memory barriers
+// that ensures in short we will say makes a store become visible to all processors, even those not on the dependency chains. Let's get to the first example.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Independent Reads of Independent Writes, IRIW, coined by Doug Lea
+// ---------------------------------------------------------------------------------------------------------
+// Initial State:
+// X = 0; Y = 0;
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2 | Thread 3
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD(X) | STORE(Y, 1) | r2 = LOAD(Y)
+// | r1 = LOAD(Y) | | r3 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0 && r2 = 1 && r3 = 0
+// ---------------------------------------------------------------------------------------------------------
+//
+// The IRIW example above clearly shows that writes can be propagated to different cpus in completely different orders.
+// Thread 1 sees the store to X but not the store to Y while Thread 3 sees the store to Y but not the store to X, the complete opposite.
+// Also to the keen eye you may have noticed this example is a slight modification of the Store Buffer example so try to guess where the memory barriers would go.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Independent Reads of Independent Writes, IRIW, coined by Doug Lea
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2 | Thread 3
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD(X) | STORE(Y, 1) | r2 = LOAD(Y)
+// | sync | | sync
+// | r1 = LOAD(Y) | | r3 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// To ensure that the above observation is forbidden we need to add a full sync memory barrier on both the reading threads. Think of sync as restoring sequential consistency.
+// The sync memory barrier ensures that any writes that Thread 1 has read from before the memory barrier are fully propagated to all threads before the reads are satisfied after the memory barrier.
+// The same can be said for Thread 3. This is why the sync memory barrier is needed because there is no partial causal ordering here or anything that can be considered for our A and B Cumulativity definitions.
+// We must ensure that all writes have been propagated to all cpus before proceeding. This gives way to the difference between sync and lwsync with regards to visibility of writes and cumulativity.
+// sync guarantees that all program-order previous stores must have been propagated to all other cpus before the memory instructions after the memory barrier.
+// lwsync does not ensure that stores before the memory barrier have actually propagated to any other cpu before memory instructions after the memory barrier, but it will keep stores before and after the
+// lwsync in order as far as other cpus are concerned that are within the dependency chain.
+//
+// Fun fact while ARMv7 claims to be Non-Atomic Store Atomicity no mainstream ARM implementation that I have seen has shown cases of Non-Atomic Store Atomicity.
+// It's allowed by the ARMv7 memory model and thus you have to program to that. ARMv8 changes this and states that it has Multi-Copy Store Atomicity.
+//
+// ******** Release-Acquire Semantics ********
+//
+// The most useful and common cases where Release-Acquire Semantics are used in every day code is in message passing and mutexes. Let's get onto some examples and the C++ definition of Release-Acquire.
+//
+// ACQUIRE:
+// An Acquire operation is a one-way memory barrier whereby all loads and stores after the acquire operation cannot move up and above the acquire operation.
+// Loads and stores before the acquire operation can move down past the acquire operation. An acquire operation should always be paired with a Release operation on the SAME atomic object.
+//
+// RELEASE:
+// A Release operation is a one-way memory barrier whereby all loads and stores before the release operation cannot move down and below the release operation.
+// Loads and stores after the release operation can move up and above the release operation. A release operation should always be paired with an Acquire operation on the SAME atomic object.
+//
+// Release-Acquire pair does not create a full memory barrier but it guarantees that all memory instructions before a Release operation on an atomic object M are visible after an Acquire
+// operation on that same atomic object M. Thus these semantics usually are enough to preclude the need for any other memory barriers.
+// The synchronization is established only between the threads Releasing and Acquiring the same atomic object M.
+//
+// ---------------------------------------------------
+// Critical Section
+// ---------------------------------------------------
+// Thread 0 | Thread 1
+// ---------------------------------------------------
+// mtx.lock() - Acquire | mtx.lock() - Acquire
+// STORE(X, 1) | r0 = LOAD(X)
+// mtx.unlock() - Release | mtx.unlock() - Release
+// ---------------------------------------------------
+//
+// A mutex only requires Release-Acquire semantics to protect the critical section. We do not care if operations above the lock leak into the critical section or that operations below the unlock leak into the
+// critical section because they are outside the protected region of the lock()/unlock() pair. Release-Acquire semantics does guarantee that everything inside the critical section cannot leak out.
+// Thus all accesses of all previous critical sections for the mutex are guaranteed to have completed and be visible when the mutex is handed off to the next party due to the Release-Acquire chaining.
+// This also means that mutexes do not provide or restore Multi-Copy Store Atomicity to any memory instructions outside the mutex, like the IRIW example since it does not emit full memory barriers.
+//
+// ------------------------------------------------------
+// Message Passing
+// ------------------------------------------------------
+// Thread 0 | Thread 1
+// ------------------------------------------------------
+// STORE(DATA, 1) | while (!LOAD_ACQUIRE(FLAG))
+// |
+// STORE_RELEASE(FLAG, 1) | r0 = LOAD(DATA)
+// ------------------------------------------------------
+//
+// This is a common message passing idiom that also shows the use of Release-Acquire semantics. It should be obvious by the definitions outlined above why this works.
+// An Acquire operation attached to a load needs to provide a LDLD and LDST memory barrier according to our definition of acquire. This is provided by default on x86 TSO thus no memory barrier is emitted.
+// A Release operation attached to a store needs to provide a STST and LDST memory barrier according to our definition of release. This is provided by default on x86 TSO thus no memory barrier is emitted.
+//
+// A couple of things of note here. One is that by attaching the semantics of a memory model directly to the memory instruction/operation itself we can take advantage of the fact the some processors
+// already provide guarantees between memory instructions and thus we do not have to emit memory barriers. Another thing of note is that the memory model is directly attached to the operation,
+// so you must do the Release-Acquire pairing on the SAME object which in this case is the FLAG variable. Doing an Acquire or Release on a separate object has no guarantee to observe an Acquire or Release on a different object.
+// This better encapsulates the meaning of the code and also allows the processor to potentially do more optimizations since a stand alone memory barrier will order all memory instructions of a given type before and after the barrier.
+// Where as the memory ordering attached to the load or store tells the processor that it only has to order memory instructions in relation to that specific load or store with the given memory order.
+//
+//
+// ---------------------------------------------------------------------------------------------------------
+// Release Attached to a Store VS. Standalone Fence
+// ---------------------------------------------------------------------------------------------------------
+// STORE(DATA, 1) | STORE(DATA, 1)
+// | ATOMIC_THREAD_FENCE_RELEASE()
+// STORE_RELEASE(FLAG, 1) | STORE_RELAXED(FLAG, 1)
+// STORE_RELAXED(VAR, 2) | STORE_RELAXED(VAR, 2)
+// ---------------------------------------------------------------------------------------------------------
+// ARMv8 Assembly
+// ---------------------------------------------------------------------------------------------------------
+// str 1, DATA | str 1, DATA
+// | dmb ish
+// stlr 1, FLAG | str 1, FLAG
+// str 2, VAR | str 2, VAR
+// ---------------------------------------------------------------------------------------------------------
+//
+// In the above example the release is attached to the FLAG variable, thus synchronization only needs to be guaranteed for that atomic variable.
+// It is entirely possible for the VAR relaxed store to be reordered above the release store.
+// In the fence version, since the fence is standalone, there is no notion where the release is meant to be attached to thus the fence must prevent all subsequent relaxed stores
+// from being reordered above the fence. The fence provides a stronger guarantee whereby now the VAR relaxed store cannot be moved up and above the release operation.
+// Also notice the ARMv8 assembly is different, the release fence must use the stronger dmb ish barrier instead of the dedicated release store instruction.
+// We dive more into fences provided by eastl::atomic<T> below.
+//
+// Release-Acquire semantics also have the property that it must chain through multiple dependencies which is where our knowledge from the previous section comes into play.
+// Everything on the Release-Acquire dependency chain must be visible to the next hop in the chain.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Example 2 from POWER manual
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD_ACQUIRE(Y) | r1 = LOAD_ACQUIRE(Z)
+// STORE_RELEASE(Y, 1) | STORE_RELEASE(Z, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// ---------------------------------------------------------------------------------------------------------
+// Write-To-Read Causality, WRC, Litmus Test
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD(X) | r1 = LOAD_ACQUIRE(Y)
+// | STORE_RELEASE(Y, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// You may notice both of these examples from the previous section. We replaced the standalone POWER memory barrier instructions with Release-Acquire semantics attached directly to the operations where we want causality preserved.
+// We have transformed those examples to use the eastl::atomic<T> memory model.
+// Take a moment to digest these examples in relation to the definition of Release-Acquire semantics.
+//
+// The Acquire chain can be satisfied by reading the value from the store release or any later stored headed by that release operation. The following examples will make this clearer.
+//
+// ------------------------------------------------------
+// Release Sequence Headed
+// ------------------------------------------------------
+// Initial State:
+// DATA = 0; FLAG = 0;
+// ------------------------------------------------------
+// Thread 0 | Thread 1
+// ------------------------------------------------------
+// STORE(DATA, 1) | r0 = LOAD_ACQUIRE(FLAG)
+// |
+// STORE_RELEASE(FLAG, 1) | r1 = LOAD(DATA)
+// STORE_RELAXED(FLAG, 3) |
+// ------------------------------------------------------
+// Observed: r0 = 3 && r1 = 0
+// ------------------------------------------------------
+//
+// In the above example we may read the value 3 from FLAG which was not the release store, but it was headed by that release store. Thus we observed a later store and therefore it is still valid to then observe r1 = 1.
+// The stores to FLAG from the STORE_RELEASE up to but not including the next STORE_RELEASE operation make up the release sequence headed by the first release store operation. Any store on that sequence can be used to enforce
+// causality on the load acquire.
+//
+// ******** Consume is currently not useful ********
+//
+// Consume is a weaker form of an acquire barrier and creates the Release-Consume barrier pairing.
+// Consume states that a load operation on an atomic object M cannot allow any loads or stores dependent on the value loaded by the operation to be reordered before the operation.
+// To understand consume we must first understand dependent loads.
+// You might encounter this being called a data dependency or an address dependency in some literature.
+//
+// --------------------------------------------------------------
+// Address Dependency
+// --------------------------------------------------------------
+// Initial State:
+// DATA = 0; PTR = nullptr;
+// --------------------------------------------------------------
+// Thread 0 | Thread 1
+// --------------------------------------------------------------
+// STORE(DATA, 1) | r0 = LOAD(PTR) - typeof(r0) = int*
+// |
+// STORE(PTR, &DATA) | r1 = LOAD(r0) - typeof(r1) = int
+// --------------------------------------------------------------
+//
+// There is a clear dependency here where we cannot load from *int until we actually read the int* from memory.
+// Now it is possible for Thread 1's load from *ptr to be observed before the store to DATA, therefore it can lead to r0 = &DATA && r1 = 0.
+// While this is a failure of causality, it is allowed by some cpus such as the DEC Alpha and I believe Blackfin as well.
+// Thus a data dependency memory barrier must be inserted between the data dependent loads in Thread 1. Note that this would equate to a nop on any processor other than the DEC Alpha.
+//
+// This can occur for a variety of hardware reasons. We learned about invalidation queues. It is possible that the invalidation for DATA gets buffered in Thread 1. DEC Alpha allows the Thread 1
+// load from PTR to continue without marking the entries in its invalidation queue. Thus the subsequent load is allowed to return the old cached value of DATA instead of waiting for the
+// marked entries in the invalidation queue to be processed. It is a design decision of the processor not to do proper dependency tracking here and instead relying on the programmer to insert memory barriers.
+//
+// This data dependent ordering guarantee is useful because in places where we were using an Acquire memory barrier we can reduce it to this Consume memory barrier without any hardware barriers actually emitted on every modern processor.
+// Let's take the above example, translate it to Acquire and Consume memory barriers and then translate it to the ARMv7 assembly and see the difference.
+//
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// Address Dependency - Release-Acquire Address Dependency - Release-Acquire - ARMv7 Assembly
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// Thread 0 | Thread 1 Thread 0 | Thread 1
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// STORE(DATA, 1) | r0 = LOAD_ACQUIRE(PTR) STORE(DATA, 1) | r0 = LOAD(PTR)
+// | dmb ish | dmb ish
+// STORE_RELEASE(PTR, &DATA) | r1 = LOAD(r0) STORE(PTR, &DATA) | r1 = LOAD(r0)
+// --------------------------------------------------------------- ---------------------------------------------------------------
+//
+// To get Release-Acquire semantics on ARMv7 we need to emit dmb ish; memory barriers.
+//
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// Address Dependency - Release-Consume Address Dependency - Release-Consume - ARMv7 Assembly
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// Thread 0 | Thread 1 Thread 0 | Thread 1
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// STORE(DATA, 1) | r0 = LOAD_CONSUME(PTR) STORE(DATA, 1) | r0 = LOAD(PTR)
+// | dmb ish |
+// STORE_RELEASE(PTR, &DATA) | r1 = LOAD(r0) STORE(PTR, &DATA) | r1 = LOAD(r0)
+// --------------------------------------------------------------- ---------------------------------------------------------------
+//
+// Data Dependencies can not only be created by read-after-write/RAW on registers, but also by RAW on memory locations too. Let's look at some more elaborate examples.
+//
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// Address Dependency on Registers - Release-Consume - ARMv7 Address Dependency on Memory - Release-Consume - ARMv7
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// Thread 0 | Thread 1 Thread 0 | Thread 1
+// --------------------------------------------------------------- ---------------------------------------------------------------
+// STORE(DATA, 1) | r0 = LOAD(PTR) STORE(DATA, 1) | r0 = LOAD(PTR)
+// | r1 = r0 + 0 | STORE(TEMP, r0)
+// dmb ish | r2 = r1 - 0 dmb ish | r1 = LOAD(TEMP)
+// STORE(PTR, &DATA) | r3 = LOAD(r2) STORE(PTR, &DATA) | r2 = LOAD(r1)
+// --------------------------------------------------------------- ---------------------------------------------------------------
+//
+// The above shows a more elaborate example of how data dependent dependencies flow through RAW chains either through memory or through registers.
+//
+// Notice by identifying that this is a data dependent operation and asking for a consume ordering, we can completely eliminate the memory barrier on Thread 1 since we know ARMv7 does not reorder data dependent loads. Neat.
+// Unfortunately every major compiler upgrades a consume to an acquire ordering, because the consume ordering in the standard has a stronger guarantee and requires the compiler to do complicated dependency tracking.
+// Dependency chains in source code must be mapped to dependency chains at the machine instruction level until a std::kill_dependency in the source code.
+//
+// ----------------------------------------------------------------
+// Non-Address Dependency && Multiple Chains
+// ----------------------------------------------------------------
+// Initial State:
+// std::atomic<int> FLAG; int DATA[1] = 0;
+// ----------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------
+// STORE(DATA[0], 1) | int f = LOAD_CONSUME(FLAG)
+// | int x = f
+// | if (x) return Func(x);
+// |
+// STORE_RELEASE(FLAG, 1) | Func(int y) return DATA[y - y]
+// ----------------------------------------------------------------
+//
+// This example is really concise but there is a lot going on. Let's digest it.
+// First is that the standard allows consume ordering even on what we will call not true machine level dependencies like a ptr load and then a load from that ptr as shown in the previous examples.
+// Here the dependency is between two ints, and the dependency chain on Thread 1 is as follows. f -> x -> y -> DATA[y - y]. The standard requires that source code dependencies on the loaded value
+// from consume flow thru assignments and even thru function calls. Also notice we added a dependency on the dereference of DATA with the value loaded from consume which while it does nothing actually abides by the standard
+// by enforcing a source code data dependent load on the consume operation. You may see this referred to as artificial data dependencies in other texts.
+// If we assume the compiler is able to track all these dependencies, the question is how do we enforce these dependencies at the machine instruction level. Let's go back to our ptr dependent load example.
+//
+// ----------------------------------------------------------------
+// addi r0, pc, offset;
+// ldr r1, 0(r0);
+// ldr r2, 0(r1);
+// ----------------------------------------------------------------
+//
+// The above pseudo assembly does a pc relative calculation to find the address of ptr. We then load ptr and then continue the dependency chain by loading the int from the loaded ptr.
+// Thus r0 has type of int**, which we use to load r1 an int* which we use to load our final value of r2 which is the int.
+// The key observation here is that most instructions provided by most architectures only allow moving from a base register + offset into a destination register.
+// This allows for trivial capturing of data dependent loads through pointers. But how do we capture the data dependency of DATA[y - y]. We would need something like this.
+//
+// ----------------------------------------------------------------
+// sub r1, r0, r0; // Assume r0 holds y from the Consume Operation
+// add r3, r1, r2; // Assume r2 holds the address of DATA[0]
+// ldr r4, 0(r3);
+// ----------------------------------------------------------------
+//
+// We cannot use two registers as both arguments to the load instruction. Thus to accomplish this you noticed we had to add indirect data dependencies through registers to compute the final address from the consume
+// load of y and then load from the final computed address. The compiler would have to recognize all these dependencies and enforce that they be maintained in the generated assembly.
+// The compiler must ensure the entire syntactic, source code, data-dependency chain is enforced in the generated assembly, no matter how long such chain may be.
+// Because of this and other issues, every major compiler unilaterally promotes consume to an acquire operation across the board. Read reference [15] for more information.
+// This completely removes the actual usefulness of consume for the pointer dependent case which is used quite heavily in concurrent read heavy data structures where updates are published via pointer swaps.
+//
+// ******** read_depends use case - Release-ReadDepends Semantics ********
+//
+// eastl::atomic<T> provides a weaker read_depends operation that only encapsulates the pointer dependency case above. Loading from a pointer and then loading the value from the loaded pointer.
+// The read_depends operation can be used on loads from only an eastl::atomic<T*> type. The return pointer of the load must and can only be used to then further load values. And that is it.
+// If you are unsure, upgrade this load to an acquire operation.
+//
+// MyStruct* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int a = ptr->a;
+// int b = ptr->b;
+// return a + b;
+//
+// The loads from ptr after the gAtomicPtr load ensure that the correct values of a and b are observed. This pairs with a Release operation on the writer side by releasing gAtomicPtr.
+//
+//
+// As said above the returned pointer from a .load(memory_order_read_depends) can only be used to then further load values.
+// Dereferencing(*) and Arrow Dereferencing(->) are valid operations on return values from .load(memory_order_read_depends).
+//
+// MyStruct* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int a = ptr->a; - VALID
+// int a = *ptr; - VALID
+//
+// Since dereferencing is just indexing via some offset from some base address, this also means addition and subtraction of constants is ok.
+//
+// int* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int a = *(ptr + 1) - VALID
+// int a = *(ptr - 1) - VALID
+//
+// Casts also work correctly since casting is just offsetting a pointer depending on the inheritance hierarchy or if using intrusive containers.
+//
+// ReadDependsIntrusive** intrusivePtr = gAtomicPtr.load(memory_order_read_depends);
+// ReadDependsIntrusive* ptr = ((ReadDependsIntrusive*)(((char*)intrusivePtr) - offsetof(ReadDependsIntrusive, next)));
+//
+// Base* basePtr = gAtomicPtr.load(memory_order_read_depends);
+// Dervied* derivedPtr = static_cast<Derived*>(basePtr);
+//
+// Both of the above castings from the result of the load are valid for this memory order.
+//
+// You can reinterpret_cast the returned pointer value to a uintptr_t to set bits, clear bits, or xor bits but the pointer must be casted back before doing anything else.
+//
+// int* ptr = gAtomicPtr.load(memory_order_read_depends);
+// ptr = reinterpret_cast<int*>(reinterpret_cast<uintptr_t>(ptr) & ~3);
+//
+// Do not use any equality or relational operator (==, !=, >, <, >=, <=) results in the computation of offsets before dereferencing.
+// As we learned above in the Control Dependencies section, CPUs will not order Load-Load Control Dependencies. Relational and equality operators are often compiled using branches.
+// It doesn't have to be compiled to branched, condition instructions could be used. Or some architectures provide comparison instructions such as set less than which do not need
+// branches when using the result of the relational operator in arithmetic statements. Then again short circuiting may need to introduct branches since C++ guarantees the
+// rest of the expression must not be evaluated.
+// The following odd code is forbidden.
+//
+// int* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int* ptr2 = ptr + (ptr >= 0);
+// int a = *ptr2;
+//
+// Only equality comparisons against nullptr are allowed. This is becase the compiler cannot assume that the address of the loaded value is some known address and substitute our loaded value.
+// int* ptr = gAtomicPtr.load(memory_order_read_depends);
+// if (ptr == nullptr); - VALID
+// if (ptr != nullptr); - VALID
+//
+// Thus the above sentence that states:
+// The return pointer of the load must and can only be used to then further load values. And that is it.
+// must be respected by the programmer. This memory order is an optimization added for efficient read heavy pointer swapping data structures. IF you are unsure, use memory_order_acquire.
+//
+// ******** Relaxed && eastl::atomic<T> guarantees ********
+//
+// We saw various ways that compiler barriers do not help us and that we need something more granular to make sure accesses are not mangled by the compiler to be considered atomic.
+// Ensuring these guarantees like preventing dead-store elimination or the splitting of stores into smaller sub stores is where the C/C++11
+// standard comes into play to define what it means to operate on an atomic object.
+// These basic guarantees are provided via new compiler intrinsics on gcc/clang that provide explicit indication to the compiler.
+// Or on msvc by casting the underlying atomic T to a volatile T*, providing stronger compiler guarantees than the standard requires.
+// Essentially volatile turns off all possible optimizations on that variable access and ensures all volatile variables cannot be
+// reordered across sequence points. Again we are not using volatile here to guarantee atomicity, we are using it in its very intended purpose
+// to tell the compiler it cannot assume anything about the contents of that variable. Now let's dive into the base guarantees of eastl::atomic<T>.
+//
+// The standard defines the following for all operations on an atomic object M.
+//
+// Write-Write Coherence:
+// If an operation A modifies an atomic object M(store), happens before an operation B that modifies M(store), then A shall be earlier than B in the modification order of M.
+//
+// Read-Read Coherence:
+// If a value computation A on an atomic object M(load), happens before a value computation B on M(load), and A takes its value from a side effect X on M(from a previous store to M), then the value
+// computed by B shall either be the value stored by X or some later side effect Y on M, where Y follows X in the modification order of M.
+//
+// Read-Write Coherence:
+// If a value computation A on an atomic object M(load), happens before an operation B that modifies M(store), then A shall take its value from a side effect X on M, where X precedes B in the modification
+// order of M.
+//
+// Write-Read Coherence:
+// If a side effect X on an atomic object M(store), happens before a value computation B on M(load), then the evaluation of B must take its value from X or from some side effect Y that follows X in the
+// modification order of M.
+//
+// What does all this mean. This is just a pedantic way of saying that the preceding coherence requirements disallow compiler reordering of atomic operations to a single atomic object.
+// This means all operations must be emitted by the compiler. Stores cannot be dead-store eliminated even if they are the only stores.
+// Loads cannot have common subexpression elimination performed on them even if they are the only loads.
+// Loads and Stores to the same atomic object cannot be reordered by the compiler.
+// Compiler cannot introduce extra loads or stores to the atomic object.
+// Compiler also cannot reload from an atomic object, it must save and store to a stack slot.
+// Essentially this provides all the necessary guarantees needed when treating an object as atomic from the compilers point of view.
+//
+// ******** Same Address LoadLoad Reordering ********
+//
+// It is expected that same address operations cannot and are not reordered with each other. It is expected that operations to the same address have sequential consistency because
+// they are to the same address. If you picture a cpu executing instructions, how is it possible to reorder instructions to the same address and yet keep program behaviour the same.
+// Same Address LoadLoad Reordering is one weakening that is possible to do and keep observed program behaviour for a single-threaded program.
+// More formally, A and B are two memory instructions onto the same address P, where A is program ordered before B. If A and B are both loads then their order need not be ordered.
+// If B is a store then it cannot retire the store before A instruction completes. If A is a store and B is a load, then B must get its value forwarded from the store buffer or observe a later store
+// from the cache. Thus Same Address LDST, STST, STLD cannot be reordered but Same Address LDLD can be reordered.
+// Intel Itanium and SPARC RMO cpus allow and do Same Address LoadLoad Reordering.
+// Let's look at an example.
+//
+// ---------------------------
+// Same Address LoadLoad
+// ---------------------------
+// Initial State:
+// x = 0;
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// STORE(x, 1) | r0 = LOAD(x)
+// | r1 = LOAD(x)
+// ---------------------------
+// Observed: r0 = 1 && r0 = 0
+// ---------------------------
+//
+// Notice in the above example it has appeared as if the two loads from the same address have been reordered. If we first observed the new store of 1, then the next load should not observe a value in the past.
+// Many programmers, expect same address sequential consistency, all accesses to a single address appear to execute in a sequential order.
+// Notice this violates the Read-Read Coherence for all atomic objects defined by the std and thus provided by eastl::atomic<T>.
+//
+// All operations on eastl::atomic<T> irrelevant of the memory ordering of the operation provides Same Address Sequential Consistency since it must abide by the coherence rules above.
+//
+// ******** eastl::atomic_thread_fence ********
+//
+// eastl::atomic_thread_fence(relaxed) : Provides no ordering guarantees
+// eastl::atomic_thread_fence(acquire) : Prevents all prior loads from being reordered with all later loads and stores, LDLD && LDST memory barrier
+// eastl::atomic_thread_fence(release) : Prevents all prior loads and stores from being reordered with all later stores, STST && LDST memory barrier
+// eastl::atomic_thread_fence(acq_rel) : Union of acquire and release, LDLD && STST && LDST memory barrier
+// eastl::atomic_thread_fence(seq_cst) : Full memory barrier that provides a single total order
+//
+// See Reference [9] and Fence-Fence, Atomic-Fence, Fence-Atomic Synchronization, Atomics Order and Consistency in the C++ std.
+//
+// ******** Atomic && Fence Synchronization ********
+//
+// ---------------------------
+// Fence-Fence Synchronization
+// ---------------------------
+// A release fence A synchronizes-with an acquire fence B if there exist operations X and Y on the same atomic object M, such that fence A is sequenced-before operation X and X modifies M,
+// operation Y is sequenced-before B and Y reads the value written by X.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before fence A will happen-before all non-atomic and relaxed atomic loads after fence B.
+//
+// ----------------------------
+// Atomic-Fence Synchronization
+// ----------------------------
+// An atomic release operation A on atomic object M synchronizes-with an acquire fence B if there exists some atomic operation X on atomic object M, such that X is sequenced-before B and reads
+// the value written by A.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before atomic release operation A will happen-before all non-atomic and relaxed atomic loads after fence B.
+//
+// ----------------------------
+// Fence-Atomic Synchronization
+// ----------------------------
+// A release fence A synchronizes-with an atomic acquire operation B on an atomic object M if there exists an atomic operation X such that A is sequenced-before X, X modifies M and B reads the
+// value written by X.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before fence A will happen-before all non-atomic and relaxed atomic loads after atomic acquire operation B.
+//
+// This can be used to add synchronization to a series of several relaxed atomic operations, as in the following trivial example.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// x = 0;
+// eastl::atomic<int> y = 0;
+// z = 0;
+// eastl::atomic<int> w = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// x = 2 | r0 = y.load(memory_order_relaxed);
+// z = 2 | r1 = w.load(memory_order_relaxed);
+// atomic_thread_fence(memory_order_release); | atomic_thread_fence(memory_order_acquire);
+// y.store(1, memory_order_relaxed); | r2 = x
+// w.store(1, memory_order_relaxed); | r3 = z
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 1 && r2 = 0 && r3 = 0
+// ----------------------------------------------------------------------------------------
+//
+// ******** Atomic vs Standalone Fence ********
+//
+// A sequentially consistent fence is stronger than a sequentially consistent operation because it is not tied to a specific atomic object.
+// An atomic fence must provide synchronization with ANY atomic object whereas the ordering on the atomic object itself must only provide
+// that ordering on that SAME atomic object. Thus this can provide cheaper guarantees on architectures with dependency tracking hardware.
+// Let's look at a concrete example that will make this all clear.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed); | r0 = y.load(memory_order_relaxed);
+// atomic_thread_fence(memory_order_seq_cst); | atomic_thread_fence(memory_order_seq_cst);
+// y.store(1, memory_order_relaxed); | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// Here the two sequentially consistent fences synchronize-with each other thus ensuring that if we observe r0 = 1 then we also observe that r1 = 2.
+// In the above example if we observe r0 = 1 it is impossible to observe r1 = 0.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed); | r0 = y.load(memory_order_relaxed);
+// x.fetch_add(1, memory_order_seq_cst); | x.fetch_add(1, memory_order_seq_cst);
+// y.store(1, memory_order_relaxed); | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// Here the two fetch_add sequentially consistent operations on x synchronize-with each other ensuring that if we observe r0 = 1 then we cannot observer r1 = 0;
+// The thing to take note here is that we synchronized on the SAME atomic object, that being the atomic object x.
+// Note that replacing the x.fetch_add() in Thread 1 with a sequentially consistent operation on another atomic object or a sequentially consistent fence can lead to
+// observing r1 = 0 even if we observe r0 = 1. For example the following code may fail.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed); | r0 = y.load(memory_order_relaxed);
+// | x.fetch_add(1, memory_order_seq_cst);
+// y.fetch_add(1, memory_order_seq_cst); | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed); | r0 = y.load(memory_order_relaxed);
+// x.fetch_add(1, memory_order_seq_cst); | atomic_thread_fence(memory_order_seq_cst);
+// y.store(1, memory_order_relaxed); | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// In this example it is entirely possible that we observe r0 = 1 && r1 = 0 even though we have source code causality and sequentially consistent operations.
+// Observability is tied to the atomic object on which the operation was performed and the thread fence doesn't synchronize-with the fetch_add because
+// there is no load above the fence that reads the value from the fetch_add.
+//
+// ******** Sequential Consistency Semantics ********
+//
+// See section, Order and consistency, in the C++ std and Reference [9].
+//
+// A load with memory_order_seq_cst performs an acquire operation
+// A store with memory_order_seq_cst performs a release operation
+// A RMW with memory_order_seq_cst performs both an acquire and a release operation
+//
+// All memory_order_seq_cst operations exhibit the below single total order in which all threads observe all modifications in the same order
+//
+// Paraphrasing, there is a single total order on all memory_order_seq_cst operations, S, such that each sequentially consistent operation B that loads a value from
+// atomic object M observes either the result of the last sequentially consistent modification A on M, or some modification on M that isn't memory_order_seq_cst.
+// For atomic modifications A and B on an atomic object M, B occurs after A in the total order of M if:
+// there is a memory_order_seq_cst fence X whereby A is sequenced before X, and X precedes B,
+// there is a memory_order_seq_cst fence Y whereby Y is sequenced before B, and A precedes Y,
+// there are memory_order_seq_cst fences X and Y such that A is sequenced before X, Y is sequenced before B, and X precedes Y.
+//
+// Let's look at some examples using memory_order_seq_cst.
+//
+// ------------------------------------------------------------
+// Store-Buffer
+// ------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------
+// Thread 0 | Thread 1
+// ------------------------------------------------------------
+// STORE_RELAXED(x, 1) | STORE_RELAXED(y, 1)
+// ATOMIC_THREAD_FENCE(SEQ_CST) | ATOMIC_THREAD_FENCE(SEQ_CST)
+// r0 = LOAD_RELAXED(y) | r1 = LOAD_RELAXED(x)
+// ------------------------------------------------------------
+// Observed: r0 = 0 && r1 = 0
+// ------------------------------------------------------------
+//
+// ------------------------------------------------------------
+// Store-Buffer
+// ------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------
+// Thread 0 | Thread 1
+// ------------------------------------------------------------
+// STORE_SEQ_CST(x, 1) | STORE_SEQ_CST(y, 1)
+// r0 = LOAD_SEQ_CST(y) | r1 = LOAD_SEQ_CST(x)
+// ------------------------------------------------------------
+// Observed: r0 = 0 && r1 = 0
+// ------------------------------------------------------------
+//
+// Both solutions above are correct to ensure that the end results cannot lead to both r0 and r1 returning 0. Notice that the second one requires memory_order_seq_cst on both
+// operations to ensure they are in the total order, S, for all memory_order_seq_cst operations. The other example uses the stronger guarantee provided by a sequentially consistent fence.
+//
+// ------------------------------------------------------------------------------------------------
+// Read-To-Write Causality
+// ------------------------------------------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ------------------------------------------------------------------------------------------------
+// STORE_SEQ_CST(x, 1) | r0 = LOAD_RELAXED(x) | STORE_RELAXED(y, 1)
+// | ATOMIC_THREAD_FENCE(SEQ_CST) | ATOMIC_THREAD_FENCE(SEQ_CST)
+// | r1 = LOAD_RELAXED(y) | r2 = LOAD_RELAXED(x)
+// ------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0 && r2 = 0
+// ------------------------------------------------------------------------------------------------
+//
+// You'll notice this example is an in between example of the Store-Buffer and IRIW examples we have seen earlier. The store in Thread 0 needs to be sequentially consistent so it synchronizes with the
+// thread fence in Thread 1. C++20 due to Reference [9], increased the strength of sequentially consistent fences has been increased to allow for the following.
+//
+// ------------------------------------------------------------------------------------------------
+// Read-To-Write Causality - C++20
+// ------------------------------------------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ------------------------------------------------------------------------------------------------
+// STORE_RELAXED(x, 1) | r0 = LOAD_RELAXED(x) | STORE_RELAXED(y, 1)
+// | ATOMIC_THREAD_FENCE(SEQ_CST) | ATOMIC_THREAD_FENCE(SEQ_CST)
+// | r1 = LOAD_RELAXED(y) | r2 = LOAD_RELAXED(x)
+// ------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0 && r2 = 0
+// ------------------------------------------------------------------------------------------------
+//
+// Notice we were able to turn the store in Thread 0 into a relaxed store and still properly observe either r1 or r2 returning 1.
+// Note that all implementations of the C++11 standard for every architecture even now allows the C++20 behaviour.
+// The C++20 standard memory model was brought up to recognize that all current implementations are able to implement them stronger.
+//
+// ******** False Sharing ********
+//
+// As we know operations work on the granularity of a cacheline. A RMW operation obviously must have some help from the cache to ensure the entire operation
+// is seen as one whole unit. Conceptually we can think of this as the cpu's cache taking a lock on the cacheline, the cpu doing the read-modify-write operation on the
+// locked cacheline, and then releasing the lock on the cacheline. This means during that time any other cpu needing that cacheline must wait for the lock to be released.
+//
+// If we have two atomic objects doing RMW operations and they are within the same cacheline, they are unintentionally contending and serializing with each other even
+// though they are two completely separate objects. This gives us the common name to this phenomona called false sharing.
+// You can cacheline align your structure or the eastl::atomic<T> object to prevent false sharing.
+//
+// ******** union of eastl::atomic<T> ********
+//
+// union { eastl::atomic<uint8_t> atomic8; eastl::atomic<uint32_t> atomic32; };
+//
+// While we know that operations operate at the granularity of a processor's cacheline size and so we may expect that storing and loading
+// from different width atomic variables at the same address to not cause weird observable behaviour but it may.
+// Store Buffers allow smaller stores to replace parts of larger loads that are forwarded from a store buffer.
+// This means if there is 2 bytes of modified data in the store buffer that overlaps with a 4 byte load, the 2 bytes will be forwarded
+// from the store buffer. This is even documented behaviour of the x86 store buffer in the x86 architecture manual.
+// This behaviour can cause processors to observe values that have never and will never be visible on the bus to other processors.
+// The use of a union with eastl::atomic<T> is not wrong but your code must be able to withstand these effects.
+//
+// Assume everything starts out initially as zero.
+//
+// -------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// --------------------------------------------------------------------------------------------------------
+// cmpxchg 0 -> 0x11111111 | cmpxchg 0x11111111 -> 0x22222222 | mov byte 0x33; mov 4 bytes into register;
+// ---------------------------------------------------------------------------------------------------------
+//
+// After all operations complete, the value in memory at that location is, 0x22222233.
+// It is possible that the 4 byte load in thread 2 actually returns 0x11111133.
+// Now 0x11111133 is an observed value that no other cpu could observe because it was never globally visible on the data bus.
+//
+// If the value in memory is 0x22222233 then the first cmpxchg succeeded, then the second cmpxchg succeeded and finally our
+// byte to memory was stored, yet our load returned 0x11111133. This is because store buffer contents can be forwarded to overlapping loads.
+// It is possible that the byte store got put in the store buffer. Our load happened after the first cmpxchg with the byte forwarded.
+// This behaviour is fine as long as your algorithm is able to cope with this kind of store buffer forwarding effects.
+//
+// Reference [13] is a great read on more about this topic of mixed-size concurrency.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/atomic/atomic.h>
+#include <EASTL/internal/atomic/atomic_standalone.h>
+#include <EASTL/internal/atomic/atomic_flag.h>
+#include <EASTL/internal/atomic/atomic_flag_standalone.h>
+
+
+#endif /* EASTL_ATOMIC_H */
diff --git a/EASTL/include/EASTL/bit.h b/EASTL/include/EASTL/bit.h
new file mode 100644
index 0000000..0eeeed0
--- /dev/null
+++ b/EASTL/include/EASTL/bit.h
@@ -0,0 +1,172 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_BIT_H
+#define EASTL_BIT_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/type_traits.h>
+#include <string.h> // memcpy
+
+namespace eastl
+{
+ // eastl::bit_cast
+ // Obtains a value of type To by reinterpreting the object representation of 'from'.
+ // Every bit in the value representation of the returned To object is equal to the
+ // corresponding bit in the object representation of 'from'.
+ //
+ // In order for bit_cast to be constexpr, the compiler needs to explicitly support
+ // it by providing the __builtin_bit_cast builtin. If that builtin is not available,
+ // then we memcpy into aligned storage at runtime and return that instead.
+ //
+ // Both types To and From must be equal in size, and must be trivially copyable.
+
+ #if defined(EASTL_CONSTEXPR_BIT_CAST_SUPPORTED) && EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+
+ template<typename To, typename From,
+ typename = eastl::enable_if_t<
+ sizeof(To) == sizeof(From)
+ && eastl::is_trivially_copyable<To>::value
+ && eastl::is_trivially_copyable<From>::value
+ >
+ >
+ EA_CONSTEXPR To bit_cast(const From& from) EA_NOEXCEPT
+ {
+ return __builtin_bit_cast(To, from);
+ }
+
+ #else
+
+ template<typename To, typename From,
+ typename = eastl::enable_if_t<
+ sizeof(To) == sizeof(From)
+ && eastl::is_trivially_copyable<To>::value
+ && eastl::is_trivially_copyable<From>::value
+ >
+ >
+ inline To bit_cast(const From& from) EA_NOEXCEPT
+ {
+ typename eastl::aligned_storage<sizeof(To), alignof(To)>::type to;
+ ::memcpy(eastl::addressof(to), eastl::addressof(from), sizeof(To));
+ return reinterpret_cast<To&>(to);
+ }
+
+ #endif // EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+
+ #if defined(EA_COMPILER_CPP20_ENABLED)
+ #ifndef EASTL_COUNT_LEADING_ZEROES
+ #if defined(__GNUC__)
+ #if (EA_PLATFORM_PTR_SIZE == 8)
+ #define EASTL_COUNT_LEADING_ZEROES __builtin_clzll
+ #else
+ #define EASTL_COUNT_LEADING_ZEROES __builtin_clz
+ #endif
+ #endif
+
+ #ifndef EASTL_COUNT_LEADING_ZEROES
+ static inline int eastl_count_leading_zeroes(uint64_t x)
+ {
+ if(x)
+ {
+ int n = 0;
+ if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; }
+ if(x & 0xFFFF0000) { n += 16; x >>= 16; }
+ if(x & 0xFFFFFF00) { n += 8; x >>= 8; }
+ if(x & 0xFFFFFFF0) { n += 4; x >>= 4; }
+ if(x & 0xFFFFFFFC) { n += 2; x >>= 2; }
+ if(x & 0xFFFFFFFE) { n += 1; }
+ return 63 - n;
+ }
+ return 64;
+ }
+
+ static inline int eastl_count_leading_zeroes(uint32_t x)
+ {
+ if(x)
+ {
+ int n = 0;
+ if(x <= 0x0000FFFF) { n += 16; x <<= 16; }
+ if(x <= 0x00FFFFFF) { n += 8; x <<= 8; }
+ if(x <= 0x0FFFFFFF) { n += 4; x <<= 4; }
+ if(x <= 0x3FFFFFFF) { n += 2; x <<= 2; }
+ if(x <= 0x7FFFFFFF) { n += 1; }
+ return n;
+ }
+ return 32;
+ }
+
+ #define EASTL_COUNT_LEADING_ZEROES eastl_count_leading_zeroes
+ #endif
+ #endif
+
+ template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+ EA_CONSTEXPR int countl_zero(const T num) EA_NOEXCEPT
+ {
+ EA_CONSTEXPR auto DIGITS = eastl::numeric_limits<T>::digits;
+ EA_CONSTEXPR auto DIGITS_U = eastl::numeric_limits<unsigned>::digits;
+ EA_CONSTEXPR auto DIGITS_ULL = eastl::numeric_limits<unsigned long long>::digits;
+
+ if (num == 0)
+ {
+ return DIGITS;
+ }
+
+ if constexpr (DIGITS <= DIGITS_U)
+ {
+ EA_CONSTEXPR auto DIFF = DIGITS_U - DIGITS;
+ return EASTL_COUNT_LEADING_ZEROES(static_cast<uint32_t>(num)) - DIFF;
+ }
+ else
+ {
+ EA_CONSTEXPR auto DIFF = DIGITS_ULL - DIGITS;
+ return EASTL_COUNT_LEADING_ZEROES(static_cast<uint64_t>(num)) - DIFF;
+ }
+ }
+
+ template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+ EA_CONSTEXPR bool has_single_bit(const T num) EA_NOEXCEPT
+ {
+ return num != 0 && (num & (num - 1)) == 0;
+ }
+
+ template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+ EA_CONSTEXPR T bit_ceil(const T num) EA_NOEXCEPT
+ {
+ if (num <= 1U)
+ {
+ return T(1);
+ }
+
+ const auto shift = eastl::numeric_limits<T>::digits - eastl::countl_zero(static_cast<T>(num - 1));
+ return static_cast<T>(T(1) << shift);
+ }
+
+ template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+ EA_CONSTEXPR T bit_floor(const T num) EA_NOEXCEPT
+ {
+ if (num == 0)
+ {
+ return T(0);
+ }
+
+ const auto shift = eastl::numeric_limits<T>::digits - eastl::countl_zero(num) - 1;
+ return static_cast<T>(T(1) << shift);
+ }
+
+ template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+ EA_CONSTEXPR T bit_width(const T num) EA_NOEXCEPT
+ {
+ return static_cast<T>(eastl::numeric_limits<T>::digits - eastl::countl_zero(num));
+ }
+ #endif
+
+} // namespace eastl
+
+#endif // EASTL_BIT_H
diff --git a/EASTL/include/EASTL/bitset.h b/EASTL/include/EASTL/bitset.h
new file mode 100644
index 0000000..c31831a
--- /dev/null
+++ b/EASTL/include/EASTL/bitset.h
@@ -0,0 +1,2234 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a bitset much like the C++ std::bitset class.
+// The primary distinctions between this list and std::bitset are:
+// - bitset is more efficient than some other std::bitset implementations,
+// notably the bitset that comes with Microsoft and other 1st party platforms.
+// - bitset is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - bitset is savvy to environments in which 'unsigned long' is not the
+// most efficient integral data type. std::bitset implementations use
+// unsigned long, even if it is an inefficient integer type.
+// - bitset removes as much function calls as practical, in order to allow
+// debug builds to run closer in speed and code footprint to release builds.
+// - bitset doesn't support string functionality. We can add this if
+// it is deemed useful.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_BITSET_H
+#define EASTL_BITSET_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/algorithm.h>
+
+EA_DISABLE_ALL_VC_WARNINGS();
+
+#include <stddef.h>
+#include <string.h>
+
+EA_RESTORE_ALL_VC_WARNINGS();
+
+#if EASTL_EXCEPTIONS_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS();
+
+ #include <stdexcept> // std::out_of_range, std::length_error.
+
+ EA_RESTORE_ALL_VC_WARNINGS();
+#endif
+
+EA_DISABLE_VC_WARNING(4127); // Conditional expression is constant
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ // To consider: Enable this for backwards compatibility with any user code that might be using BitsetWordType:
+ // #define BitsetWordType EASTL_BITSET_WORD_TYPE_DEFAULT
+
+
+ /// BITSET_WORD_COUNT
+ ///
+ /// Defines the number of words we use, based on the number of bits.
+ /// nBitCount refers to the number of bits in a bitset.
+ /// WordType refers to the type of integer word which stores bitet data. By default it is BitsetWordType.
+ ///
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x can't handle the simpler declaration below.
+ #define BITSET_WORD_COUNT(nBitCount, WordType) (nBitCount == 0 ? 1 : ((nBitCount - 1) / (8 * sizeof(WordType)) + 1))
+ #else
+ #define BITSET_WORD_COUNT(nBitCount, WordType) ((nBitCount - 1) / (8 * sizeof(WordType)) + 1)
+ #endif
+
+
+ /// EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING
+ /// Before GCC 4.7 the '-Warray-bounds' buggy and was very likely to issue false positives for loops that are
+ /// difficult to evaluate.
+ /// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=45978
+ ///
+ #if defined(__GNUC__) && (EA_COMPILER_VERSION > 4007) && defined(EA_PLATFORM_ANDROID) // Earlier than GCC 4.7
+ #define EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING 1
+ #else
+ #define EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING 0
+ #endif
+
+
+
+ /// BitsetBase
+ ///
+ /// This is a default implementation that works for any number of words.
+ ///
+ template <size_t NW, typename WordType> // Templated on the number of words used to hold the bitset and the word type.
+ struct BitsetBase
+ {
+ typedef WordType word_type;
+ typedef BitsetBase<NW, WordType> this_type;
+ #if EASTL_BITSET_SIZE_T
+ typedef size_t size_type;
+ #else
+ typedef eastl_size_t size_type;
+ #endif
+
+ enum {
+ kBitsPerWord = (8 * sizeof(word_type)),
+ kBitsPerWordMask = (kBitsPerWord - 1),
+ kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 6 : 7)))))
+ };
+
+ public:
+ word_type mWord[NW];
+
+ public:
+ BitsetBase();
+ BitsetBase(uint32_t value); // This exists only for compatibility with std::bitset, which has a 'long' constructor.
+ //BitsetBase(uint64_t value); // Disabled because it causes conflicts with the 32 bit version with existing user code. Use from_uint64 to init from a uint64_t instead.
+
+ void operator&=(const this_type& x);
+ void operator|=(const this_type& x);
+ void operator^=(const this_type& x);
+
+ void operator<<=(size_type n);
+ void operator>>=(size_type n);
+
+ void flip();
+ void set();
+ void set(size_type i, bool value);
+ void reset();
+
+ bool operator==(const this_type& x) const;
+
+ bool any() const;
+ size_type count() const;
+
+ void from_uint32(uint32_t value);
+ void from_uint64(uint64_t value);
+
+ unsigned long to_ulong() const;
+ uint32_t to_uint32() const;
+ uint64_t to_uint64() const;
+
+ word_type& DoGetWord(size_type i);
+ word_type DoGetWord(size_type i) const;
+
+ size_type DoFindFirst() const;
+ size_type DoFindNext(size_type last_find) const;
+
+ size_type DoFindLast() const; // Returns NW * kBitsPerWord (the bit count) if no bits are set.
+ size_type DoFindPrev(size_type last_find) const; // Returns NW * kBitsPerWord (the bit count) if no bits are set.
+
+ }; // class BitsetBase
+
+
+
+ /// BitsetBase<1, WordType>
+ ///
+ /// This is a specialization for a bitset that fits within one word.
+ ///
+ template <typename WordType>
+ struct BitsetBase<1, WordType>
+ {
+ typedef WordType word_type;
+ typedef BitsetBase<1, WordType> this_type;
+ #if EASTL_BITSET_SIZE_T
+ typedef size_t size_type;
+ #else
+ typedef eastl_size_t size_type;
+ #endif
+
+ enum {
+ kBitsPerWord = (8 * sizeof(word_type)),
+ kBitsPerWordMask = (kBitsPerWord - 1),
+ kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 6 : 7)))))
+ };
+
+ public:
+ word_type mWord[1]; // Defined as an array of 1 so that bitset can treat this BitsetBase like others.
+
+ public:
+ BitsetBase();
+ BitsetBase(uint32_t value);
+ //BitsetBase(uint64_t value); // Disabled because it causes conflicts with the 32 bit version with existing user code. Use from_uint64 instead.
+
+ void operator&=(const this_type& x);
+ void operator|=(const this_type& x);
+ void operator^=(const this_type& x);
+
+ void operator<<=(size_type n);
+ void operator>>=(size_type n);
+
+ void flip();
+ void set();
+ void set(size_type i, bool value);
+ void reset();
+
+ bool operator==(const this_type& x) const;
+
+ bool any() const;
+ size_type count() const;
+
+ void from_uint32(uint32_t value);
+ void from_uint64(uint64_t value);
+
+ unsigned long to_ulong() const;
+ uint32_t to_uint32() const;
+ uint64_t to_uint64() const;
+
+ word_type& DoGetWord(size_type);
+ word_type DoGetWord(size_type) const;
+
+ size_type DoFindFirst() const;
+ size_type DoFindNext(size_type last_find) const;
+
+ size_type DoFindLast() const; // Returns 1 * kBitsPerWord (the bit count) if no bits are set.
+ size_type DoFindPrev(size_type last_find) const; // Returns 1 * kBitsPerWord (the bit count) if no bits are set.
+
+ }; // BitsetBase<1, WordType>
+
+
+
+ /// BitsetBase<2, WordType>
+ ///
+ /// This is a specialization for a bitset that fits within two words.
+ /// The difference here is that we avoid branching (ifs and loops).
+ ///
+ template <typename WordType>
+ struct BitsetBase<2, WordType>
+ {
+ typedef WordType word_type;
+ typedef BitsetBase<2, WordType> this_type;
+ #if EASTL_BITSET_SIZE_T
+ typedef size_t size_type;
+ #else
+ typedef eastl_size_t size_type;
+ #endif
+
+ enum {
+ kBitsPerWord = (8 * sizeof(word_type)),
+ kBitsPerWordMask = (kBitsPerWord - 1),
+ kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 6 : 7)))))
+ };
+
+ public:
+ word_type mWord[2];
+
+ public:
+ BitsetBase();
+ BitsetBase(uint32_t value);
+ //BitsetBase(uint64_t value); // Disabled because it causes conflicts with the 32 bit version with existing user code. Use from_uint64 instead.
+
+ void operator&=(const this_type& x);
+ void operator|=(const this_type& x);
+ void operator^=(const this_type& x);
+
+ void operator<<=(size_type n);
+ void operator>>=(size_type n);
+
+ void flip();
+ void set();
+ void set(size_type i, bool value);
+ void reset();
+
+ bool operator==(const this_type& x) const;
+
+ bool any() const;
+ size_type count() const;
+
+ void from_uint32(uint32_t value);
+ void from_uint64(uint64_t value);
+
+ unsigned long to_ulong() const;
+ uint32_t to_uint32() const;
+ uint64_t to_uint64() const;
+
+ word_type& DoGetWord(size_type);
+ word_type DoGetWord(size_type) const;
+
+ size_type DoFindFirst() const;
+ size_type DoFindNext(size_type last_find) const;
+
+ size_type DoFindLast() const; // Returns 2 * kBitsPerWord (the bit count) if no bits are set.
+ size_type DoFindPrev(size_type last_find) const; // Returns 2 * kBitsPerWord (the bit count) if no bits are set.
+
+ }; // BitsetBase<2, WordType>
+
+
+
+
+ /// bitset
+ ///
+ /// Implements a bitset much like the C++ std::bitset.
+ ///
+ /// As of this writing we don't implement a specialization of bitset<0>,
+ /// as it is deemed an academic exercise that nobody would actually
+ /// use and it would increase code space and provide little practical
+ /// benefit. Note that this doesn't mean bitset<0> isn't supported;
+ /// it means that our version of it isn't as efficient as it would be
+ /// if a specialization was made for it.
+ ///
+ /// - N can be any unsigned (non-zero) value, though memory usage is
+ /// linear with respect to N, so large values of N use large amounts of memory.
+ /// - WordType must be one of [uint16_t, uint32_t, uint64_t, uint128_t]
+ /// and the compiler must support the type. By default the WordType is
+ /// the largest native register type that the target platform supports.
+ ///
+ template <size_t N, typename WordType = EASTL_BITSET_WORD_TYPE_DEFAULT>
+ class bitset : private BitsetBase<BITSET_WORD_COUNT(N, WordType), WordType>
+ {
+ public:
+ typedef BitsetBase<BITSET_WORD_COUNT(N, WordType), WordType> base_type;
+ typedef bitset<N, WordType> this_type;
+ typedef WordType word_type;
+ typedef typename base_type::size_type size_type;
+
+ enum
+ {
+ kBitsPerWord = (8 * sizeof(word_type)),
+ kBitsPerWordMask = (kBitsPerWord - 1),
+ kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 6 : 7))))),
+ kSize = N, // The number of bits the bitset holds
+ kWordSize = sizeof(word_type), // The size of individual words the bitset uses to hold the bits.
+ kWordCount = BITSET_WORD_COUNT(N, WordType) // The number of words the bitset uses to hold the bits. sizeof(bitset<N, WordType>) == kWordSize * kWordCount.
+ };
+
+ using base_type::mWord;
+ using base_type::DoGetWord;
+ using base_type::DoFindFirst;
+ using base_type::DoFindNext;
+ using base_type::DoFindLast;
+ using base_type::DoFindPrev;
+ using base_type::to_ulong;
+ using base_type::to_uint32;
+ using base_type::to_uint64;
+ using base_type::count;
+ using base_type::any;
+
+ public:
+ /// reference
+ ///
+ /// A reference is a reference to a specific bit in the bitset.
+ /// The C++ standard specifies that this be a nested class,
+ /// though it is not clear if a non-nested reference implementation
+ /// would be non-conforming.
+ ///
+ class reference
+ {
+ protected:
+ friend class bitset<N, WordType>;
+
+ word_type* mpBitWord;
+ size_type mnBitIndex;
+
+ reference(){} // The C++ standard specifies that this is private.
+
+ public:
+ reference(const bitset& x, size_type i);
+
+ reference& operator=(bool value);
+ reference& operator=(const reference& x);
+
+ bool operator~() const;
+ operator bool() const // Defined inline because CodeWarrior fails to be able to compile it outside.
+ { return (*mpBitWord & (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask))) != 0; }
+
+ reference& flip();
+ };
+
+ public:
+ friend class reference;
+
+ bitset();
+ bitset(uint32_t value);
+ //bitset(uint64_t value); // Disabled because it causes conflicts with the 32 bit version with existing user code. Use from_uint64 instead.
+
+ // We don't define copy constructor and operator= because
+ // the compiler-generated versions will suffice.
+
+ this_type& operator&=(const this_type& x);
+ this_type& operator|=(const this_type& x);
+ this_type& operator^=(const this_type& x);
+
+ this_type& operator<<=(size_type n);
+ this_type& operator>>=(size_type n);
+
+ this_type& set();
+ this_type& set(size_type i, bool value = true);
+
+ this_type& reset();
+ this_type& reset(size_type i);
+
+ this_type& flip();
+ this_type& flip(size_type i);
+ this_type operator~() const;
+
+ reference operator[](size_type i);
+ bool operator[](size_type i) const;
+
+ const word_type* data() const;
+ word_type* data();
+
+ void from_uint32(uint32_t value);
+ void from_uint64(uint64_t value);
+
+ //unsigned long to_ulong() const; // We inherit this from the base class.
+ //uint32_t to_uint32() const;
+ //uint64_t to_uint64() const;
+
+ //size_type count() const; // We inherit this from the base class.
+ size_type size() const;
+
+ bool operator==(const this_type& x) const;
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ bool operator!=(const this_type& x) const;
+#endif
+
+ bool test(size_type i) const;
+ //bool any() const; // We inherit this from the base class.
+ bool all() const;
+ bool none() const;
+
+ this_type operator<<(size_type n) const;
+ this_type operator>>(size_type n) const;
+
+ // Finds the index of the first "on" bit, returns kSize if none are set.
+ size_type find_first() const;
+
+ // Finds the index of the next "on" bit after last_find, returns kSize if none are set.
+ size_type find_next(size_type last_find) const;
+
+ // Finds the index of the last "on" bit, returns kSize if none are set.
+ size_type find_last() const;
+
+ // Finds the index of the last "on" bit before last_find, returns kSize if none are set.
+ size_type find_prev(size_type last_find) const;
+
+ }; // bitset
+
+
+
+
+
+
+
+ /// BitsetCountBits
+ ///
+ /// This is a fast trick way to count bits without branches nor memory accesses.
+ ///
+ inline uint32_t BitsetCountBits(uint64_t x)
+ {
+ // GCC 3.x's implementation of UINT64_C is broken and fails to deal with
+ // the code below correctly. So we make a workaround for it. Earlier and
+ // later versions of GCC don't have this bug.
+
+ #if defined(__GNUC__) && (__GNUC__ == 3)
+ x = x - ((x >> 1) & 0x5555555555555555ULL);
+ x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
+ x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+ return (uint32_t)((x * 0x0101010101010101ULL) >> 56);
+ #else
+ x = x - ((x >> 1) & UINT64_C(0x5555555555555555));
+ x = (x & UINT64_C(0x3333333333333333)) + ((x >> 2) & UINT64_C(0x3333333333333333));
+ x = (x + (x >> 4)) & UINT64_C(0x0F0F0F0F0F0F0F0F);
+ return (uint32_t)((x * UINT64_C(0x0101010101010101)) >> 56);
+ #endif
+ }
+
+ inline uint32_t BitsetCountBits(uint32_t x)
+ {
+ x = x - ((x >> 1) & 0x55555555);
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ return (uint32_t)((x * 0x01010101) >> 24);
+ }
+
+ inline uint32_t BitsetCountBits(uint16_t x)
+ {
+ return BitsetCountBits((uint32_t)x);
+ }
+
+ inline uint32_t BitsetCountBits(uint8_t x)
+ {
+ return BitsetCountBits((uint32_t)x);
+ }
+
+
+ // const static char kBitsPerUint16[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 };
+ #define EASTL_BITSET_COUNT_STRING "\0\1\1\2\1\2\2\3\1\2\2\3\2\3\3\4"
+
+
+ inline uint32_t GetFirstBit(uint8_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 1;
+
+ if((x & 0x0000000F) == 0) { n += 4; x >>= 4; }
+ if((x & 0x00000003) == 0) { n += 2; x >>= 2; }
+
+ return (uint32_t)(n - (x & 1));
+ }
+
+ return 8;
+ }
+
+ inline uint32_t GetFirstBit(uint16_t x) // To do: Update this to use VC++ _BitScanForward, _BitScanForward64; GCC __builtin_ctz, __builtin_ctzl. VC++ __lzcnt16, __lzcnt, __lzcnt64 requires recent CPUs (2013+) and probably can't be used. http://en.wikipedia.org/wiki/Haswell_%28microarchitecture%29#New_features
+ {
+ if(x)
+ {
+ uint32_t n = 1;
+
+ if((x & 0x000000FF) == 0) { n += 8; x >>= 8; }
+ if((x & 0x0000000F) == 0) { n += 4; x >>= 4; }
+ if((x & 0x00000003) == 0) { n += 2; x >>= 2; }
+
+ return (uint32_t)(n - (x & 1));
+ }
+
+ return 16;
+ }
+
+ inline uint32_t GetFirstBit(uint32_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 1;
+
+ if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; }
+ if((x & 0x000000FF) == 0) { n += 8; x >>= 8; }
+ if((x & 0x0000000F) == 0) { n += 4; x >>= 4; }
+ if((x & 0x00000003) == 0) { n += 2; x >>= 2; }
+
+ return (n - (x & 1));
+ }
+
+ return 32;
+ }
+
+ inline uint32_t GetFirstBit(uint64_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 1;
+
+ if((x & 0xFFFFFFFF) == 0) { n += 32; x >>= 32; }
+ if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; }
+ if((x & 0x000000FF) == 0) { n += 8; x >>= 8; }
+ if((x & 0x0000000F) == 0) { n += 4; x >>= 4; }
+ if((x & 0x00000003) == 0) { n += 2; x >>= 2; }
+
+ return (n - ((uint32_t)x & 1));
+ }
+
+ return 64;
+ }
+
+
+ #if EASTL_INT128_SUPPORTED
+ inline uint32_t GetFirstBit(eastl_uint128_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 1;
+
+ if((x & UINT64_C(0xFFFFFFFFFFFFFFFF)) == 0) { n += 64; x >>= 64; }
+ if((x & 0xFFFFFFFF) == 0) { n += 32; x >>= 32; }
+ if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; }
+ if((x & 0x000000FF) == 0) { n += 8; x >>= 8; }
+ if((x & 0x0000000F) == 0) { n += 4; x >>= 4; }
+ if((x & 0x00000003) == 0) { n += 2; x >>= 2; }
+
+ return (n - ((uint32_t)x & 1));
+ }
+
+ return 128;
+ }
+ #endif
+
+ inline uint32_t GetLastBit(uint8_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 0;
+
+ if(x & 0xFFF0) { n += 4; x >>= 4; }
+ if(x & 0xFFFC) { n += 2; x >>= 2; }
+ if(x & 0xFFFE) { n += 1; }
+
+ return n;
+ }
+
+ return 8;
+ }
+
+ inline uint32_t GetLastBit(uint16_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 0;
+
+ if(x & 0xFF00) { n += 8; x >>= 8; }
+ if(x & 0xFFF0) { n += 4; x >>= 4; }
+ if(x & 0xFFFC) { n += 2; x >>= 2; }
+ if(x & 0xFFFE) { n += 1; }
+
+ return n;
+ }
+
+ return 16;
+ }
+
+ inline uint32_t GetLastBit(uint32_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 0;
+
+ if(x & 0xFFFF0000) { n += 16; x >>= 16; }
+ if(x & 0xFFFFFF00) { n += 8; x >>= 8; }
+ if(x & 0xFFFFFFF0) { n += 4; x >>= 4; }
+ if(x & 0xFFFFFFFC) { n += 2; x >>= 2; }
+ if(x & 0xFFFFFFFE) { n += 1; }
+
+ return n;
+ }
+
+ return 32;
+ }
+
+ inline uint32_t GetLastBit(uint64_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 0;
+
+ if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; }
+ if(x & 0xFFFF0000) { n += 16; x >>= 16; }
+ if(x & 0xFFFFFF00) { n += 8; x >>= 8; }
+ if(x & 0xFFFFFFF0) { n += 4; x >>= 4; }
+ if(x & 0xFFFFFFFC) { n += 2; x >>= 2; }
+ if(x & 0xFFFFFFFE) { n += 1; }
+
+ return n;
+ }
+
+ return 64;
+ }
+
+ #if EASTL_INT128_SUPPORTED
+ inline uint32_t GetLastBit(eastl_uint128_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 0;
+
+ eastl_uint128_t mask(UINT64_C(0xFFFFFFFF00000000)); // There doesn't seem to exist compiler support for INT128_C() by any compiler. EAStdC's int128_t supports it though.
+ mask <<= 64;
+
+ if(x & mask) { n += 64; x >>= 64; }
+ if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; }
+ if(x & UINT64_C(0x00000000FFFF0000)) { n += 16; x >>= 16; }
+ if(x & UINT64_C(0x00000000FFFFFF00)) { n += 8; x >>= 8; }
+ if(x & UINT64_C(0x00000000FFFFFFF0)) { n += 4; x >>= 4; }
+ if(x & UINT64_C(0x00000000FFFFFFFC)) { n += 2; x >>= 2; }
+ if(x & UINT64_C(0x00000000FFFFFFFE)) { n += 1; }
+
+ return n;
+ }
+
+ return 128;
+ }
+ #endif
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // BitsetBase
+ //
+ // We tried two forms of array access here:
+ // for(word_type *pWord(mWord), *pWordEnd(mWord + NW); pWord < pWordEnd; ++pWord)
+ // *pWord = ...
+ // and
+ // for(size_t i = 0; i < NW; i++)
+ // mWord[i] = ...
+ //
+ // For our tests (~NW < 16), the latter (using []) access resulted in faster code.
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <size_t NW, typename WordType>
+ inline BitsetBase<NW, WordType>::BitsetBase()
+ {
+ reset();
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline BitsetBase<NW, WordType>::BitsetBase(uint32_t value)
+ {
+ // This implementation assumes that sizeof(value) <= sizeof(word_type).
+ //EASTL_CT_ASSERT(sizeof(value) <= sizeof(word_type)); Disabled because we now have support for uint8_t and uint16_t word types. It would be nice to have a runtime assert that tested this.
+
+ reset();
+ mWord[0] = static_cast<word_type>(value);
+ }
+
+
+ /*
+ template <size_t NW, typename WordType>
+ inline BitsetBase<NW, WordType>::BitsetBase(uint64_t value)
+ {
+ reset();
+
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ mWord[0] = static_cast<word_type>(value);
+
+ EASTL_CT_ASSERT(NW > 2); // We can assume this because we have specializations of BitsetBase for <1> and <2>.
+ //if(NW > 1) // NW is a template constant, but it would be a little messy to take advantage of it's const-ness.
+ mWord[1] = static_cast<word_type>(value >> 32);
+ #else
+ mWord[0] = static_cast<word_type>(value);
+ #endif
+ }
+ */
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::operator&=(const this_type& x)
+ {
+ for(size_t i = 0; i < NW; i++)
+ mWord[i] &= x.mWord[i];
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::operator|=(const this_type& x)
+ {
+ for(size_t i = 0; i < NW; i++)
+ mWord[i] |= x.mWord[i];
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::operator^=(const this_type& x)
+ {
+ for(size_t i = 0; i < NW; i++)
+ mWord[i] ^= x.mWord[i];
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::operator<<=(size_type n)
+ {
+ const size_type nWordShift = (size_type)(n >> kBitsPerWordShift);
+
+ if(nWordShift)
+ {
+ for(int i = (int)(NW - 1); i >= 0; --i)
+ mWord[i] = (nWordShift <= (size_type)i) ? mWord[i - nWordShift] : (word_type)0;
+ }
+
+ if(n &= kBitsPerWordMask)
+ {
+ for(size_t i = (NW - 1); i > 0; --i)
+ mWord[i] = (word_type)((mWord[i] << n) | (mWord[i - 1] >> (kBitsPerWord - n)));
+ mWord[0] <<= n;
+ }
+
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::operator>>=(size_type n)
+ {
+ const size_type nWordShift = (size_type)(n >> kBitsPerWordShift);
+
+ if(nWordShift)
+ {
+ for(size_t i = 0; i < NW; ++i)
+ mWord[i] = ((nWordShift < (NW - i)) ? mWord[i + nWordShift] : (word_type)0);
+ }
+
+ if(n &= kBitsPerWordMask)
+ {
+ for(size_t i = 0; i < (NW - 1); ++i)
+ mWord[i] = (word_type)((mWord[i] >> n) | (mWord[i + 1] << (kBitsPerWord - n)));
+ mWord[NW - 1] >>= n;
+ }
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::flip()
+ {
+ for(size_t i = 0; i < NW; i++)
+ mWord[i] = ~mWord[i];
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::set()
+ {
+ for(size_t i = 0; i < NW; i++)
+ mWord[i] = static_cast<word_type>(~static_cast<word_type>(0));
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::set(size_type i, bool value)
+ {
+ if(value)
+ mWord[i >> kBitsPerWordShift] |= (static_cast<word_type>(1) << (i & kBitsPerWordMask));
+ else
+ mWord[i >> kBitsPerWordShift] &= ~(static_cast<word_type>(1) << (i & kBitsPerWordMask));
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::reset()
+ {
+ if(NW > 16) // This is a constant expression and should be optimized away.
+ {
+ // This will be fastest if compiler intrinsic function optimizations are enabled.
+ memset(mWord, 0, sizeof(mWord));
+ }
+ else
+ {
+ for(size_t i = 0; i < NW; i++)
+ mWord[i] = 0;
+ }
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline bool BitsetBase<NW, WordType>::operator==(const this_type& x) const
+ {
+ for(size_t i = 0; i < NW; i++)
+ {
+ if(mWord[i] != x.mWord[i])
+ return false;
+ }
+ return true;
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline bool BitsetBase<NW, WordType>::any() const
+ {
+ for(size_t i = 0; i < NW; i++)
+ {
+ if(mWord[i])
+ return true;
+ }
+ return false;
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline typename BitsetBase<NW, WordType>::size_type
+ BitsetBase<NW, WordType>::count() const
+ {
+ size_type n = 0;
+
+ for(size_t i = 0; i < NW; i++)
+ {
+ #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ n += (size_type)__builtin_popcountl(mWord[i]);
+ #else
+ n += (size_type)__builtin_popcountll(mWord[i]);
+ #endif
+ #elif defined(__GNUC__) && (__GNUC__ < 3)
+ n += BitsetCountBits(mWord[i]); // GCC 2.x compiler inexplicably blows up on the code below.
+ #else
+ // todo: use __popcnt16, __popcnt, __popcnt64 for msvc builds
+ // https://msdn.microsoft.com/en-us/library/bb385231(v=vs.140).aspx
+ for(word_type w = mWord[i]; w; w >>= 4)
+ n += EASTL_BITSET_COUNT_STRING[w & 0xF];
+
+ // Version which seems to run slower in benchmarks:
+ // n += BitsetCountBits(mWord[i]);
+ #endif
+
+ }
+ return n;
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::from_uint32(uint32_t value)
+ {
+ reset();
+ mWord[0] = static_cast<word_type>(value);
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline void BitsetBase<NW, WordType>::from_uint64(uint64_t value)
+ {
+ reset();
+
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ mWord[0] = static_cast<word_type>(value);
+
+ EASTL_CT_ASSERT(NW > 2); // We can assume this because we have specializations of BitsetBase for <1> and <2>.
+ //if(NW > 1) // NW is a template constant, but it would be a little messy to take advantage of it's const-ness.
+ mWord[1] = static_cast<word_type>(value >> 32);
+ #else
+ mWord[0] = static_cast<word_type>(value);
+ #endif
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline unsigned long BitsetBase<NW, WordType>::to_ulong() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ for(size_t i = 1; i < NW; ++i)
+ {
+ if(mWord[i])
+ throw std::overflow_error("BitsetBase::to_ulong");
+ }
+ #endif
+ return (unsigned long)mWord[0]; // Todo: We need to deal with the case whereby sizeof(word_type) < sizeof(unsigned long)
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline uint32_t BitsetBase<NW, WordType>::to_uint32() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ // Verify that high words or bits are not set and thus that to_uint32 doesn't lose information.
+ for(size_t i = 1; i < NW; ++i)
+ {
+ if(mWord[i])
+ throw std::overflow_error("BitsetBase::to_uint32");
+ }
+
+ #if(EA_PLATFORM_WORD_SIZE > 4) // if we have 64 bit words...
+ if(mWord[0] >> 32)
+ throw std::overflow_error("BitsetBase::to_uint32");
+ #endif
+ #endif
+
+ return (uint32_t)mWord[0];
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline uint64_t BitsetBase<NW, WordType>::to_uint64() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ // Verify that high words are not set and thus that to_uint64 doesn't lose information.
+
+ EASTL_CT_ASSERT(NW > 2); // We can assume this because we have specializations of BitsetBase for <1> and <2>.
+ for(size_t i = 2; i < NW; ++i)
+ {
+ if(mWord[i])
+ throw std::overflow_error("BitsetBase::to_uint64");
+ }
+ #endif
+
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ EASTL_CT_ASSERT(NW > 2); // We can assume this because we have specializations of BitsetBase for <1> and <2>.
+ return (mWord[1] << 32) | mWord[0];
+ #else
+ return (uint64_t)mWord[0];
+ #endif
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline typename BitsetBase<NW, WordType>::word_type&
+ BitsetBase<NW, WordType>::DoGetWord(size_type i)
+ {
+ return mWord[i >> kBitsPerWordShift];
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline typename BitsetBase<NW, WordType>::word_type
+ BitsetBase<NW, WordType>::DoGetWord(size_type i) const
+ {
+ return mWord[i >> kBitsPerWordShift];
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline typename BitsetBase<NW, WordType>::size_type
+ BitsetBase<NW, WordType>::DoFindFirst() const
+ {
+ for(size_type word_index = 0; word_index < NW; ++word_index)
+ {
+ const size_type fbiw = GetFirstBit(mWord[word_index]);
+
+ if(fbiw != kBitsPerWord)
+ return (word_index * kBitsPerWord) + fbiw;
+ }
+
+ return (size_type)NW * kBitsPerWord;
+ }
+
+
+#if EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING
+EA_DISABLE_GCC_WARNING(-Warray-bounds)
+#endif
+
+ template <size_t NW, typename WordType>
+ inline typename BitsetBase<NW, WordType>::size_type
+ BitsetBase<NW, WordType>::DoFindNext(size_type last_find) const
+ {
+ // Start looking from the next bit.
+ ++last_find;
+
+ // Set initial state based on last find.
+ size_type word_index = static_cast<size_type>(last_find >> kBitsPerWordShift);
+ size_type bit_index = static_cast<size_type>(last_find & kBitsPerWordMask);
+
+ // To do: There probably is a more elegant way to write looping below.
+ if(word_index < NW)
+ {
+ // Mask off previous bits of the word so our search becomes a "find first".
+ word_type this_word = mWord[word_index] & (~static_cast<word_type>(0) << bit_index);
+
+ for(;;)
+ {
+ const size_type fbiw = GetFirstBit(this_word);
+
+ if(fbiw != kBitsPerWord)
+ return (word_index * kBitsPerWord) + fbiw;
+
+ if(++word_index < NW)
+ this_word = mWord[word_index];
+ else
+ break;
+ }
+ }
+
+ return (size_type)NW * kBitsPerWord;
+ }
+
+#if EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING
+EA_RESTORE_GCC_WARNING()
+#endif
+
+
+
+ template <size_t NW, typename WordType>
+ inline typename BitsetBase<NW, WordType>::size_type
+ BitsetBase<NW, WordType>::DoFindLast() const
+ {
+ for(size_type word_index = (size_type)NW; word_index > 0; --word_index)
+ {
+ const size_type lbiw = GetLastBit(mWord[word_index - 1]);
+
+ if(lbiw != kBitsPerWord)
+ return ((word_index - 1) * kBitsPerWord) + lbiw;
+ }
+
+ return (size_type)NW * kBitsPerWord;
+ }
+
+
+ template <size_t NW, typename WordType>
+ inline typename BitsetBase<NW, WordType>::size_type
+ BitsetBase<NW, WordType>::DoFindPrev(size_type last_find) const
+ {
+ if(last_find > 0)
+ {
+ // Set initial state based on last find.
+ size_type word_index = static_cast<size_type>(last_find >> kBitsPerWordShift);
+ size_type bit_index = static_cast<size_type>(last_find & kBitsPerWordMask);
+
+ // Mask off subsequent bits of the word so our search becomes a "find last".
+ word_type mask = (~static_cast<word_type>(0) >> (kBitsPerWord - 1 - bit_index)) >> 1; // We do two shifts here because many CPUs ignore requests to shift 32 bit integers by 32 bits, which could be the case above.
+ word_type this_word = mWord[word_index] & mask;
+
+ for(;;)
+ {
+ const size_type lbiw = GetLastBit(this_word);
+
+ if(lbiw != kBitsPerWord)
+ return (word_index * kBitsPerWord) + lbiw;
+
+ if(word_index > 0)
+ this_word = mWord[--word_index];
+ else
+ break;
+ }
+ }
+
+ return (size_type)NW * kBitsPerWord;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // BitsetBase<1, WordType>
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename WordType>
+ inline BitsetBase<1, WordType>::BitsetBase()
+ {
+ mWord[0] = 0;
+ }
+
+
+ template <typename WordType>
+ inline BitsetBase<1, WordType>::BitsetBase(uint32_t value)
+ {
+ // This implementation assumes that sizeof(value) <= sizeof(word_type).
+ //EASTL_CT_ASSERT(sizeof(value) <= sizeof(word_type)); Disabled because we now have support for uint8_t and uint16_t word types. It would be nice to have a runtime assert that tested this.
+
+ mWord[0] = static_cast<word_type>(value);
+ }
+
+
+ /*
+ template <typename WordType>
+ inline BitsetBase<1, WordType>::BitsetBase(uint64_t value)
+ {
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ EASTL_ASSERT(value <= 0xffffffff);
+ mWord[0] = static_cast<word_type>(value); // This potentially loses data, but that's what the user is requesting.
+ #else
+ mWord[0] = static_cast<word_type>(value);
+ #endif
+ }
+ */
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::operator&=(const this_type& x)
+ {
+ mWord[0] &= x.mWord[0];
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::operator|=(const this_type& x)
+ {
+ mWord[0] |= x.mWord[0];
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::operator^=(const this_type& x)
+ {
+ mWord[0] ^= x.mWord[0];
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::operator<<=(size_type n)
+ {
+ mWord[0] <<= n;
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::operator>>=(size_type n)
+ {
+ mWord[0] >>= n;
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::flip()
+ {
+ mWord[0] = ~mWord[0];
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::set()
+ {
+ mWord[0] = static_cast<word_type>(~static_cast<word_type>(0));
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::set(size_type i, bool value)
+ {
+ if(value)
+ mWord[0] |= (static_cast<word_type>(1) << i);
+ else
+ mWord[0] &= ~(static_cast<word_type>(1) << i);
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::reset()
+ {
+ mWord[0] = 0;
+ }
+
+
+ template <typename WordType>
+ inline bool BitsetBase<1, WordType>::operator==(const this_type& x) const
+ {
+ return mWord[0] == x.mWord[0];
+ }
+
+
+ template <typename WordType>
+ inline bool BitsetBase<1, WordType>::any() const
+ {
+ return mWord[0] != 0;
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<1, WordType>::size_type
+ BitsetBase<1, WordType>::count() const
+ {
+ #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ return (size_type)__builtin_popcountl(mWord[0]);
+ #else
+ return (size_type)__builtin_popcountll(mWord[0]);
+ #endif
+ #elif defined(__GNUC__) && (__GNUC__ < 3)
+ return BitsetCountBits(mWord[0]); // GCC 2.x compiler inexplicably blows up on the code below.
+ #else
+ size_type n = 0;
+ for(word_type w = mWord[0]; w; w >>= 4)
+ n += EASTL_BITSET_COUNT_STRING[w & 0xF];
+ return n;
+ #endif
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::from_uint32(uint32_t value)
+ {
+ mWord[0] = static_cast<word_type>(value);
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<1, WordType>::from_uint64(uint64_t value)
+ {
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ EASTL_ASSERT(value <= 0xffffffff);
+ mWord[0] = static_cast<word_type>(value); // This potentially loses data, but that's what the user is requesting.
+ #else
+ mWord[0] = static_cast<word_type>(value);
+ #endif
+ }
+
+
+ template <typename WordType>
+ inline unsigned long BitsetBase<1, WordType>::to_ulong() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ #if((EA_PLATFORM_WORD_SIZE > 4) && defined(EA_PLATFORM_MICROSOFT)) // If we are using 64 bit words but ulong is less than 64 bits... Microsoft platforms alone use a 32 bit long under 64 bit platforms.
+ // Verify that high bits are not set and thus that to_ulong doesn't lose information.
+ if(mWord[0] >> 32)
+ throw std::overflow_error("BitsetBase::to_ulong");
+ #endif
+ #endif
+
+ return static_cast<unsigned long>(mWord[0]);
+ }
+
+
+ template <typename WordType>
+ inline uint32_t BitsetBase<1, WordType>::to_uint32() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ #if(EA_PLATFORM_WORD_SIZE > 4) // If we are using 64 bit words...
+ // Verify that high bits are not set and thus that to_uint32 doesn't lose information.
+ if(mWord[0] >> 32)
+ throw std::overflow_error("BitsetBase::to_uint32");
+ #endif
+ #endif
+
+ return static_cast<uint32_t>(mWord[0]);
+ }
+
+
+ template <typename WordType>
+ inline uint64_t BitsetBase<1, WordType>::to_uint64() const
+ {
+ // This implementation is the same regardless of the word size, and there is no possibility of overflow_error.
+ return static_cast<uint64_t>(mWord[0]);
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<1, WordType>::word_type&
+ BitsetBase<1, WordType>::DoGetWord(size_type)
+ {
+ return mWord[0];
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<1, WordType>::word_type
+ BitsetBase<1, WordType>::DoGetWord(size_type) const
+ {
+ return mWord[0];
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<1, WordType>::size_type
+ BitsetBase<1, WordType>::DoFindFirst() const
+ {
+ return GetFirstBit(mWord[0]);
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<1, WordType>::size_type
+ BitsetBase<1, WordType>::DoFindNext(size_type last_find) const
+ {
+ if(++last_find < kBitsPerWord)
+ {
+ // Mask off previous bits of word so our search becomes a "find first".
+ const word_type this_word = mWord[0] & ((~static_cast<word_type>(0)) << last_find);
+
+ return GetFirstBit(this_word);
+ }
+
+ return kBitsPerWord;
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<1, WordType>::size_type
+ BitsetBase<1, WordType>::DoFindLast() const
+ {
+ return GetLastBit(mWord[0]);
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<1, WordType>::size_type
+ BitsetBase<1, WordType>::DoFindPrev(size_type last_find) const
+ {
+ if(last_find > 0)
+ {
+ // Mask off previous bits of word so our search becomes a "find first".
+ const word_type this_word = mWord[0] & ((~static_cast<word_type>(0)) >> (kBitsPerWord - last_find));
+
+ return GetLastBit(this_word);
+ }
+
+ return kBitsPerWord;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // BitsetBase<2, WordType>
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename WordType>
+ inline BitsetBase<2, WordType>::BitsetBase()
+ {
+ mWord[0] = 0;
+ mWord[1] = 0;
+ }
+
+
+ template <typename WordType>
+ inline BitsetBase<2, WordType>::BitsetBase(uint32_t value)
+ {
+ // This implementation assumes that sizeof(value) <= sizeof(word_type).
+ //EASTL_CT_ASSERT(sizeof(value) <= sizeof(word_type)); Disabled because we now have support for uint8_t and uint16_t word types. It would be nice to have a runtime assert that tested this.
+
+ mWord[0] = static_cast<word_type>(value);
+ mWord[1] = 0;
+ }
+
+
+ /*
+ template <typename WordType>
+ inline BitsetBase<2, WordType>::BitsetBase(uint64_t value)
+ {
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ mWord[0] = static_cast<word_type>(value);
+ mWord[1] = static_cast<word_type>(value >> 32);
+ #else
+ mWord[0] = static_cast<word_type>(value);
+ mWord[1] = 0;
+ #endif
+ }
+ */
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::operator&=(const this_type& x)
+ {
+ mWord[0] &= x.mWord[0];
+ mWord[1] &= x.mWord[1];
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::operator|=(const this_type& x)
+ {
+ mWord[0] |= x.mWord[0];
+ mWord[1] |= x.mWord[1];
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::operator^=(const this_type& x)
+ {
+ mWord[0] ^= x.mWord[0];
+ mWord[1] ^= x.mWord[1];
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::operator<<=(size_type n)
+ {
+ if(n) // to avoid a shift by kBitsPerWord, which is undefined
+ {
+ if(EASTL_UNLIKELY(n >= kBitsPerWord)) // parent expected to handle high bits and n >= 64
+ {
+ mWord[1] = mWord[0];
+ mWord[0] = 0;
+ n -= kBitsPerWord;
+ }
+
+ mWord[1] = (mWord[1] << n) | (mWord[0] >> (kBitsPerWord - n)); // Intentionally use | instead of +.
+ mWord[0] <<= n;
+ // We let the parent class turn off any upper bits.
+ }
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::operator>>=(size_type n)
+ {
+ if(n) // to avoid a shift by kBitsPerWord, which is undefined
+ {
+ if(EASTL_UNLIKELY(n >= kBitsPerWord)) // parent expected to handle n >= 64
+ {
+ mWord[0] = mWord[1];
+ mWord[1] = 0;
+ n -= kBitsPerWord;
+ }
+
+ mWord[0] = (mWord[0] >> n) | (mWord[1] << (kBitsPerWord - n)); // Intentionally use | instead of +.
+ mWord[1] >>= n;
+ }
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::flip()
+ {
+ mWord[0] = ~mWord[0];
+ mWord[1] = ~mWord[1];
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::set()
+ {
+ mWord[0] = ~static_cast<word_type>(0);
+ mWord[1] = ~static_cast<word_type>(0);
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::set(size_type i, bool value)
+ {
+ if(value)
+ mWord[i >> kBitsPerWordShift] |= (static_cast<word_type>(1) << (i & kBitsPerWordMask));
+ else
+ mWord[i >> kBitsPerWordShift] &= ~(static_cast<word_type>(1) << (i & kBitsPerWordMask));
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::reset()
+ {
+ mWord[0] = 0;
+ mWord[1] = 0;
+ }
+
+
+ template <typename WordType>
+ inline bool BitsetBase<2, WordType>::operator==(const this_type& x) const
+ {
+ return (mWord[0] == x.mWord[0]) && (mWord[1] == x.mWord[1]);
+ }
+
+
+ template <typename WordType>
+ inline bool BitsetBase<2, WordType>::any() const
+ {
+ // Or with two branches: { return (mWord[0] != 0) || (mWord[1] != 0); }
+ return (mWord[0] | mWord[1]) != 0;
+ }
+
+ template <typename WordType>
+ inline typename BitsetBase<2, WordType>::size_type
+ BitsetBase<2, WordType>::count() const
+ {
+ #if (defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304)) || defined(__clang__) // GCC 3.4 or later
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ return (size_type)__builtin_popcountl(mWord[0]) + (size_type)__builtin_popcountl(mWord[1]);
+ #else
+ return (size_type)__builtin_popcountll(mWord[0]) + (size_type)__builtin_popcountll(mWord[1]);
+ #endif
+
+ #else
+ return BitsetCountBits(mWord[0]) + BitsetCountBits(mWord[1]);
+ #endif
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::from_uint32(uint32_t value)
+ {
+ mWord[0] = static_cast<word_type>(value);
+ mWord[1] = 0;
+ }
+
+
+ template <typename WordType>
+ inline void BitsetBase<2, WordType>::from_uint64(uint64_t value)
+ {
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ mWord[0] = static_cast<word_type>(value);
+ mWord[1] = static_cast<word_type>(value >> 32);
+ #else
+ mWord[0] = static_cast<word_type>(value);
+ mWord[1] = 0;
+ #endif
+ }
+
+
+ template <typename WordType>
+ inline unsigned long BitsetBase<2, WordType>::to_ulong() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(mWord[1])
+ throw std::overflow_error("BitsetBase::to_ulong");
+ #endif
+ return (unsigned long)mWord[0]; // Todo: We need to deal with the case whereby sizeof(word_type) < sizeof(unsigned long)
+ }
+
+
+ template <typename WordType>
+ inline uint32_t BitsetBase<2, WordType>::to_uint32() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ // Verify that high words or bits are not set and thus that to_uint32 doesn't lose information.
+
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ if(mWord[1])
+ throw std::overflow_error("BitsetBase::to_uint32");
+ #else
+ if(mWord[1] || (mWord[0] >> 32))
+ throw std::overflow_error("BitsetBase::to_uint32");
+ #endif
+ #endif
+
+ return (uint32_t)mWord[0];
+ }
+
+
+ template <typename WordType>
+ inline uint64_t BitsetBase<2, WordType>::to_uint64() const
+ {
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ // There can't possibly be an overflow_error here.
+
+ return ((uint64_t)mWord[1] << 32) | mWord[0];
+ #else
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(mWord[1])
+ throw std::overflow_error("BitsetBase::to_uint64");
+ #endif
+
+ return (uint64_t)mWord[0];
+ #endif
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<2, WordType>::word_type&
+ BitsetBase<2, WordType>::DoGetWord(size_type i)
+ {
+ return mWord[i >> kBitsPerWordShift];
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<2, WordType>::word_type
+ BitsetBase<2, WordType>::DoGetWord(size_type i) const
+ {
+ return mWord[i >> kBitsPerWordShift];
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<2, WordType>::size_type
+ BitsetBase<2, WordType>::DoFindFirst() const
+ {
+ size_type fbiw = GetFirstBit(mWord[0]);
+
+ if(fbiw != kBitsPerWord)
+ return fbiw;
+
+ fbiw = GetFirstBit(mWord[1]);
+
+ if(fbiw != kBitsPerWord)
+ return kBitsPerWord + fbiw;
+
+ return 2 * kBitsPerWord;
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<2, WordType>::size_type
+ BitsetBase<2, WordType>::DoFindNext(size_type last_find) const
+ {
+ // If the last find was in the first word, we must check it and then possibly the second.
+ if(++last_find < (size_type)kBitsPerWord)
+ {
+ // Mask off previous bits of word so our search becomes a "find first".
+ word_type this_word = mWord[0] & ((~static_cast<word_type>(0)) << last_find);
+
+ // Step through words.
+ size_type fbiw = GetFirstBit(this_word);
+
+ if(fbiw != kBitsPerWord)
+ return fbiw;
+
+ fbiw = GetFirstBit(mWord[1]);
+
+ if(fbiw != kBitsPerWord)
+ return kBitsPerWord + fbiw;
+ }
+ else if(last_find < (size_type)(2 * kBitsPerWord))
+ {
+ // The last find was in the second word, remove the bit count of the first word from the find.
+ last_find -= kBitsPerWord;
+
+ // Mask off previous bits of word so our search becomes a "find first".
+ word_type this_word = mWord[1] & ((~static_cast<word_type>(0)) << last_find);
+
+ const size_type fbiw = GetFirstBit(this_word);
+
+ if(fbiw != kBitsPerWord)
+ return kBitsPerWord + fbiw;
+ }
+
+ return 2 * kBitsPerWord;
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<2, WordType>::size_type
+ BitsetBase<2, WordType>::DoFindLast() const
+ {
+ size_type lbiw = GetLastBit(mWord[1]);
+
+ if(lbiw != kBitsPerWord)
+ return kBitsPerWord + lbiw;
+
+ lbiw = GetLastBit(mWord[0]);
+
+ if(lbiw != kBitsPerWord)
+ return lbiw;
+
+ return 2 * kBitsPerWord;
+ }
+
+
+ template <typename WordType>
+ inline typename BitsetBase<2, WordType>::size_type
+ BitsetBase<2, WordType>::DoFindPrev(size_type last_find) const
+ {
+ // If the last find was in the second word, we must check it and then possibly the first.
+ if(last_find > (size_type)kBitsPerWord)
+ {
+ // This has the same effect as last_find %= kBitsPerWord in our case.
+ last_find -= kBitsPerWord;
+
+ // Mask off previous bits of word so our search becomes a "find first".
+ word_type this_word = mWord[1] & ((~static_cast<word_type>(0)) >> (kBitsPerWord - last_find));
+
+ // Step through words.
+ size_type lbiw = GetLastBit(this_word);
+
+ if(lbiw != kBitsPerWord)
+ return kBitsPerWord + lbiw;
+
+ lbiw = GetLastBit(mWord[0]);
+
+ if(lbiw != kBitsPerWord)
+ return lbiw;
+ }
+ else if(last_find != 0)
+ {
+ // Mask off previous bits of word so our search becomes a "find first".
+ word_type this_word = mWord[0] & ((~static_cast<word_type>(0)) >> (kBitsPerWord - last_find));
+
+ const size_type lbiw = GetLastBit(this_word);
+
+ if(lbiw != kBitsPerWord)
+ return lbiw;
+ }
+
+ return 2 * kBitsPerWord;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bitset::reference
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <size_t N, typename WordType>
+ inline bitset<N, WordType>::reference::reference(const bitset& x, size_type i)
+ : mpBitWord(&const_cast<bitset&>(x).DoGetWord(i)),
+ mnBitIndex(i & kBitsPerWordMask)
+ { // We have an issue here because the above is casting away the const-ness of the source bitset.
+ // Empty
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::reference&
+ bitset<N, WordType>::reference::operator=(bool value)
+ {
+ if(value)
+ *mpBitWord |= (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask));
+ else
+ *mpBitWord &= ~(static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask));
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::reference&
+ bitset<N, WordType>::reference::operator=(const reference& x)
+ {
+ if(*x.mpBitWord & (static_cast<word_type>(1) << (x.mnBitIndex & kBitsPerWordMask)))
+ *mpBitWord |= (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask));
+ else
+ *mpBitWord &= ~(static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask));
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline bool bitset<N, WordType>::reference::operator~() const
+ {
+ return (*mpBitWord & (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask))) == 0;
+ }
+
+
+ //Defined inline in the class because Metrowerks fails to be able to compile it here.
+ //template <size_t N, typename WordType>
+ //inline bitset<N, WordType>::reference::operator bool() const
+ //{
+ // return (*mpBitWord & (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask))) != 0;
+ //}
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::reference&
+ bitset<N, WordType>::reference::flip()
+ {
+ *mpBitWord ^= static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask);
+ return *this;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bitset
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <size_t N, typename WordType>
+ inline bitset<N, WordType>::bitset()
+ : base_type()
+ {
+ // Empty. The base class will set all bits to zero.
+ }
+
+ EA_DISABLE_VC_WARNING(6313)
+ template <size_t N, typename WordType>
+ inline bitset<N, WordType>::bitset(uint32_t value)
+ : base_type(value)
+ {
+ if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
+ mWord[kWordCount - 1] &= ~(static_cast<word_type>(~static_cast<word_type>(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits.
+ }
+ EA_RESTORE_VC_WARNING()
+
+ /*
+ template <size_t N, typename WordType>
+ inline bitset<N, WordType>::bitset(uint64_t value)
+ : base_type(value)
+ {
+ if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear...
+ mWord[kWordCount - 1] &= ~(~static_cast<word_type>(0) << (N & kBitsPerWordMask)); // This clears any high unused bits.
+ }
+ */
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::operator&=(const this_type& x)
+ {
+ base_type::operator&=(x);
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::operator|=(const this_type& x)
+ {
+ base_type::operator|=(x);
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::operator^=(const this_type& x)
+ {
+ base_type::operator^=(x);
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::operator<<=(size_type n)
+ {
+ if(EASTL_LIKELY((intptr_t)n < (intptr_t)N))
+ {
+ EA_DISABLE_VC_WARNING(6313)
+ base_type::operator<<=(n);
+ if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
+ mWord[kWordCount - 1] &= ~(static_cast<word_type>(~static_cast<word_type>(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly.
+ EA_RESTORE_VC_WARNING()
+ }
+ else
+ base_type::reset();
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::operator>>=(size_type n)
+ {
+ if(EASTL_LIKELY(n < N))
+ base_type::operator>>=(n);
+ else
+ base_type::reset();
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::set()
+ {
+ base_type::set(); // This sets all bits.
+ if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
+ mWord[kWordCount - 1] &= ~(static_cast<word_type>(~static_cast<word_type>(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly.
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::set(size_type i, bool value)
+ {
+ if(i < N)
+ base_type::set(i, value);
+ else
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::set -- out of range");
+ #endif
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw std::out_of_range("bitset::set");
+ #endif
+ }
+
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::reset()
+ {
+ base_type::reset();
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::reset(size_type i)
+ {
+ if(EASTL_LIKELY(i < N))
+ DoGetWord(i) &= ~(static_cast<word_type>(1) << (i & kBitsPerWordMask));
+ else
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::reset -- out of range");
+ #endif
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw std::out_of_range("bitset::reset");
+ #endif
+ }
+
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::flip()
+ {
+ EA_DISABLE_VC_WARNING(6313)
+ base_type::flip();
+ if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
+ mWord[kWordCount - 1] &= ~(static_cast<word_type>(~static_cast<word_type>(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly.
+ return *this;
+ EA_RESTORE_VC_WARNING()
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type&
+ bitset<N, WordType>::flip(size_type i)
+ {
+ if(EASTL_LIKELY(i < N))
+ DoGetWord(i) ^= (static_cast<word_type>(1) << (i & kBitsPerWordMask));
+ else
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::flip -- out of range");
+ #endif
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw std::out_of_range("bitset::flip");
+ #endif
+ }
+ return *this;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type
+ bitset<N, WordType>::operator~() const
+ {
+ return this_type(*this).flip();
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::reference
+ bitset<N, WordType>::operator[](size_type i)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::operator[] -- out of range");
+ #endif
+
+ return reference(*this, i);
+ }
+
+
+ template <size_t N, typename WordType>
+ inline bool bitset<N, WordType>::operator[](size_type i) const
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::operator[] -- out of range");
+ #endif
+
+ return (DoGetWord(i) & (static_cast<word_type>(1) << (i & kBitsPerWordMask))) != 0;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline const typename bitset<N, WordType>::word_type* bitset<N, WordType>::data() const
+ {
+ return base_type::mWord;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::word_type* bitset<N, WordType>::data()
+ {
+ return base_type::mWord;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline void bitset<N, WordType>::from_uint32(uint32_t value)
+ {
+ base_type::from_uint32(value);
+
+ if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
+ mWord[kWordCount - 1] &= ~(static_cast<word_type>(~static_cast<word_type>(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly.
+ }
+
+
+ template <size_t N, typename WordType>
+ inline void bitset<N, WordType>::from_uint64(uint64_t value)
+ {
+ base_type::from_uint64(value);
+
+ if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
+ mWord[kWordCount - 1] &= ~(static_cast<word_type>(~static_cast<word_type>(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly.
+ }
+
+
+ // template <size_t N, typename WordType>
+ // inline unsigned long bitset<N, WordType>::to_ulong() const
+ // {
+ // return base_type::to_ulong();
+ // }
+
+
+ // template <size_t N, typename WordType>
+ // inline uint32_t bitset<N, WordType>::to_uint32() const
+ // {
+ // return base_type::to_uint32();
+ // }
+
+
+ // template <size_t N, typename WordType>
+ // inline uint64_t bitset<N, WordType>::to_uint64() const
+ // {
+ // return base_type::to_uint64();
+ // }
+
+
+ // template <size_t N, typename WordType>
+ // inline typename bitset<N, WordType>::size_type
+ // bitset<N, WordType>::count() const
+ // {
+ // return base_type::count();
+ // }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::size_type
+ bitset<N, WordType>::size() const
+ {
+ return (size_type)N;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline bool bitset<N, WordType>::operator==(const this_type& x) const
+ {
+ return base_type::operator==(x);
+ }
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <size_t N, typename WordType>
+ inline bool bitset<N, WordType>::operator!=(const this_type& x) const
+ {
+ return !base_type::operator==(x);
+ }
+#endif
+
+ template <size_t N, typename WordType>
+ inline bool bitset<N, WordType>::test(size_type i) const
+ {
+ if(EASTL_UNLIKELY(i < N))
+ return (DoGetWord(i) & (static_cast<word_type>(1) << (i & kBitsPerWordMask))) != 0;
+
+ #if EASTL_ASSERT_ENABLED
+ EASTL_FAIL_MSG("bitset::test -- out of range");
+ #endif
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw std::out_of_range("bitset::test");
+ #else
+ return false;
+ #endif
+ }
+
+
+ // template <size_t N, typename WordType>
+ // inline bool bitset<N, WordType>::any() const
+ // {
+ // return base_type::any();
+ // }
+
+
+ template <size_t N, typename WordType>
+ inline bool bitset<N, WordType>::all() const
+ {
+ return count() == size();
+ }
+
+
+ template <size_t N, typename WordType>
+ inline bool bitset<N, WordType>::none() const
+ {
+ return !base_type::any();
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type
+ bitset<N, WordType>::operator<<(size_type n) const
+ {
+ return this_type(*this).operator<<=(n);
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::this_type
+ bitset<N, WordType>::operator>>(size_type n) const
+ {
+ return this_type(*this).operator>>=(n);
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::size_type
+ bitset<N, WordType>::find_first() const
+ {
+ const size_type i = base_type::DoFindFirst();
+
+ if(i < kSize)
+ return i;
+ // Else i could be the base type bit count, so we clamp it to our size.
+
+ return kSize;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::size_type
+ bitset<N, WordType>::find_next(size_type last_find) const
+ {
+ const size_type i = base_type::DoFindNext(last_find);
+
+ if(i < kSize)
+ return i;
+ // Else i could be the base type bit count, so we clamp it to our size.
+
+ return kSize;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::size_type
+ bitset<N, WordType>::find_last() const
+ {
+ const size_type i = base_type::DoFindLast();
+
+ if(i < kSize)
+ return i;
+ // Else i could be the base type bit count, so we clamp it to our size.
+
+ return kSize;
+ }
+
+
+ template <size_t N, typename WordType>
+ inline typename bitset<N, WordType>::size_type
+ bitset<N, WordType>::find_prev(size_type last_find) const
+ {
+ const size_type i = base_type::DoFindPrev(last_find);
+
+ if(i < kSize)
+ return i;
+ // Else i could be the base type bit count, so we clamp it to our size.
+
+ return kSize;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <size_t N, typename WordType>
+ inline bitset<N, WordType> operator&(const bitset<N, WordType>& a, const bitset<N, WordType>& b)
+ {
+ // We get betting inlining when we don't declare temporary variables.
+ return bitset<N, WordType>(a).operator&=(b);
+ }
+
+
+ template <size_t N, typename WordType>
+ inline bitset<N, WordType> operator|(const bitset<N, WordType>& a, const bitset<N, WordType>& b)
+ {
+ return bitset<N, WordType>(a).operator|=(b);
+ }
+
+
+ template <size_t N, typename WordType>
+ inline bitset<N, WordType> operator^(const bitset<N, WordType>& a, const bitset<N, WordType>& b)
+ {
+ return bitset<N, WordType>(a).operator^=(b);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/bitvector.h b/EASTL/include/EASTL/bitvector.h
new file mode 100644
index 0000000..ade6782
--- /dev/null
+++ b/EASTL/include/EASTL/bitvector.h
@@ -0,0 +1,1474 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements a bit vector, which is essentially a vector of bool but which
+// uses bits instead of bytes. It is thus similar to the original std::vector<bool>.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Note: This code is not yet complete: it isn't tested and doesn't yet
+// support containers other than vector.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_BITVECTOR_H
+#define EASTL_BITVECTOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/bitset.h>
+
+EA_DISABLE_VC_WARNING(4480); // nonstandard extension used: specifying underlying type for enum
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_BITVECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_BITVECTOR_DEFAULT_NAME
+ #define EASTL_BITVECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " bitvector" // Unless the user overrides something, this is "EASTL bitvector".
+ #endif
+
+ /// EASTL_BITVECTOR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_BITVECTOR_DEFAULT_ALLOCATOR
+ #define EASTL_BITVECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_BITVECTOR_DEFAULT_NAME)
+ #endif
+
+
+
+ /// BitvectorWordType
+ /// Defines the integral data type used by bitvector.
+ typedef EASTL_BITSET_WORD_TYPE_DEFAULT BitvectorWordType;
+
+
+ template <typename Element>
+ class bitvector_const_iterator;
+
+
+ template <typename Element>
+ class bitvector_reference
+ {
+ public:
+ typedef eastl_size_t size_type;
+ bitvector_reference(Element* ptr, eastl_size_t i);
+
+ bitvector_reference& operator=(bool value);
+ bitvector_reference& operator=(const bitvector_reference& rhs);
+
+ operator bool() const // Defined here because some compilers fail otherwise.
+ { return (*mpBitWord & (Element(1) << mnBitIndex)) != 0; }
+
+ protected:
+ friend class bitvector_const_iterator<Element>;
+
+ Element* mpBitWord;
+ size_type mnBitIndex;
+
+ bitvector_reference() {}
+ void CopyFrom(const bitvector_reference& rhs);
+ };
+
+
+
+ template <typename Element>
+ class bitvector_const_iterator
+ {
+ public:
+ typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+ typedef bitvector_const_iterator<Element> this_type;
+ typedef bool value_type;
+ typedef bitvector_reference<Element> reference_type;
+ typedef ptrdiff_t difference_type;
+ typedef Element element_type;
+ typedef element_type* pointer; // This is wrong. It needs to be someting that acts as a pointer to a bit.
+ typedef element_type& reference; // This is not right. It needs to be someting that acts as a pointer to a bit.
+ typedef eastl_size_t size_type;
+
+ protected:
+ reference_type mReference;
+
+ enum
+ {
+ kBitCount = (8 * sizeof(Element))
+ };
+
+ public:
+ bool operator*() const;
+ bool operator[](difference_type n) const;
+
+ bitvector_const_iterator();
+ bitvector_const_iterator(const element_type* p, eastl_size_t i);
+ bitvector_const_iterator(const reference_type& referenceType);
+
+ bitvector_const_iterator& operator++();
+ bitvector_const_iterator operator++(int);
+ bitvector_const_iterator& operator--();
+ bitvector_const_iterator operator--(int);
+
+ bitvector_const_iterator& operator+=(difference_type dist);
+ bitvector_const_iterator& operator-=(difference_type dist);
+ bitvector_const_iterator operator+ (difference_type dist) const;
+ bitvector_const_iterator operator- (difference_type dist) const;
+
+ difference_type operator-(const this_type& rhs) const;
+
+ bitvector_const_iterator& operator= (const this_type& rhs);
+
+ bool operator==(const this_type& rhs) const;
+ bool operator!=(const this_type& rhs) const;
+
+ bool operator< (const this_type& rhs) const;
+ bool operator<=(const this_type& rhs) const;
+ bool operator> (const this_type& rhs) const;
+ bool operator>=(const this_type& rhs) const;
+
+ int validate(const element_type* pStart, const element_type* pEnd, eastl_size_t nExtraBits) const;
+
+ protected:
+ template <typename, typename, typename>
+ friend class bitvector;
+
+ reference_type& get_reference_type() { return mReference; }
+ };
+
+
+
+ template <typename Element>
+ class bitvector_iterator : public bitvector_const_iterator<Element>
+ {
+ public:
+ typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+ typedef bitvector_iterator this_type;
+ typedef bitvector_const_iterator<Element> base_type;
+ typedef bool value_type;
+ typedef bitvector_reference<Element> reference_type;
+ typedef ptrdiff_t difference_type;
+ typedef Element element_type;
+ typedef element_type* pointer; // This is wrong. It needs to be someting that acts as a pointer to a bit.
+ typedef element_type& reference; // This is not right. It needs to be someting that acts as a pointer to a bit.
+
+ public:
+ reference_type operator*() const;
+ reference_type operator[](difference_type n) const;
+
+ bitvector_iterator();
+ bitvector_iterator(element_type* p, eastl_size_t i);
+ bitvector_iterator(reference_type& referenceType);
+
+ bitvector_iterator& operator++() { base_type::operator++(); return *this; }
+ bitvector_iterator& operator--() { base_type::operator--(); return *this; }
+ bitvector_iterator operator++(int);
+ bitvector_iterator operator--(int);
+
+ bitvector_iterator& operator+=(difference_type dist) { base_type::operator+=(dist); return *this; }
+ bitvector_iterator& operator-=(difference_type dist) { base_type::operator-=(dist); return *this; }
+ bitvector_iterator operator+ (difference_type dist) const;
+ bitvector_iterator operator- (difference_type dist) const;
+
+ // We need this here because we are overloading operator-, so for some reason the
+ // other overload of the function can't be found unless it's explicitly specified.
+ difference_type operator-(const base_type& rhs) const { return base_type::operator-(rhs); }
+ };
+
+
+
+ /// bitvector
+ ///
+ /// Implements an array of bits treated as boolean values.
+ /// bitvector is similar to vector<bool> but uses bits instead of bytes and
+ /// allows the user to use other containers such as deque instead of vector.
+ /// bitvector is different from bitset in that bitset is less flexible but
+ /// uses less memory and has higher performance.
+ ///
+ /// To consider: Rename the Element template parameter to WordType, for
+ /// consistency with bitset.
+ ///
+ template <typename Allocator = EASTLAllocatorType,
+ typename Element = BitvectorWordType,
+ typename Container = eastl::vector<Element, Allocator> >
+ class bitvector
+ {
+ public:
+ typedef bitvector<Allocator, Element> this_type;
+ typedef bool value_type;
+ typedef bitvector_reference<Element> reference;
+ typedef bool const_reference;
+ typedef bitvector_iterator<Element> iterator;
+ typedef bitvector_const_iterator<Element> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef Allocator allocator_type;
+ typedef Element element_type;
+ typedef Container container_type;
+ typedef eastl_size_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1400) && (_MSC_VER <= 1600) && !EASTL_STD_CPP_ONLY // _MSC_VER of 1400 means VS2005, 1600 means VS2010. VS2012 generates errors with usage of enum:size_type.
+ enum : size_type { // Use Microsoft enum language extension, allowing for smaller debug symbols than using a static const. Users have been affected by this.
+ npos = container_type::npos,
+ kMaxSize = container_type::kMaxSize
+ };
+ #else
+ static const size_type npos = container_type::npos; /// 'npos' means non-valid position or simply non-position.
+ static const size_type kMaxSize = container_type::kMaxSize; /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues.
+ #endif
+
+ enum
+ {
+ kBitCount = 8 * sizeof(Element)
+ };
+
+ protected:
+ container_type mContainer;
+ size_type mFreeBitCount; // Unused bits in the last word of mContainer.
+
+ public:
+ bitvector();
+ explicit bitvector(const allocator_type& allocator);
+ explicit bitvector(size_type n, const allocator_type& allocator = EASTL_BITVECTOR_DEFAULT_ALLOCATOR);
+ bitvector(size_type n, value_type value, const allocator_type& allocator = EASTL_BITVECTOR_DEFAULT_ALLOCATOR);
+ bitvector(const bitvector& copy);
+
+ template <typename InputIterator>
+ bitvector(InputIterator first, InputIterator last);
+
+ bitvector& operator=(const bitvector& x);
+ void swap(this_type& x);
+
+ template <typename InputIterator>
+ void assign(InputIterator first, InputIterator last);
+
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ bool empty() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+ size_type capacity() const EA_NOEXCEPT;
+
+ void resize(size_type n, value_type value);
+ void resize(size_type n);
+ void reserve(size_type n);
+ void set_capacity(size_type n = npos); // Revises the capacity to the user-specified value. Resizes the container to match the capacity if the requested capacity n is less than the current size. If n == npos then the capacity is reallocated (if necessary) such that capacity == size.
+
+ void push_back();
+ void push_back(value_type value);
+ void pop_back();
+
+ reference front();
+ const_reference front() const;
+ reference back();
+ const_reference back() const;
+
+ bool test(size_type n, bool defaultValue) const; // Returns true if the bit index is < size() and set. Returns defaultValue if the bit is >= size().
+ void set(size_type n, bool value); // Resizes the container to accomodate n if necessary.
+
+ reference at(size_type n); // throws an out_of_range exception if n is invalid.
+ const_reference at(size_type n) const;
+
+ reference operator[](size_type n); // behavior is undefined if n is invalid.
+ const_reference operator[](size_type n) const;
+
+ /*
+ Work in progress:
+ template <bool value = true> iterator find_first(); // Finds the lowest "on" bit.
+ template <bool value = true> iterator find_next(const_iterator it); // Finds the next lowest "on" bit after it.
+ template <bool value = true> iterator find_last(); // Finds the index of the last "on" bit, returns size if none are set.
+ template <bool value = true> iterator find_prev(const_iterator it); // Finds the index of the last "on" bit before last_find, returns size if none are set.
+
+ template <bool value = true> const_iterator find_first() const; // Finds the lowest "on" bit.
+ template <bool value = true> const_iterator find_next(const_iterator it) const; // Finds the next lowest "on" bit after it.
+ template <bool value = true> const_iterator find_last() const; // Finds the index of the last "on" bit, returns size if none are set.
+ template <bool value = true> const_iterator find_prev(const_iterator it) const; // Finds the index of the last "on" bit before last_find, returns size if none are set.
+ */
+
+ element_type* data() EA_NOEXCEPT;
+ const element_type* data() const EA_NOEXCEPT;
+
+ iterator insert(const_iterator position, value_type value);
+ void insert(const_iterator position, size_type n, value_type value);
+
+ // template <typename InputIterator> Not yet implemented. See below for disabled definition.
+ // void insert(const_iterator position, InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ void clear();
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ container_type& get_container();
+ const container_type& get_container() const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+ };
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bitvector_reference
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Element>
+ bitvector_reference<Element>::bitvector_reference(Element* p, eastl_size_t i)
+ : mpBitWord(p),
+ mnBitIndex(i)
+ {
+ }
+
+
+ template <typename Element>
+ bitvector_reference<Element>&
+ bitvector_reference<Element>::operator=(bool value)
+ {
+ const Element mask = (Element)(Element(1) << mnBitIndex);
+
+ if(value)
+ *mpBitWord |= mask;
+ else
+ *mpBitWord &= ~mask;
+
+ return *this;
+ }
+
+
+ template <typename Element>
+ bitvector_reference<Element>&
+ bitvector_reference<Element>::operator=(const bitvector_reference& rhs)
+ {
+ return (*this = (bool)rhs);
+ }
+
+
+ template <typename Element>
+ void bitvector_reference<Element>::CopyFrom(const bitvector_reference& rhs)
+ {
+ mpBitWord = rhs.mpBitWord;
+ mnBitIndex = rhs.mnBitIndex;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bitvector_const_iterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Element>
+ bitvector_const_iterator<Element>::bitvector_const_iterator()
+ : mReference(0, 0)
+ {
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>::bitvector_const_iterator(const Element* p, eastl_size_t i)
+ : mReference(const_cast<Element*>(p), i) // const_cast is safe here because we never let mReference leak and we don't modify it.
+ {
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>::bitvector_const_iterator(const reference_type& reference)
+ : mReference(reference)
+ {
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>&
+ bitvector_const_iterator<Element>::operator++()
+ {
+ ++mReference.mnBitIndex;
+
+ if(mReference.mnBitIndex == kBitCount)
+ {
+ ++mReference.mpBitWord;
+ mReference.mnBitIndex = 0;
+ }
+
+ return *this;
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>&
+ bitvector_const_iterator<Element>::operator--()
+ {
+ if(mReference.mnBitIndex == 0)
+ {
+ --mReference.mpBitWord;
+ mReference.mnBitIndex = kBitCount;
+ }
+
+ --mReference.mnBitIndex;
+ return *this;
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>
+ bitvector_const_iterator<Element>::operator++(int)
+ {
+ bitvector_const_iterator copy(*this);
+ ++*this;
+ return copy;
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>
+ bitvector_const_iterator<Element>::operator--(int)
+ {
+ bitvector_const_iterator copy(*this);
+ --*this;
+ return copy;
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>&
+ bitvector_const_iterator<Element>::operator+=(difference_type n)
+ {
+ n += mReference.mnBitIndex;
+
+ if(n >= difference_type(0))
+ {
+ mReference.mpBitWord += n / kBitCount;
+ mReference.mnBitIndex = (size_type)(n % kBitCount);
+ }
+ else
+ {
+ // backwards is tricky
+ // figure out how many full words backwards we need to move
+ // n = [-1..-32] => 1
+ // n = [-33..-64] => 2
+ const size_type backwards = (size_type)(-n + kBitCount - 1);
+ mReference.mpBitWord -= backwards / kBitCount;
+
+ // -1 => 31; backwards = 32; 31 - (backwards % 32) = 31
+ // -2 => 30; backwards = 33; 31 - (backwards % 32) = 30
+ // -3 => 29; backwards = 34
+ // ..
+ // -32 => 0; backwards = 63; 31 - (backwards % 32) = 0
+ // -33 => 31; backwards = 64; 31 - (backwards % 32) = 31
+ mReference.mnBitIndex = (kBitCount - 1) - (backwards % kBitCount);
+ }
+
+ return *this;
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>&
+ bitvector_const_iterator<Element>::operator-=(difference_type n)
+ {
+ return (*this += -n);
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>
+ bitvector_const_iterator<Element>::operator+(difference_type n) const
+ {
+ bitvector_const_iterator copy(*this);
+ copy += n;
+ return copy;
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>
+ bitvector_const_iterator<Element>::operator-(difference_type n) const
+ {
+ bitvector_const_iterator copy(*this);
+ copy -= n;
+ return copy;
+ }
+
+
+ template <typename Element>
+ typename bitvector_const_iterator<Element>::difference_type
+ bitvector_const_iterator<Element>::operator-(const this_type& rhs) const
+ {
+ return ((mReference.mpBitWord - rhs.mReference.mpBitWord) * kBitCount) + mReference.mnBitIndex - rhs.mReference.mnBitIndex;
+ }
+
+
+ template <typename Element>
+ bool bitvector_const_iterator<Element>::operator==(const this_type& rhs) const
+ {
+ return (mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex == rhs.mReference.mnBitIndex);
+ }
+
+
+ template <typename Element>
+ bool bitvector_const_iterator<Element>::operator!=(const this_type& rhs) const
+ {
+ return !(*this == rhs);
+ }
+
+
+ template <typename Element>
+ bool bitvector_const_iterator<Element>::operator<(const this_type& rhs) const
+ {
+ return (mReference.mpBitWord < rhs.mReference.mpBitWord) ||
+ ((mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex < rhs.mReference.mnBitIndex));
+ }
+
+
+ template <typename Element>
+ bool bitvector_const_iterator<Element>::operator<=(const this_type& rhs) const
+ {
+ return (mReference.mpBitWord < rhs.mReference.mpBitWord) ||
+ ((mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex <= rhs.mReference.mnBitIndex));
+ }
+
+
+ template <typename Element>
+ bool bitvector_const_iterator<Element>::operator>(const this_type& rhs) const
+ {
+ return !(*this <= rhs);
+ }
+
+
+ template <typename Element>
+ bool bitvector_const_iterator<Element>::operator>=(const this_type& rhs) const
+ {
+ return !(*this < rhs);
+ }
+
+
+ template <typename Element>
+ bool bitvector_const_iterator<Element>::operator*() const
+ {
+ return mReference;
+ }
+
+
+ template <typename Element>
+ bool bitvector_const_iterator<Element>::operator[](difference_type n) const
+ {
+ return *(*this + n);
+ }
+
+
+ template <typename Element>
+ bitvector_const_iterator<Element>& bitvector_const_iterator<Element>::operator= (const this_type& rhs)
+ {
+ mReference.CopyFrom(rhs.mReference);
+ return *this;
+ }
+
+
+ template <typename Element>
+ int bitvector_const_iterator<Element>::validate(const Element* pStart, const Element* pEnd, eastl_size_t nExtraBits) const
+ {
+ const Element* const pCurrent = mReference.mpBitWord;
+
+ if(pCurrent >= pStart)
+ {
+ if(nExtraBits == 0)
+ {
+ if(pCurrent == pEnd && mReference)
+ return eastl::isf_valid | eastl::isf_current;
+ else if(pCurrent < pEnd)
+ return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference;
+ }
+ else if(pCurrent == (pEnd - 1))
+ {
+ const size_type bit = mReference.mnBitIndex;
+ const size_type lastbit = kBitCount - nExtraBits;
+
+ if(bit == lastbit)
+ return eastl::isf_valid | eastl::isf_current;
+ else if(bit < lastbit)
+ return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference;
+ }
+ else if(pCurrent < pEnd)
+ {
+ return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference;
+ }
+ }
+
+ return eastl::isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bitvector_iterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Element>
+ bitvector_iterator<Element>::bitvector_iterator()
+ : base_type()
+ {
+ }
+
+ template <typename Element>
+ bitvector_iterator<Element>::bitvector_iterator(Element* p, eastl_size_t i)
+ : base_type(p, i)
+ {
+ }
+
+
+ template <typename Element>
+ bitvector_iterator<Element>::bitvector_iterator(reference_type& reference)
+ : base_type(reference)
+ {
+ }
+
+
+ template <typename Element>
+ typename bitvector_iterator<Element>::reference_type
+ bitvector_iterator<Element>::operator*() const
+ {
+ return base_type::mReference;
+ }
+
+
+ template <typename Element>
+ typename bitvector_iterator<Element>::reference_type
+ bitvector_iterator<Element>::operator[](difference_type n) const
+ {
+ return *(*this + n);
+ }
+
+
+ template <typename Element>
+ void MoveBits(bitvector_iterator<Element> start,
+ bitvector_iterator<Element> end,
+ bitvector_iterator<Element> dest)
+ {
+ // Slow implemenation; could optimize by moving a word at a time.
+ if(dest <= start)
+ {
+ while(start != end)
+ {
+ *dest = *start;
+ ++dest;
+ ++start;
+ }
+ }
+ else
+ {
+ // Need to move backwards
+ dest += (end - start);
+
+ while(start != end)
+ {
+ --dest;
+ --end;
+ *dest = *end;
+ }
+ }
+ }
+
+
+ template <typename Element>
+ bitvector_iterator<Element>
+ bitvector_iterator<Element>::operator++(int)
+ {
+ bitvector_iterator copy(*this);
+ ++*this;
+ return copy;
+ }
+
+
+ template <typename Element>
+ bitvector_iterator<Element>
+ bitvector_iterator<Element>::operator--(int)
+ {
+ bitvector_iterator copy(*this);
+ --*this;
+ return copy;
+ }
+
+
+ template <typename Element>
+ bitvector_iterator<Element>
+ bitvector_iterator<Element>::operator+(difference_type n) const
+ {
+ bitvector_iterator copy(*this);
+ copy += n;
+ return copy;
+ }
+
+
+ template <typename Element>
+ bitvector_iterator<Element>
+ bitvector_iterator<Element>::operator-(difference_type n) const
+ {
+ bitvector_iterator copy(*this);
+ copy -= n;
+ return copy;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bitvector
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Allocator, typename Element, typename Container>
+ template <typename InputIterator>
+ void bitvector<Allocator, Element, Container>::assign(InputIterator first, InputIterator last)
+ {
+ // To consider: We can maybe specialize this on bitvector_iterator to do a fast bitwise copy.
+ // We can also specialize for random access iterators to figure out the size & reserve first.
+
+ clear();
+
+ while(first != last)
+ {
+ push_back(*first);
+ ++first;
+ }
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::iterator
+ bitvector<Allocator, Element, Container>::begin() EA_NOEXCEPT
+ {
+ return iterator(mContainer.begin(), 0);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_iterator
+ bitvector<Allocator, Element, Container>::begin() const EA_NOEXCEPT
+ {
+ return const_iterator(mContainer.begin(), 0);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_iterator
+ bitvector<Allocator, Element, Container>::cbegin() const EA_NOEXCEPT
+ {
+ return const_iterator(mContainer.begin(), 0);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::iterator
+ bitvector<Allocator, Element, Container>::end() EA_NOEXCEPT
+ {
+ return iterator(mContainer.end(), 0) - mFreeBitCount;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_iterator
+ bitvector<Allocator, Element, Container>::end() const EA_NOEXCEPT
+ {
+ return const_iterator(mContainer.end(), 0) - mFreeBitCount;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_iterator
+ bitvector<Allocator, Element, Container>::cend() const EA_NOEXCEPT
+ {
+ return const_iterator(mContainer.end(), 0) - mFreeBitCount;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bool bitvector<Allocator, Element, Container>::empty() const EA_NOEXCEPT
+ {
+ return mContainer.empty();
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::size_type
+ bitvector<Allocator, Element, Container>::size() const EA_NOEXCEPT
+ {
+ return (mContainer.size() * kBitCount) - mFreeBitCount;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::size_type
+ bitvector<Allocator, Element, Container>::capacity() const EA_NOEXCEPT
+ {
+ return mContainer.capacity() * kBitCount;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::set_capacity(size_type n)
+ {
+ if(n == npos)
+ mContainer.set_capacity(npos);
+ else
+ mContainer.set_capacity((n + kBitCount - 1) / kBitCount);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::reverse_iterator
+ bitvector<Allocator, Element, Container>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(end());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_reverse_iterator
+ bitvector<Allocator, Element, Container>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(end());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_reverse_iterator
+ bitvector<Allocator, Element, Container>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(end());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::reverse_iterator
+ bitvector<Allocator, Element, Container>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(begin());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_reverse_iterator
+ bitvector<Allocator, Element, Container>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(begin());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_reverse_iterator
+ bitvector<Allocator, Element, Container>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(begin());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::reference
+ bitvector<Allocator, Element, Container>::front()
+ {
+ EASTL_ASSERT(!empty());
+ return reference(&mContainer[0], 0);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_reference
+ bitvector<Allocator, Element, Container>::front() const
+ {
+ EASTL_ASSERT(!empty());
+
+ // To consider: make a better solution to this than const_cast.
+ return reference(const_cast<Element*>(&mContainer[0]), 0);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::reference
+ bitvector<Allocator, Element, Container>::back()
+ {
+ EASTL_ASSERT(!empty());
+ return *(--end());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_reference
+ bitvector<Allocator, Element, Container>::back() const
+ {
+ EASTL_ASSERT(!empty());
+ return *(--end());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::push_back()
+ {
+ if(!mFreeBitCount)
+ {
+ mContainer.push_back();
+ mFreeBitCount = kBitCount;
+ }
+
+ --mFreeBitCount;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::push_back(value_type value)
+ {
+ push_back();
+ *--end() = value;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::pop_back()
+ {
+ EASTL_ASSERT(!empty());
+
+ if(++mFreeBitCount == kBitCount)
+ {
+ mContainer.pop_back();
+ mFreeBitCount = 0;
+ }
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::reserve(size_type n)
+ {
+ const size_type wordCount = (n + kBitCount - 1) / kBitCount;
+ mContainer.reserve(wordCount);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::resize(size_type n)
+ {
+ const size_type wordCount = (n + kBitCount - 1) / kBitCount;
+ const size_type extra = (wordCount * kBitCount) - n;
+
+ mContainer.resize(wordCount);
+ mFreeBitCount = extra;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::resize(size_type n, value_type value)
+ {
+ const size_type s = size();
+ if(n < s)
+ resize(n);
+
+ // Fill up to the end of a word
+ size_type newbits = n - s;
+
+ while(mFreeBitCount && newbits)
+ {
+ push_back(value);
+ --newbits;
+ }
+
+ // Fill the rest a word at a time
+ if(newbits)
+ {
+ element_type element(0);
+ if(value)
+ element = ~element;
+
+ const size_type words = (n + kBitCount - 1) / kBitCount;
+ const size_type extra = words * kBitCount - n;
+ mContainer.resize(words, element);
+ mFreeBitCount = extra;
+ }
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bool bitvector<Allocator, Element, Container>::test(size_type n, bool defaultValue) const
+ {
+ if(n < size())
+ return *(begin() + (difference_type)n);
+
+ return defaultValue;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::set(size_type n, bool value)
+ {
+ if(EASTL_UNLIKELY(n >= size()))
+ resize(n + 1);
+
+ *(begin() + (difference_type)n) = value;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::reference
+ bitvector<Allocator, Element, Container>::at(size_type n)
+ {
+ // The difference between at and operator[] is that at signals
+ // if the requested position is out of range by throwing an
+ // out_of_range exception.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(n >= size()))
+ throw std::out_of_range("bitvector::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n >= size()))
+ EASTL_FAIL_MSG("bitvector::at -- out of range");
+ #endif
+
+ return *(begin() + (difference_type)n);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_reference
+ bitvector<Allocator, Element, Container>::at(size_type n) const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(n >= size()))
+ throw std::out_of_range("bitvector::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n >= size()))
+ EASTL_FAIL_MSG("bitvector::at -- out of range");
+ #endif
+
+ return *(begin() + (difference_type)n);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::reference
+ bitvector<Allocator, Element, Container>::operator[](size_type n)
+ {
+ return *(begin() + (difference_type)n);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::const_reference
+ bitvector<Allocator, Element, Container>::operator[](size_type n) const
+ {
+ return *(begin() + (difference_type)n);
+ }
+
+
+/*
+ template <typename Allocator, typename Element, typename Container>
+ template <bool value>
+ typename bitvector<Allocator, Element, Container>::iterator
+ bitvector<Allocator, Element, Container>::find_first()
+ {
+ return begin();
+ }
+
+ template <bool value> iterator find_next(const_iterator it);
+ template <bool value> iterator find_last();
+ template <bool value> iterator find_prev(const_iterator it);
+
+ template <bool value> const_iterator find_first() const;
+ template <bool value> const_iterator find_next(const_iterator it) const;
+ template <bool value> const_iterator find_last() const;
+ template <bool value> const_iterator find_prev(const_iterator it) const;
+*/
+
+
+
+
+ template <typename Allocator, typename Element, typename Container>
+ inline typename bitvector<Allocator, Element, Container>::container_type&
+ bitvector<Allocator, Element, Container>::get_container()
+ {
+ return mContainer;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ inline const typename bitvector<Allocator, Element, Container>::container_type&
+ bitvector<Allocator, Element, Container>::get_container() const
+ {
+ return mContainer;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bool bitvector<Allocator, Element, Container>::validate() const
+ {
+ if(!mContainer.validate())
+ return false;
+
+ if((unsigned)mFreeBitCount >= kBitCount)
+ return false;
+
+ return true;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ int bitvector<Allocator, Element, Container>::validate_iterator(const_iterator i) const
+ {
+ return i.validate(mContainer.begin(), mContainer.end(), mFreeBitCount);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::element_type*
+ bitvector<Allocator, Element, Container>::data() EA_NOEXCEPT
+ {
+ return mContainer.data();
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ const typename bitvector<Allocator, Element, Container>::element_type*
+ bitvector<Allocator, Element, Container>::data() const EA_NOEXCEPT
+ {
+ return mContainer.data();
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::iterator
+ bitvector<Allocator, Element, Container>::insert(const_iterator position, value_type value)
+ {
+ iterator iPosition(position.get_reference_type()); // This is just a non-const version of position.
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_valid) == 0)
+ EASTL_FAIL_MSG("bitvector::insert -- invalid iterator");
+ #endif
+
+ // Save because we might reallocate
+ const typename iterator::difference_type n = iPosition - begin();
+ push_back();
+ iPosition = begin() + n;
+
+ MoveBits(iPosition, --end(), ++iterator(iPosition));
+ *iPosition = value;
+
+ return iPosition;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::insert(const_iterator position, size_type n, value_type value)
+ {
+ iterator iPosition(position.get_reference_type()); // This is just a non-const version of position.
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_valid) == 0)
+ EASTL_FAIL_MSG("bitvector::insert -- invalid iterator");
+ #endif
+
+ // Save because we might reallocate.
+ const typename iterator::difference_type p = iPosition - begin();
+ resize(size() + n);
+ iPosition = begin() + p;
+
+ iterator insert_end = iPosition + n;
+ MoveBits(iPosition, end() - n, insert_end);
+
+ // To do: Optimize this to word-at-a-time for large inserts
+ while(iPosition != insert_end)
+ {
+ *iPosition = value;
+ ++iPosition;
+ }
+ }
+
+
+ /*
+ The following is a placeholder for a future implementation. It turns out that a correct implementation of
+ insert(pos, first, last) is a non-trivial exercise that would take a few hours to implement and test.
+ The reasons why involve primarily the problem of handling the case where insertion source comes from
+ within the container itself, and the case that first and last (note they are templated) might not refer
+ to iterators might refer to a value/count pair. The C++ Standard requires you to handle this case and
+ I (Paul Pedriana) believe that it applies even for a bitvector, given that bool is an integral type.
+ So you have to set up a compile-time type traits function chooser. See vector, for example.
+
+ template <typename Allocator, typename Element, typename Container>
+ template <typename InputIterator>
+ void bitvector<Allocator, Element, Container>::insert(const_iterator position, InputIterator first, InputIterator last)
+ {
+ iterator iPosition(position.get_reference_type()); // This is just a non-const version of position.
+
+ // This implementation is probably broken due to not handling insertion into self.
+ // To do: Make a more efficient version of this.
+ difference_type distance = (iPosition - begin());
+
+ while(first != last)
+ {
+ insert(iPosition, *first);
+ iPosition = begin() + ++distance;
+ ++first;
+ }
+ }
+ */
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::iterator
+ bitvector<Allocator, Element, Container>::erase(const_iterator position)
+ {
+ iterator iPosition(position.get_reference_type()); // This is just a non-const version of position.
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_can_dereference) == 0)
+ EASTL_FAIL_MSG("bitvector::erase -- invalid iterator");
+ #endif
+
+ MoveBits(++iterator(iPosition), end(), iPosition);
+ resize(size() - 1);
+
+ // Verify that no reallocation occurred.
+ EASTL_ASSERT(validate_iterator(iPosition) & eastl::isf_valid);
+ return iPosition;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::iterator
+ bitvector<Allocator, Element, Container>::erase(const_iterator first, const_iterator last)
+ {
+ iterator iFirst(first.get_reference_type()); // This is just a non-const version of first.
+ iterator iLast(last.get_reference_type()); // This is just a non-const version of last.
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(validate_iterator(iLast) & eastl::isf_valid) == 0)
+ EASTL_FAIL_MSG("bitvector::erase -- invalid iterator");
+ #endif
+
+ if(!(iFirst == iLast))
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(validate_iterator(iFirst) & eastl::isf_can_dereference) == 0)
+ EASTL_FAIL_MSG("bitvector::erase -- invalid iterator");
+ #endif
+
+ const size_type eraseCount = (size_type)(iLast - iFirst);
+ MoveBits(iLast, end(), iFirst);
+ resize(size() - eraseCount);
+
+ // Verify that no reallocation occurred.
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(validate_iterator(iFirst) & eastl::isf_valid) == 0)
+ EASTL_FAIL_MSG("bitvector::erase -- invalid iterator");
+ #endif
+ }
+
+ return iFirst;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::reverse_iterator
+ bitvector<Allocator, Element, Container>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ typename bitvector<Allocator, Element, Container>::reverse_iterator
+ bitvector<Allocator, Element, Container>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase(last.base(), first.base()));
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::swap(this_type& rhs)
+ {
+ mContainer.swap(rhs.mContainer);
+ eastl::swap(mFreeBitCount, rhs.mFreeBitCount);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::reset_lose_memory()
+ {
+ mContainer.reset_lose_memory(); // intentional memory leak.
+ mFreeBitCount = 0;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ void bitvector<Allocator, Element, Container>::clear()
+ {
+ mContainer.clear();
+ mFreeBitCount = 0;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bitvector<Allocator, Element, Container>&
+ bitvector<Allocator, Element, Container>::operator=(const bitvector& rhs)
+ {
+ // The following is OK if (&rhs == this)
+ mContainer = rhs.mContainer;
+ mFreeBitCount = rhs.mFreeBitCount;
+
+ return *this;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bitvector<Allocator, Element, Container>::bitvector()
+ : mContainer(),
+ mFreeBitCount(0)
+ {
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bitvector<Allocator, Element, Container>::bitvector(const allocator_type& allocator)
+ : mContainer(allocator),
+ mFreeBitCount(0)
+ {
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bitvector<Allocator, Element, Container>::bitvector(size_type n, const allocator_type& allocator)
+ : mContainer((n + kBitCount - 1) / kBitCount, allocator)
+ {
+ mFreeBitCount = kBitCount - (n % kBitCount);
+
+ if(mFreeBitCount == kBitCount)
+ mFreeBitCount = 0;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bitvector<Allocator, Element, Container>::bitvector(size_type n, value_type value, const allocator_type& allocator)
+ : mContainer((n + kBitCount - 1) / kBitCount, value ? ~element_type(0) : element_type(0), allocator)
+ {
+ mFreeBitCount = kBitCount - (n % kBitCount);
+
+ if(mFreeBitCount == kBitCount)
+ mFreeBitCount = 0;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ bitvector<Allocator, Element, Container>::bitvector(const bitvector& copy)
+ : mContainer(copy.mContainer),
+ mFreeBitCount(copy.mFreeBitCount)
+ {
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ template <typename InputIterator>
+ bitvector<Allocator, Element, Container>::bitvector(InputIterator first, InputIterator last)
+ : mContainer(),
+ mFreeBitCount(0)
+ {
+ assign(first, last);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Allocator, typename Element, typename Container>
+ inline bool operator==(const bitvector<Allocator, Element, Container>& a,
+ const bitvector<Allocator, Element, Container>& b)
+ {
+ // To do: Replace this with a smart compare implementation. This is much slower than it needs to be.
+ return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin()));
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ inline bool operator!=(const bitvector<Allocator, Element, Container>& a,
+ const bitvector<Allocator, Element, Container>& b)
+ {
+ return !operator==(a, b);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ inline bool operator<(const bitvector<Allocator, Element, Container>& a,
+ const bitvector<Allocator, Element, Container>& b)
+ {
+ // To do: Replace this with a smart compare implementation. This is much slower than it needs to be.
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ inline bool operator>(const bitvector<Allocator, Element, Container>& a,
+ const bitvector<Allocator, Element, Container>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ inline bool operator<=(const bitvector<Allocator, Element, Container>& a,
+ const bitvector<Allocator, Element, Container>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename Allocator, typename Element, typename Container>
+ inline bool operator>=(const bitvector<Allocator, Element, Container>& a,
+ const bitvector<Allocator, Element, Container>& b)
+ {
+ return !(a < b);
+ }
+
+ template <typename Allocator, typename Element, typename Container>
+ inline void swap(bitvector<Allocator, Element, Container>& a,
+ bitvector<Allocator, Element, Container>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/bonus/adaptors.h b/EASTL/include/EASTL/bonus/adaptors.h
new file mode 100644
index 0000000..423cacd
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/adaptors.h
@@ -0,0 +1,88 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ADAPTORS_H
+#define EASTL_ADAPTORS_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+EA_DISABLE_VC_WARNING(4512 4626)
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+
+ EA_DISABLE_VC_WARNING(5027) // move assignment operator was implicitly defined as deleted
+#endif
+
+
+namespace eastl
+{
+ /// reverse
+ ///
+ /// This adaptor allows reverse iteration of a container in ranged base for-loops.
+ ///
+ /// for (auto& i : reverse(c)) { ... }
+ ///
+ template <typename Container>
+ struct reverse_wrapper
+ {
+ template <typename C>
+ reverse_wrapper(C&& c)
+ : mContainer(eastl::forward<C>(c))
+ {
+ /**
+ * NOTE:
+ *
+ * Due to reference collapsing rules of universal references Container type is either
+ *
+ * const C& if the input is a const lvalue
+ * C& if the input is a non-const lvalue
+ * C if the input is an rvalue
+ * const C if the input is a const rvalue thus the object will have to be copied and the copy-ctor will be called
+ *
+ *
+ * Thus we either move the whole container into this object or take a reference to the lvalue avoiding the copy.
+ * The static_assert below ensures this.
+ */
+ static_assert(eastl::is_same_v<C, Container>, "Reference collapsed deduced type must be the same as the deduced Container type!");
+ }
+
+ Container mContainer;
+ };
+
+ template <typename Container>
+ auto begin(const reverse_wrapper<Container>& w) -> decltype(eastl::rbegin(w.mContainer))
+ {
+ return eastl::rbegin(w.mContainer);
+ }
+
+ template <typename Container>
+ auto end(const reverse_wrapper<Container>& w) -> decltype(eastl::rend(w.mContainer))
+ {
+ return eastl::rend(w.mContainer);
+ }
+
+ template <typename Container>
+ reverse_wrapper<Container> reverse(Container&& c)
+ {
+ return reverse_wrapper<Container>(eastl::forward<Container>(c));
+ }
+
+} // namespace eastl
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+
+ EA_RESTORE_VC_WARNING()
+#endif
+EA_RESTORE_VC_WARNING()
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/bonus/call_traits.h b/EASTL/include/EASTL/bonus/call_traits.h
new file mode 100644
index 0000000..0995d05
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/call_traits.h
@@ -0,0 +1,117 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// The design for call_traits here is very similar to that found in template
+// metaprogramming libraries such as Boost, GCC, and Metrowerks, given that
+// these libraries have established this interface as a defacto standard for
+// solving this problem. Also, these are described in various books on the
+// topic of template metaprogramming, such as "Modern C++ Design".
+//
+// See http://www.boost.org/libs/utility/call_traits.htm or search for
+// call_traits in Google for a description of call_traits.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_CALL_TRAITS_H
+#define EASTL_CALL_TRAITS_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+
+ template <typename T, bool small_>
+ struct ct_imp2 { typedef const T& param_type; };
+
+ template <typename T>
+ struct ct_imp2<T, true> { typedef const T param_type; };
+
+ template <typename T, bool isp, bool b1>
+ struct ct_imp { typedef const T& param_type; };
+
+ template <typename T, bool isp>
+ struct ct_imp<T, isp, true> { typedef typename ct_imp2<T, sizeof(T) <= sizeof(void*)>::param_type param_type; };
+
+ template <typename T, bool b1>
+ struct ct_imp<T, true, b1> { typedef T const param_type; };
+
+
+
+ template <typename T>
+ struct call_traits
+ {
+ public:
+ typedef T value_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef typename ct_imp<T, is_pointer<T>::value, is_arithmetic<T>::value>::param_type param_type;
+ };
+
+
+ template <typename T>
+ struct call_traits<T&>
+ {
+ typedef T& value_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T& param_type;
+ };
+
+
+ template <typename T, size_t N>
+ struct call_traits<T [N]>
+ {
+ private:
+ typedef T array_type[N];
+
+ public:
+ typedef const T* value_type;
+ typedef array_type& reference;
+ typedef const array_type& const_reference;
+ typedef const T* const param_type;
+ };
+
+
+ template <typename T, size_t N>
+ struct call_traits<const T [N]>
+ {
+ private:
+ typedef const T array_type[N];
+
+ public:
+ typedef const T* value_type;
+ typedef array_type& reference;
+ typedef const array_type& const_reference;
+ typedef const T* const param_type;
+ };
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/bonus/compressed_pair.h b/EASTL/include/EASTL/bonus/compressed_pair.h
new file mode 100644
index 0000000..379642b
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/compressed_pair.h
@@ -0,0 +1,460 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// The compressed pair class is very similar to std::pair, but if either of the
+// template arguments are empty classes, then the "empty base-class optimization"
+// is applied to compress the size of the pair.
+//
+// The design for compressed_pair here is very similar to that found in template
+// metaprogramming libraries such as Boost, GCC, and Metrowerks, given that
+// these libraries have established this interface as a defacto standard for
+// solving this problem. Also, these are described in various books on the
+// topic of template metaprogramming, such as "Modern C++ Design".
+//
+// template <typename T1, typename T2>
+// class compressed_pair
+// {
+// public:
+// typedef T1 first_type;
+// typedef T2 second_type;
+// typedef typename call_traits<first_type>::param_type first_param_type;
+// typedef typename call_traits<second_type>::param_type second_param_type;
+// typedef typename call_traits<first_type>::reference first_reference;
+// typedef typename call_traits<second_type>::reference second_reference;
+// typedef typename call_traits<first_type>::const_reference first_const_reference;
+// typedef typename call_traits<second_type>::const_reference second_const_reference;
+//
+// compressed_pair() : base() {}
+// compressed_pair(first_param_type x, second_param_type y);
+// explicit compressed_pair(first_param_type x);
+// explicit compressed_pair(second_param_type y);
+//
+// compressed_pair& operator=(const compressed_pair&);
+//
+// first_reference first();
+// first_const_reference first() const;
+//
+// second_reference second();
+// second_const_reference second() const;
+//
+// void swap(compressed_pair& y);
+// };
+//
+// The two members of the pair can be accessed using the member functions first()
+// and second(). Note that not all member functions can be instantiated for all
+// template parameter types. In particular compressed_pair can be instantiated for
+// reference and array types, however in these cases the range of constructors that
+// can be used are limited. If types T1 and T2 are the same type, then there is
+// only one version of the single-argument constructor, and this constructor
+// initialises both values in the pair to the passed value.
+//
+// Note that compressed_pair can not be instantiated if either of the template
+// arguments is a union type, unless there is compiler support for is_union,
+// or if is_union is specialised for the union type.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_COMPRESSED_PAIR_H
+#define EASTL_COMPRESSED_PAIR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/bonus/call_traits.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later
+ EA_DISABLE_VC_WARNING(4626 5027) // warning C4626: 'eastl::compressed_pair_imp<T1,T2,0>': assignment operator was implicitly defined as deleted because a base class assignment operator is inaccessible or deleted
+#endif
+
+namespace eastl
+{
+
+ template <typename T1, typename T2>
+ class compressed_pair;
+
+
+ template <typename T1, typename T2, bool isSame, bool firstEmpty, bool secondEmpty>
+ struct compressed_pair_switch;
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, false, false, false>{ static const int value = 0; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, false, true, false> { static const int value = 1; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, false, false, true> { static const int value = 2; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, false, true, true> { static const int value = 3; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, true, true, true> { static const int value = 4; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, true, false, false> { static const int value = 5; };
+
+ template <typename T1, typename T2, int version>
+ class compressed_pair_imp;
+
+
+
+ template <typename T>
+ inline void cp_swap(T& t1, T& t2)
+ {
+ T tTemp = t1;
+ t1 = t2;
+ t2 = tTemp;
+ }
+
+
+ // Derive from neither
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 0>
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : mFirst(x), mSecond(y) {}
+
+ compressed_pair_imp(first_param_type x)
+ : mFirst(x) {}
+
+ compressed_pair_imp(second_param_type y)
+ : mSecond(y) {}
+
+ first_reference first() { return mFirst; }
+ first_const_reference first() const { return mFirst; }
+
+ second_reference second() { return mSecond; }
+ second_const_reference second() const { return mSecond; }
+
+ void swap(compressed_pair<T1, T2>& y)
+ {
+ cp_swap(mFirst, y.first());
+ cp_swap(mSecond, y.second());
+ }
+
+ private:
+ first_type mFirst;
+ second_type mSecond;
+ };
+
+
+ // Derive from T1
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 1> : private T1
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : first_type(x), mSecond(y) {}
+
+ compressed_pair_imp(first_param_type x)
+ : first_type(x) {}
+
+ compressed_pair_imp(second_param_type y)
+ : mSecond(y) {}
+
+ first_reference first() { return *this; }
+ first_const_reference first() const { return *this; }
+
+ second_reference second() { return mSecond; }
+ second_const_reference second() const { return mSecond; }
+
+ void swap(compressed_pair<T1,T2>& y)
+ {
+ // No need to swap empty base class
+ cp_swap(mSecond, y.second());
+ }
+
+ private:
+ second_type mSecond;
+ };
+
+
+
+ // Derive from T2
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 2> : private T2
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : second_type(y), mFirst(x) {}
+
+ compressed_pair_imp(first_param_type x)
+ : mFirst(x) {}
+
+ compressed_pair_imp(second_param_type y)
+ : second_type(y) {}
+
+ first_reference first() { return mFirst; }
+ first_const_reference first() const { return mFirst; }
+
+ second_reference second() { return *this; }
+ second_const_reference second() const { return *this; }
+
+ void swap(compressed_pair<T1,T2>& y)
+ {
+ // No need to swap empty base class
+ cp_swap(mFirst, y.first());
+ }
+
+ private:
+ first_type mFirst;
+ };
+
+
+
+ // Derive from T1 and T2
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 3> : private T1, private T2
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : first_type(x), second_type(y) {}
+
+ compressed_pair_imp(first_param_type x)
+ : first_type(x) {}
+
+ compressed_pair_imp(second_param_type y)
+ : second_type(y) {}
+
+ first_reference first() { return *this; }
+ first_const_reference first() const { return *this; }
+
+ second_reference second() { return *this; }
+ second_const_reference second() const { return *this; }
+
+ // No need to swap empty bases
+ void swap(compressed_pair<T1, T2>&)
+ { }
+ };
+
+
+ // T1 == T2, T1 and T2 are both empty
+ // Note does not actually store an instance of T2 at all;
+ // but reuses T1 base class for both first() and second().
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 4> : private T1
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type)
+ : first_type(x) {}
+
+ compressed_pair_imp(first_param_type x)
+ : first_type(x) {}
+
+ first_reference first() { return *this; }
+ first_const_reference first() const { return *this; }
+
+ second_reference second() { return *this; }
+ second_const_reference second() const { return *this; }
+
+ void swap(compressed_pair<T1, T2>&) { }
+ };
+
+
+ // T1 == T2 and are not empty
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 5>
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : mFirst(x), mSecond(y) {}
+
+ compressed_pair_imp(first_param_type x)
+ : mFirst(x), mSecond(x) {}
+
+ first_reference first() { return mFirst; }
+ first_const_reference first() const { return mFirst; }
+
+ second_reference second() { return mSecond; }
+ second_const_reference second() const { return mSecond; }
+
+ void swap(compressed_pair<T1, T2>& y)
+ {
+ cp_swap(mFirst, y.first());
+ cp_swap(mSecond, y.second());
+ }
+
+ private:
+ first_type mFirst;
+ second_type mSecond;
+ };
+
+
+
+ template <typename T1, typename T2>
+ class compressed_pair
+ : private compressed_pair_imp<T1, T2,
+ compressed_pair_switch<
+ T1,
+ T2,
+ is_same<typename remove_cv<T1>::type, typename remove_cv<T2>::type>::value,
+ is_empty<T1>::value,
+ is_empty<T2>::value>::value>
+ {
+ private:
+ typedef compressed_pair_imp<T1, T2,
+ compressed_pair_switch<
+ T1,
+ T2,
+ is_same<typename remove_cv<T1>::type, typename remove_cv<T2>::type>::value,
+ is_empty<T1>::value,
+ is_empty<T2>::value>::value> base;
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair() : base() {}
+ compressed_pair(first_param_type x, second_param_type y) : base(x, y) {}
+ explicit compressed_pair(first_param_type x) : base(x) {}
+ explicit compressed_pair(second_param_type y) : base(y) {}
+
+ first_reference first() { return base::first(); }
+ first_const_reference first() const { return base::first(); }
+
+ second_reference second() { return base::second(); }
+ second_const_reference second() const { return base::second(); }
+
+ void swap(compressed_pair& y) { base::swap(y); }
+ };
+
+
+ // Partial specialisation for case where T1 == T2:
+ template <typename T>
+ class compressed_pair<T, T>
+ : private compressed_pair_imp<T, T,
+ compressed_pair_switch<
+ T,
+ T,
+ is_same<typename remove_cv<T>::type, typename remove_cv<T>::type>::value,
+ is_empty<T>::value,
+ is_empty<T>::value>::value>
+ {
+ private:
+ typedef compressed_pair_imp<T, T,
+ compressed_pair_switch<
+ T,
+ T,
+ is_same<typename remove_cv<T>::type, typename remove_cv<T>::type>::value,
+ is_empty<T>::value,
+ is_empty<T>::value>::value> base;
+ public:
+ typedef T first_type;
+ typedef T second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair() : base() {}
+ compressed_pair(first_param_type x, second_param_type y) : base(x, y) {}
+ explicit compressed_pair(first_param_type x) : base(x) {}
+
+ first_reference first() { return base::first(); }
+ first_const_reference first() const { return base::first(); }
+
+ second_reference second() { return base::second(); }
+ second_const_reference second() const { return base::second(); }
+
+ void swap(compressed_pair<T, T>& y) { base::swap(y); }
+ };
+
+
+ template <typename T1, typename T2>
+ inline void swap(compressed_pair<T1, T2>& x, compressed_pair<T1, T2>& y)
+ {
+ x.swap(y);
+ }
+
+
+} // namespace eastl
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later
+ EA_RESTORE_VC_WARNING()
+#endif
+
+#endif // Header include guard
+
+
+
diff --git a/EASTL/include/EASTL/bonus/fixed_ring_buffer.h b/EASTL/include/EASTL/bonus/fixed_ring_buffer.h
new file mode 100644
index 0000000..2bb54e4
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/fixed_ring_buffer.h
@@ -0,0 +1,50 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXED_RING_BUFFER_H
+#define EASTL_FIXED_RING_BUFFER_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/fixed_vector.h>
+#include <EASTL/bonus/ring_buffer.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+namespace eastl
+{
+
+ /// fixed_ring_buffer
+ ///
+ /// This is a convenience template alias for creating a fixed-sized
+ /// ring_buffer using eastl::fixed_vector as its storage container. This has
+ /// been tricky for users to get correct due to the constructor requirements
+ /// of eastl::ring_buffer leaking the implementation detail of the sentinel
+ /// value being used internally. In addition, it was not obvious what the
+ /// correct allocator_type template parameter should be used for containers
+ /// providing both a default allocator type and an overflow allocator type.
+ ///
+ /// We are over-allocating the fixed_vector container to accommodate the
+ /// ring_buffer sentinel to prevent that implementation detail leaking into
+ /// user code.
+ ///
+ /// Example usage:
+ ///
+ /// fixed_ring_buffer<int, 8> rb = {0, 1, 2, 3, 4, 5, 6, 7};
+ /// or
+ /// fixed_ring_buffer<int, 8> rb(8); // capacity doesn't need to respect sentinel
+ /// rb.push_back(0);
+ ///
+ ///
+#if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename T, size_t N>
+ using fixed_ring_buffer =
+ ring_buffer<T, fixed_vector<T, N + 1, false>, typename fixed_vector<T, N + 1, false>::overflow_allocator_type>;
+#endif
+
+} // namespace eastl
+
+#endif // Header include guard
+
diff --git a/EASTL/include/EASTL/bonus/fixed_tuple_vector.h b/EASTL/include/EASTL/bonus/fixed_tuple_vector.h
new file mode 100644
index 0000000..e9ce0ec
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/fixed_tuple_vector.h
@@ -0,0 +1,210 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXEDTUPLEVECTOR_H
+#define EASTL_FIXEDTUPLEVECTOR_H
+
+#include <EASTL/bonus/tuple_vector.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+namespace eastl
+{
+
+ /// EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME
+ #define EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_tuple_vector" // Unless the user overrides something, this is "EASTL fixed_vector".
+ #endif
+
+
+ /// EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME)
+ #endif
+
+// External interface of fixed_tuple_vector
+template <size_t nodeCount, bool bEnableOverflow, typename... Ts>
+class fixed_tuple_vector : public TupleVecInternal::TupleVecImpl<fixed_vector_allocator<
+ TupleVecInternal::TupleRecurser<Ts...>::GetTotalAllocationSize(nodeCount, 0), 1,
+ TupleVecInternal::TupleRecurser<Ts...>::GetTotalAlignment(), 0,
+ bEnableOverflow, EASTLAllocatorType>, make_index_sequence<sizeof...(Ts)>, Ts...>
+{
+public:
+ typedef fixed_vector_allocator<
+ TupleVecInternal::TupleRecurser<Ts...>::GetTotalAllocationSize(nodeCount, 0), 1,
+ TupleVecInternal::TupleRecurser<Ts...>::GetTotalAlignment(), 0,
+ bEnableOverflow, EASTLAllocatorType> fixed_allocator_type;
+ typedef aligned_buffer<fixed_allocator_type::kNodesSize, fixed_allocator_type::kNodeAlignment> aligned_buffer_type;
+ typedef fixed_tuple_vector<nodeCount, bEnableOverflow, Ts...> this_type;
+ typedef EASTLAllocatorType overflow_allocator_type;
+
+ typedef TupleVecInternal::TupleVecImpl<fixed_allocator_type, make_index_sequence<sizeof...(Ts)>, Ts...> base_type;
+ typedef typename base_type::size_type size_type;
+
+private:
+ aligned_buffer_type mBuffer;
+
+public:
+ fixed_tuple_vector()
+ : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ { }
+
+ fixed_tuple_vector(const overflow_allocator_type& allocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ { }
+
+ fixed_tuple_vector(this_type&& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::get_allocator().copy_overflow_allocator(x.get_allocator());
+ base_type::DoInitFromIterator(make_move_iterator(x.begin()), make_move_iterator(x.end()));
+ x.clear();
+ }
+
+ fixed_tuple_vector(this_type&& x, const overflow_allocator_type& allocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromIterator(make_move_iterator(x.begin()), make_move_iterator(x.end()));
+ x.clear();
+ }
+
+ fixed_tuple_vector(const this_type& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::get_allocator().copy_overflow_allocator(x.get_allocator());
+ base_type::DoInitFromIterator(x.begin(), x.end());
+ }
+
+ fixed_tuple_vector(const this_type& x, const overflow_allocator_type& allocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromIterator(x.begin(), x.end());
+ }
+
+ template <typename MoveIterBase>
+ fixed_tuple_vector(move_iterator<MoveIterBase> begin, move_iterator<MoveIterBase> end, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromIterator(begin, end);
+ }
+
+ template <typename Iterator>
+ fixed_tuple_vector(Iterator begin, Iterator end, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromIterator(begin, end);
+ }
+
+ fixed_tuple_vector(size_type n, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitDefaultFill(n);
+ }
+
+ fixed_tuple_vector(size_type n, const Ts&... args)
+ : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFillArgs(n, args...);
+ }
+
+ fixed_tuple_vector(size_type n, const Ts&... args, const overflow_allocator_type& allocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFillArgs(n, args...);
+ }
+
+ fixed_tuple_vector(size_type n,
+ typename base_type::const_reference_tuple tup,
+ const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFillTuple(n, tup);
+ }
+
+ fixed_tuple_vector(const typename base_type::value_tuple* first, const typename base_type::value_tuple* last,
+ const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromTupleArray(first, last);
+ }
+
+ fixed_tuple_vector(std::initializer_list<typename base_type::value_tuple> iList,
+ const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromTupleArray(iList.begin(), iList.end());
+ }
+
+ this_type& operator=(const this_type& other)
+ {
+ base_type::operator=(other);
+ return *this;
+ }
+
+ this_type& operator=(this_type&& other)
+ {
+ base_type::clear();
+ // OK to call DoInitFromIterator in a non-ctor scenario because clear() reset everything, more-or-less
+ base_type::DoInitFromIterator(make_move_iterator(other.begin()), make_move_iterator(other.end()));
+ other.clear();
+ return *this;
+ }
+
+ this_type& operator=(std::initializer_list<typename base_type::value_tuple> iList)
+ {
+ base_type::operator=(iList);
+ return *this;
+ }
+
+ void swap(this_type& x)
+ {
+ // If both containers are using the heap instead of local memory
+ // then we can do a fast pointer swap instead of content swap.
+ if ((has_overflowed() && x.has_overflowed()) && (get_overflow_allocator() == x.get_overflow_allocator()))
+ {
+ base_type::swap(x);
+ }
+ else
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+ }
+
+ // Returns the max fixed size, which is the user-supplied nodeCount parameter.
+ size_type max_size() const { return nodeCount; }
+ // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled,
+ // the container size can be greater than nodeCount but full() could return true because the
+ // fixed space may have a recently freed slot.
+ bool full() const { return (base_type::mNumElements >= nodeCount) || ((void*)base_type::mpData != (void*)mBuffer.buffer); }
+ // Returns true if the allocations spilled over into the overflow allocator. Meaningful
+ // only if overflow is enabled.
+ bool has_overflowed() const { return ((void*)base_type::mpData != (void*)mBuffer.buffer); }
+ // Returns the value of the bEnableOverflow template parameter.
+ bool can_overflow() const { return bEnableOverflow; }
+
+ const overflow_allocator_type& get_overflow_allocator() const { return base_type::get_allocator().get_overflow_allocator(); }
+};
+
+
+template <size_t nodeCount, bool bEnableOverflow, typename... Ts>
+inline void swap(fixed_tuple_vector<nodeCount, bEnableOverflow, Ts...>& a,
+ fixed_tuple_vector<nodeCount, bEnableOverflow, Ts...>& b)
+{
+ a.swap(b);
+}
+
+
+} // namespace eastl
+
+#endif // EASTL_TUPLEVECTOR_H
diff --git a/EASTL/include/EASTL/bonus/intrusive_sdlist.h b/EASTL/include/EASTL/bonus/intrusive_sdlist.h
new file mode 100644
index 0000000..1b126d4
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/intrusive_sdlist.h
@@ -0,0 +1,694 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// intrusive_sdlist is a special kind of intrusive list which we say is
+// "singly-doubly" linked. Instead of having a typical intrusive list node
+// which looks like this:
+//
+// struct intrusive_sdlist_node {
+// intrusive_sdlist_node *mpNext;
+// intrusive_sdlist_node *mpPrev;
+// };
+//
+// We instead have one that looks like this:
+//
+// struct intrusive_sdlist_node {
+// intrusive_sdlist_node* mpNext;
+// intrusive_sdlist_node** mppPrevNext;
+// };
+//
+// This may seem to be suboptimal, but it has one specific advantage: it allows
+// the intrusive_sdlist class to be the size of only one pointer instead of two.
+// This may seem like a minor optimization, but some users have wanted to create
+// thousands of empty instances of these.
+// This is because while an intrusive_list class looks like this:
+//
+// class intrusive_list {
+// intrusive_list_node mBaseNode;
+// };
+//
+// an intrusive_sdlist class looks like this:
+//
+// class intrusive_sdlist {
+// intrusive_sdlist_node* mpNext;
+// };
+//
+// So here we make a list of plusses and minuses of intrusive sdlists
+// compared to intrusive_lists and intrusive_slists:
+//
+// | list | slist | sdlist
+// ---------------------------------------------------------
+// min size | 8 | 4 | 4
+// node size | 8 | 4 | 8
+// anonymous erase | yes | no | yes
+// reverse iteration | yes | no | no
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTRUSIVE_SDLIST_H
+#define EASTL_INTRUSIVE_SDLIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+
+ /// intrusive_sdlist_node
+ ///
+ struct intrusive_sdlist_node
+ {
+ intrusive_sdlist_node* mpNext;
+ intrusive_sdlist_node** mppPrevNext;
+ };
+
+
+ /// IntrusiveSDListIterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct IntrusiveSDListIterator
+ {
+ typedef IntrusiveSDListIterator<T, Pointer, Reference> this_type;
+ typedef IntrusiveSDListIterator<T, T*, T&> iterator;
+ typedef IntrusiveSDListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef T node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ pointer mpNode;
+
+ public:
+ IntrusiveSDListIterator();
+ explicit IntrusiveSDListIterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type.
+ IntrusiveSDListIterator(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ }; // struct IntrusiveSDListIterator
+
+
+
+
+ /// intrusive_sdlist_base
+ ///
+ /// Provides a template-less base class for intrusive_sdlist.
+ ///
+ class intrusive_sdlist_base
+ {
+ public:
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+
+ protected:
+ intrusive_sdlist_node* mpNext;
+
+ public:
+ intrusive_sdlist_base();
+
+ bool empty() const; ///< Returns true if the container is empty.
+ size_type size() const; ///< Returns the number of elements in the list; O(n).
+
+ void clear(); ///< Clears the list; O(1). No deallocation occurs.
+ void pop_front(); ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated.
+ void reverse(); ///< Reverses a list so that front and back are swapped; O(n).
+
+ //bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching.
+
+ }; // class intrusive_sdlist_base
+
+
+
+ /// intrusive_sdlist
+ ///
+ template <typename T = intrusive_sdlist_node>
+ class intrusive_sdlist : public intrusive_sdlist_base
+ {
+ public:
+ typedef intrusive_sdlist<T> this_type;
+ typedef intrusive_sdlist_base base_type;
+ typedef T node_type;
+ typedef T value_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef IntrusiveSDListIterator<T, T*, T&> iterator;
+ typedef IntrusiveSDListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ public:
+ intrusive_sdlist(); ///< Creates an empty list.
+ intrusive_sdlist(const this_type& x); ///< Creates an empty list; ignores the argument.
+ this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument.
+
+ iterator begin(); ///< Returns an iterator pointing to the first element in the list.
+ const_iterator begin() const; ///< Returns a const_iterator pointing to the first element in the list.
+ const_iterator cbegin() const; ///< Returns a const_iterator pointing to the first element in the list.
+
+ iterator end(); ///< Returns an iterator pointing one-after the last element in the list.
+ const_iterator end() const; ///< Returns a const_iterator pointing one-after the last element in the list.
+ const_iterator cend() const; ///< Returns a const_iterator pointing one-after the last element in the list.
+
+ reference front(); ///< Returns a reference to the first element. The list must be empty.
+ const_reference front() const; ///< Returns a const reference to the first element. The list must be empty.
+
+ void push_front(value_type& value); ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list.
+ void push_back(value_type& value); ///< Adds an element to the back of the list; O(N). The element is not copied. The element must not be in any other list.
+ void pop_back(); ///< Removes an element from the back of the list; O(N). The element must be present, but is not deallocated.
+
+ bool contains(const value_type& value) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()).
+
+ iterator locate(value_type& value); ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n)
+ const_iterator locate(const value_type& value) const; ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n)
+
+ iterator insert(iterator position, value_type& value); ///< Inserts an element before the element pointed to by the iterator. O(1)
+ iterator erase(iterator position); ///< Erases the element pointed to by the iterator. O(1)
+ iterator erase(iterator first, iterator last); ///< Erases elements within the iterator range [first, last). O(1).
+ void swap(intrusive_sdlist& x); ///< Swaps the contents of two intrusive lists; O(1).
+
+ static void remove(value_type& value); ///< Erases an element from a list; O(1). Note that this is static so you don't need to know which list the element, although it must be in some list.
+
+ void splice(iterator position, value_type& value); ///< Moves the given element into this list before the element pointed to by position; O(1).
+ ///< Required: x must be in some list or have first/next pointers that point it itself.
+
+ void splice(iterator position, this_type& x); ///< Moves the contents of a list into this list before the element pointed to by position; O(1).
+ ///< Required: &x != this (same as std::list).
+
+ void splice(iterator position, this_type& x, iterator xPosition); ///< Moves the given element pointed to i within the list x into the current list before
+ ///< the element pointed to by position; O(1).
+
+ void splice(iterator position, this_type& x, iterator first, iterator last); ///< Moves the range of elements [first, last) from list x into the current list before
+ ///< the element pointed to by position; O(1).
+ ///< Required: position must not be in [first, last). (same as std::list).
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ }; // intrusive_sdlist
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // IntrusiveSDListIterator functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSDListIterator<T, Pointer, Reference>::IntrusiveSDListIterator()
+ {
+ #if EASTL_DEBUG
+ mpNode = NULL;
+ #endif
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSDListIterator<T, Pointer, Reference>::IntrusiveSDListIterator(pointer pNode)
+ : mpNode(pNode)
+ {
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSDListIterator<T, Pointer, Reference>::IntrusiveSDListIterator(const iterator& x)
+ : mpNode(x.mpNode)
+ {
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename IntrusiveSDListIterator<T, Pointer, Reference>::reference
+ IntrusiveSDListIterator<T, Pointer, Reference>::operator*() const
+ {
+ return *mpNode;
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename IntrusiveSDListIterator<T, Pointer, Reference>::pointer
+ IntrusiveSDListIterator<T, Pointer, Reference>::operator->() const
+ {
+ return mpNode;
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename IntrusiveSDListIterator<T, Pointer, Reference>::this_type&
+ IntrusiveSDListIterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return *this;
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename IntrusiveSDListIterator<T, Pointer, Reference>::this_type
+ IntrusiveSDListIterator<T, Pointer, Reference>::operator++(int)
+ {
+ this_type temp = *this;
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return temp;
+ }
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template paremeters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const IntrusiveSDListIterator<T, PointerA, ReferenceA>& a,
+ const IntrusiveSDListIterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const IntrusiveSDListIterator<T, PointerA, ReferenceA>& a,
+ const IntrusiveSDListIterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const IntrusiveSDListIterator<T, Pointer, Reference>& a,
+ const IntrusiveSDListIterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_sdlist_base
+ ///////////////////////////////////////////////////////////////////////
+
+ inline intrusive_sdlist_base::intrusive_sdlist_base()
+ { mpNext = NULL; }
+
+
+ inline bool intrusive_sdlist_base::empty() const
+ { return mpNext == NULL; }
+
+
+ inline intrusive_sdlist_base::size_type intrusive_sdlist_base::size() const
+ {
+ size_type n = 0;
+ for(const intrusive_sdlist_node* pCurrent = mpNext; pCurrent; pCurrent = pCurrent->mpNext)
+ n++;
+ return n;
+ }
+
+
+ inline void intrusive_sdlist_base::clear()
+ { mpNext = NULL; } // Note that we don't do anything with the list nodes.
+
+
+ inline void intrusive_sdlist_base::pop_front()
+ {
+ // To consider: Set mpNext's pointers to NULL in debug builds.
+ mpNext = mpNext->mpNext;
+ mpNext->mppPrevNext = &mpNext;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_sdlist
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ inline intrusive_sdlist<T>::intrusive_sdlist()
+ {
+ }
+
+
+ template <typename T>
+ inline intrusive_sdlist<T>::intrusive_sdlist(const this_type& /*x*/)
+ : intrusive_sdlist_base()
+ {
+ // We intentionally ignore argument x.
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::this_type& intrusive_sdlist<T>::operator=(const this_type& /*x*/)
+ {
+ return *this; // We intentionally ignore argument x.
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator intrusive_sdlist<T>::begin()
+ { return iterator(static_cast<T*>(mpNext)); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::begin() const
+ { return const_iterator(static_cast<T*>(const_cast<intrusive_sdlist_node*>(mpNext))); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::cbegin() const
+ { return const_iterator(static_cast<T*>(const_cast<intrusive_sdlist_node*>(mpNext))); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator intrusive_sdlist<T>::end()
+ { return iterator(static_cast<T*>(NULL)); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::end() const
+ { return const_iterator(static_cast<const T*>(NULL)); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::cend() const
+ { return const_iterator(static_cast<const T*>(NULL)); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::reference intrusive_sdlist<T>::front()
+ { return *static_cast<T*>(mpNext); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_reference intrusive_sdlist<T>::front() const
+ { return *static_cast<const T*>(mpNext); }
+
+
+ template <typename T>
+ inline void intrusive_sdlist<T>::push_front(value_type& value)
+ {
+ value.mpNext = mpNext;
+ value.mppPrevNext = &mpNext;
+ if(mpNext)
+ mpNext->mppPrevNext = &value.mpNext;
+ mpNext = &value;
+ }
+
+
+ template <typename T>
+ inline void intrusive_sdlist<T>::push_back(value_type& value)
+ {
+ intrusive_sdlist_node* pNext = mpNext;
+ intrusive_sdlist_node** ppPrevNext = &mpNext;
+
+ while(pNext)
+ {
+ ppPrevNext = &pNext->mpNext;
+ pNext = pNext->mpNext;
+ }
+
+ *ppPrevNext = &value;
+ value.mppPrevNext = ppPrevNext;
+ value.mpNext = NULL;
+ }
+
+
+ template <typename T>
+ inline void intrusive_sdlist<T>::pop_back()
+ {
+ node_type* pCurrent = static_cast<node_type*>(mpNext);
+
+ while(pCurrent->mpNext)
+ pCurrent = static_cast<node_type*>(pCurrent->mpNext);
+
+ *pCurrent->mppPrevNext = NULL;
+ }
+
+ template <typename T>
+ inline bool intrusive_sdlist<T>::contains(const value_type& value) const
+ {
+ const intrusive_sdlist_node* pCurrent;
+
+ for(pCurrent = mpNext; pCurrent; pCurrent = pCurrent->mpNext)
+ {
+ if(pCurrent == &value)
+ break;
+ }
+
+ return (pCurrent != NULL);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator intrusive_sdlist<T>::locate(value_type& value)
+ {
+ intrusive_sdlist_node* pCurrent;
+
+ for(pCurrent = static_cast<value_type*>(mpNext); pCurrent; pCurrent = pCurrent->mpNext)
+ {
+ if(pCurrent == &value)
+ break;
+ }
+
+ return iterator(static_cast<value_type*>(pCurrent));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::locate(const T& value) const
+ {
+ const intrusive_sdlist_node* pCurrent;
+
+ for(pCurrent = static_cast<value_type*>(mpNext); pCurrent; pCurrent = pCurrent->mpNext)
+ {
+ if(pCurrent == &value)
+ break;
+ }
+
+ return const_iterator(static_cast<value_type*>(const_cast<intrusive_sdlist_node*>(pCurrent)));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator
+ intrusive_sdlist<T>::insert(iterator position, value_type& value)
+ {
+ value.mppPrevNext = position.mpNode->mppPrevNext;
+ value.mpNext = position.mpNode;
+ *value.mppPrevNext = &value;
+ position.mpNode->mppPrevNext = &value.mpNext;
+
+ return iterator(&value);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator
+ intrusive_sdlist<T>::erase(iterator position)
+ {
+ *position.mpNode->mppPrevNext = position.mpNode->mpNext;
+ position.mpNode->mpNext->mppPrevNext = position.mpNode->mppPrevNext;
+
+ return iterator(position.mpNode);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator
+ intrusive_sdlist<T>::erase(iterator first, iterator last)
+ {
+ if(first.mpNode) // If not erasing the end...
+ {
+ *first.mpNode->mppPrevNext = last.mpNode;
+
+ if(last.mpNode) // If not erasing to the end...
+ last.mpNode->mppPrevNext = first.mpNode->mppPrevNext;
+ }
+
+ return last;
+ }
+
+
+ template <typename T>
+ inline void intrusive_sdlist<T>::remove(value_type& value)
+ {
+ *value.mppPrevNext = value.mpNext;
+ if(value.mpNext)
+ value.mpNext->mppPrevNext = value.mppPrevNext;
+ }
+
+
+ template <typename T>
+ void intrusive_sdlist<T>::swap(intrusive_sdlist& x)
+ {
+ // swap anchors
+ intrusive_sdlist_node* const temp(mpNext);
+ mpNext = x.mpNext;
+ x.mpNext = temp;
+
+ if(x.mpNext)
+ x.mpNext->mppPrevNext = &mpNext;
+
+ if(mpNext)
+ mpNext->mppPrevNext = &x.mpNext;
+ }
+
+
+
+
+
+ // To do: Complete these splice functions. Might want to look at intrusive_sdlist for help.
+
+ template <typename T>
+ void intrusive_sdlist<T>::splice(iterator /*position*/, value_type& /*value*/)
+ {
+ EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion.
+ }
+
+
+ template <typename T>
+ void intrusive_sdlist<T>::splice(iterator /*position*/, intrusive_sdlist& /*x*/)
+ {
+ EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion.
+ }
+
+
+ template <typename T>
+ void intrusive_sdlist<T>::splice(iterator /*position*/, intrusive_sdlist& /*x*/, iterator /*xPosition*/)
+ {
+ EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion.
+ }
+
+
+ template <typename T>
+ void intrusive_sdlist<T>::splice(iterator /*position*/, intrusive_sdlist& /*x*/, iterator /*first*/, iterator /*last*/)
+ {
+ EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion.
+ }
+
+
+ template <typename T>
+ inline bool intrusive_sdlist<T>::validate() const
+ {
+ return true; // To do.
+ }
+
+
+ template <typename T>
+ inline int intrusive_sdlist<T>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ bool operator==(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ // If we store an mSize member for intrusive_sdlist, we want to take advantage of it here.
+ typename intrusive_sdlist<T>::const_iterator ia = a.begin();
+ typename intrusive_sdlist<T>::const_iterator ib = b.begin();
+ typename intrusive_sdlist<T>::const_iterator enda = a.end();
+ typename intrusive_sdlist<T>::const_iterator endb = b.end();
+
+ while((ia != enda) && (ib != endb) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda) && (ib == endb);
+ }
+
+ template <typename T>
+ bool operator<(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+ template <typename T>
+ bool operator!=(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return !(a == b);
+ }
+
+ template <typename T>
+ bool operator>(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return b < a;
+ }
+
+ template <typename T>
+ bool operator<=(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return !(b < a);
+ }
+
+ template <typename T>
+ bool operator>=(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return !(a < b);
+ }
+
+ template <typename T>
+ void swap(intrusive_sdlist<T>& a, intrusive_sdlist<T>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/bonus/intrusive_slist.h b/EASTL/include/EASTL/bonus/intrusive_slist.h
new file mode 100644
index 0000000..28d445d
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/intrusive_slist.h
@@ -0,0 +1,321 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// *** Note ***
+// This implementation is incomplete.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTRUSIVE_SLIST_H
+#define EASTL_INTRUSIVE_SLIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// intrusive_slist_node
+ ///
+ struct intrusive_slist_node
+ {
+ intrusive_slist_node* mpNext;
+ };
+
+
+ /// IntrusiveSListIterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct IntrusiveSListIterator
+ {
+ typedef IntrusiveSListIterator<T, Pointer, Reference> this_type;
+ typedef IntrusiveSListIterator<T, T*, T&> iterator;
+ typedef IntrusiveSListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef T node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ IntrusiveSListIterator();
+ explicit IntrusiveSListIterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type.
+ IntrusiveSListIterator(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ }; // struct IntrusiveSListIterator
+
+
+
+ /// intrusive_slist_base
+ ///
+ /// Provides a template-less base class for intrusive_slist.
+ ///
+ class intrusive_slist_base
+ {
+ public:
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+
+ protected:
+ intrusive_slist_node* mpNext;
+
+ public:
+ intrusive_slist_base();
+
+ bool empty() const; ///< Returns true if the container is empty.
+ size_type size() const; ///< Returns the number of elements in the list; O(n).
+
+ void clear(); ///< Clears the list; O(1). No deallocation occurs.
+ void pop_front(); ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated.
+ void reverse(); ///< Reverses a list so that front and back are swapped; O(n).
+
+ //bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching.
+
+ }; // class intrusive_slist_base
+
+
+
+ /// intrusive_slist
+ ///
+ template <typename T = intrusive_slist_node>
+ class intrusive_slist : public intrusive_slist_base
+ {
+ public:
+ typedef intrusive_slist<T> this_type;
+ typedef intrusive_slist_base base_type;
+ typedef T node_type;
+ typedef T value_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef IntrusiveSListIterator<T, T*, T&> iterator;
+ typedef IntrusiveSListIterator<T, const T*, const T&> const_iterator;
+
+ public:
+ intrusive_slist(); ///< Creates an empty list.
+ //intrusive_slist(const this_type& x); ///< Creates an empty list; ignores the argument. To consider: Is this a useful function?
+ //this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument. To consider: Is this a useful function?
+
+ iterator begin(); ///< Returns an iterator pointing to the first element in the list. O(1).
+ const_iterator begin() const; ///< Returns a const_iterator pointing to the first element in the list. O(1).
+ const_iterator cbegin() const; ///< Returns a const_iterator pointing to the first element in the list. O(1).
+ iterator end(); ///< Returns an iterator pointing one-after the last element in the list. O(1).
+ const_iterator end() const; ///< Returns a const_iterator pointing one-after the last element in the list. O(1).
+ const_iterator cend() const; ///< Returns a const_iterator pointing one-after the last element in the list. O(1).
+ iterator before_begin(); ///< Returns iterator to position before begin. O(1).
+ const_iterator before_begin() const; ///< Returns iterator to previous position. O(1).
+ const_iterator cbefore_begin() const; ///< Returns iterator to previous position. O(1).
+
+ iterator previous(const_iterator position); ///< Returns iterator to previous position. O(n).
+ const_iterator previous(const_iterator position) const; ///< Returns iterator to previous position. O(n).
+
+ reference front(); ///< Returns a reference to the first element. The list must be empty.
+ const_reference front() const; ///< Returns a const reference to the first element. The list must be empty.
+
+ void push_front(value_type& value); ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list.
+ void pop_front(); ///< Removes an element from the back of the list; O(n). The element must be present, but is not deallocated.
+
+ bool contains(const value_type& value) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()).
+
+ iterator locate(value_type& value); ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n)
+ const_iterator locate(const value_type& value) const; ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n)
+
+ iterator insert(iterator position, value_type& value); ///< Inserts an element before the element pointed to by the iterator. O(n)
+ iterator insert_after(iterator position, value_type& value); ///< Inserts an element after the element pointed to by the iterator. O(1)
+
+ iterator erase(iterator position); ///< Erases the element pointed to by the iterator. O(n)
+ iterator erase_after(iterator position); ///< Erases the element after the element pointed to by the iterator. O(1)
+
+ iterator erase(iterator first, iterator last); ///< Erases elements within the iterator range [first, last). O(n).
+ iterator erase_after(iterator before_first, iterator last); ///< Erases elements within the iterator range [before_first, last). O(1).
+
+ void swap(this_type& x); ///< Swaps the contents of two intrusive lists; O(1).
+
+
+ void splice(iterator position, value_type& value); ///< Moves the given element into this list before the element pointed to by position; O(n).
+ ///< Required: x must be in some list or have first/next pointers that point it itself.
+
+ void splice(iterator position, this_type& x); ///< Moves the contents of a list into this list before the element pointed to by position; O(n).
+ ///< Required: &x != this (same as std::list).
+
+ void splice(iterator position, this_type& x, iterator xPosition); ///< Moves the given element pointed to i within the list x into the current list before
+ ///< the element pointed to by position; O(n).
+
+ void splice(iterator position, this_type& x, iterator first, iterator last); ///< Moves the range of elements [first, last) from list x into the current list before
+ ///< the element pointed to by position; O(n).
+ ///< Required: position must not be in [first, last). (same as std::list).
+
+ void splice_after(iterator position, value_type& value); ///< Moves the given element into this list after the element pointed to by position; O(1).
+ ///< Required: x must be in some list or have first/next pointers that point it itself.
+
+ void splice_after(iterator position, this_type& x); ///< Moves the contents of a list into this list after the element pointed to by position; O(n).
+ ///< Required: &x != this (same as std::list).
+
+ void splice_after(iterator position, this_type& x, iterator xPrevious); ///< Moves the element after xPrevious to be after position. O(1).
+ ///< Required: &x != this (same as std::list).
+
+ void splice_after(iterator position, this_type& x, iterator before_first, iterator before_last); ///< Moves the elements in the range of [before_first+1, before_last+1) to be after position. O(1).
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ }; // intrusive_slist
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // IntrusiveSListIterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSListIterator<T, Pointer, Reference>::IntrusiveSListIterator()
+ {
+ #if EASTL_DEBUG
+ mpNode = NULL;
+ #endif
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSListIterator<T, Pointer, Reference>::IntrusiveSListIterator(pointer pNode)
+ : mpNode(pNode)
+ {
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSListIterator<T, Pointer, Reference>::IntrusiveSListIterator(const iterator& x)
+ : mpNode(x.mpNode)
+ {
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_slist_base
+ ///////////////////////////////////////////////////////////////////////
+
+ // To do.
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_slist
+ ///////////////////////////////////////////////////////////////////////
+
+ // To do.
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ bool operator==(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ // If we store an mSize member for intrusive_slist, we want to take advantage of it here.
+ typename intrusive_slist<T>::const_iterator ia = a.begin();
+ typename intrusive_slist<T>::const_iterator ib = b.begin();
+ typename intrusive_slist<T>::const_iterator enda = a.end();
+ typename intrusive_slist<T>::const_iterator endb = b.end();
+
+ while((ia != enda) && (ib != endb) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda) && (ib == endb);
+ }
+
+ template <typename T>
+ bool operator<(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+ template <typename T>
+ bool operator!=(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return !(a == b);
+ }
+
+ template <typename T>
+ bool operator>(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return b < a;
+ }
+
+ template <typename T>
+ bool operator<=(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return !(b < a);
+ }
+
+ template <typename T>
+ bool operator>=(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return !(a < b);
+ }
+
+ template <typename T>
+ void swap(intrusive_slist<T>& a, intrusive_slist<T>& b)
+ {
+ a.swap(b);
+ }
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/bonus/list_map.h b/EASTL/include/EASTL/bonus/list_map.h
new file mode 100644
index 0000000..8a080d6
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/list_map.h
@@ -0,0 +1,932 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_LIST_MAP_H
+#define EASTL_LIST_MAP_H
+
+
+#include <EASTL/map.h>
+
+
+namespace eastl
+{
+
+ /// EASTL_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_LIST_MAP_DEFAULT_NAME
+ #define EASTL_LIST_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " list_map" // Unless the user overrides something, this is "EASTL list_map".
+ #endif
+
+ /// EASTL_MAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_LIST_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_LIST_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_LIST_MAP_DEFAULT_NAME)
+ #endif
+
+
+ /// list_map_data_base
+ ///
+ /// We define a list_map_data_base separately from list_map_data (below), because it
+ /// allows us to have non-templated operations, and it makes it so that the
+ /// list_map anchor node doesn't carry a T with it, which would waste space and
+ /// possibly lead to surprising the user due to extra Ts existing that the user
+ /// didn't explicitly create. The downside to all of this is that it makes debug
+ /// viewing of an list_map harder, given that the node pointers are of type
+ /// list_map_data_base and not list_map_data.
+ ///
+ struct list_map_data_base
+ {
+ list_map_data_base* mpNext;
+ list_map_data_base* mpPrev;
+ };
+
+
+ /// list_map_data
+ ///
+ template <typename Value>
+ struct list_map_data : public list_map_data_base
+ {
+ typedef Value value_type;
+
+ list_map_data(const value_type& value);
+
+ value_type mValue; // This is a pair of key/value.
+ };
+
+
+ /// list_map_iterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct list_map_iterator
+ {
+ typedef list_map_iterator<T, Pointer, Reference> this_type;
+ typedef list_map_iterator<T, T*, T&> iterator;
+ typedef list_map_iterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef list_map_data_base base_node_type;
+ typedef list_map_data<T> node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ list_map_iterator();
+ list_map_iterator(const base_node_type* pNode);
+ list_map_iterator(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ this_type& operator--();
+ this_type operator--(int);
+
+ }; // list_map_iterator
+
+
+ /// use_value_first
+ ///
+ /// operator()(x) simply returns x.mValue.first. Used in list_map.
+ /// This is similar to eastl::use_first, however it assumes that the input type is an object
+ /// whose mValue is an eastl::pair, and the first value in the pair is the desired return.
+ ///
+ template <typename Object>
+ struct use_value_first
+ {
+ typedef Object argument_type;
+ typedef typename Object::value_type::first_type result_type;
+
+ const result_type& operator()(const Object& x) const
+ { return x.mValue.first; }
+ };
+
+
+ /// list_map
+ ///
+ /// Implements a map like container, which also provides functionality similar to a list.
+ ///
+ /// Note: Like a map, keys must still be unique. As such, push_back() and push_front() operations
+ /// return a bool indicating success, or failure if the entry's key is already in use.
+ ///
+ /// list_map is designed to improve performance for situations commonly implemented as:
+ /// A map, which must be iterated over to find the oldest entry, or purge expired entries.
+ /// A list, which must be iterated over to remove a player's record when they sign off.
+ ///
+ /// list_map requires a little more memory per node than either a list or map alone,
+ /// and many of list_map's functions have a higher operational cost (CPU time) than their
+ /// counterparts in list and map. However, as the node count increases, list_map quickly outperforms
+ /// either a list or a map when find [by-index] and front/back type operations are required.
+ ///
+ /// In essence, list_map avoids O(n) iterations at the expense of additional costs to quick (O(1) and O(log n) operations:
+ /// push_front(), push_back(), pop_front() and pop_back() have O(log n) operation time, similar to map::insert(), rather than O(1) time like a list,
+ /// however, front() and back() maintain O(1) operation time.
+ ///
+ /// As a canonical example, consider a large backlog of player group invites, which are removed when either:
+ /// The invitation times out - in main loop: while( !listMap.empty() && listMap.front().IsExpired() ) { listMap.pop_front(); }
+ /// The player rejects the outstanding invitation - on rejection: iter = listMap.find(playerId); if (iter != listMap.end()) { listMap.erase(iter); }
+ ///
+ /// For a similar example, consider a high volume pending request container which must:
+ /// Time out old requests (similar to invites timing out above)
+ /// Remove requests once they've been handled (similar to rejecting invites above)
+ ///
+ /// For such usage patterns, the performance benefits of list_map become dramatic with
+ /// common O(n) operations once the node count rises to hundreds or more.
+ ///
+ /// When high performance is a priority, Containers with thousands of nodes or more
+ /// can quickly result in unacceptable performance when executing even infrequenty O(n) operations.
+ ///
+ /// In order to maintain strong performance, avoid iterating over list_map whenever possible.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// find_as
+ /// In order to support the ability to have a tree of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the tree's key type. See the find_as function
+ /// for more documentation on this.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a list_map container, your pool
+ /// needs to contain items of type list_map::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+ /// typedef list_map<Widget, int, less<Widget>, MemoryPool> WidgetMap; // Delare your WidgetMap type.
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetMap myMap(&myPool); // Create a map that uses the pool.
+ ///
+ template <typename Key, typename T, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+ class list_map
+ : protected rbtree<Key, eastl::list_map_data<eastl::pair<const Key, T> >, Compare, Allocator, eastl::use_value_first<eastl::list_map_data<eastl::pair<const Key, T> > >, true, true>
+ {
+ public:
+ typedef rbtree<Key, eastl::list_map_data<eastl::pair<const Key, T> >, Compare, Allocator,
+ eastl::use_value_first<eastl::list_map_data<eastl::pair<const Key, T> > >, true, true> base_type;
+ typedef list_map<Key, T, Compare, Allocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::key_type key_type;
+ typedef T mapped_type;
+ typedef typename eastl::pair<const Key, T> value_type; // This is intentionally different from base_type::value_type
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef typename base_type::node_type node_type; // Despite the internal and external values being different, we're keeping the node type the same as the base
+ // in order to allow for pool allocation. See EASTL/map.h for more information.
+ typedef typename eastl::list_map_iterator<value_type, value_type*, value_type&> iterator; // This is intentionally different from base_type::iterator
+ typedef typename eastl::list_map_iterator<value_type, const value_type*, const value_type&> const_iterator; // This is intentionally different from base_type::const_iterator
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename eastl::pair<iterator, bool> insert_return_type; // This is intentionally removed, as list_map doesn't support insert() functions, in favor of list like push_back and push_front
+ typedef typename eastl::use_first<value_type> extract_key; // This is intentionally different from base_type::extract_key
+
+ using base_type::get_allocator;
+ using base_type::set_allocator;
+ using base_type::key_comp;
+ using base_type::empty;
+ using base_type::size;
+
+ protected:
+ typedef typename eastl::list_map_data<eastl::pair<const Key, T> > internal_value_type;
+
+ protected:
+ // internal base node, acting as the sentinel for list like behaviors
+ list_map_data_base mNode;
+
+ public:
+ list_map(const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR);
+ list_map(const Compare& compare, const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
+
+ // To do: Implement the following:
+
+ //list_map(const this_type& x);
+ //list_map(this_type&& x);
+ //list_map(this_type&& x, const allocator_type& allocator);
+ //list_map(std::initializer_list<mapped_type> ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR);
+
+ //template <typename Iterator>
+ //list_map(Iterator itBegin, Iterator itEnd);
+
+ //this_type& operator=(const this_type& x);
+ //this_type& operator=(std::initializer_list<mapped_type> ilist);
+ //this_type& operator=(this_type&& x);
+
+ //void swap(this_type& x);
+
+ public:
+ // iterators
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ public:
+ // List like methods
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ // push_front and push_back which takes in a key/value pair
+ bool push_front(const value_type& value);
+ bool push_back(const value_type& value);
+
+ // push_front and push_back which take key and value separately, for convenience
+ bool push_front(const key_type& key, const mapped_type& value);
+ bool push_back(const key_type& key, const mapped_type& value);
+
+ void pop_front();
+ void pop_back();
+
+ public:
+ // Map like methods
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+ template <typename U, typename Compare2>
+ iterator find_as(const U& u, Compare2 compare2);
+ template <typename U, typename Compare2>
+ const_iterator find_as(const U& u, Compare2 compare2) const;
+
+ size_type count(const key_type& key) const;
+ size_type erase(const key_type& key);
+
+ public:
+ // Shared methods which are common to list and map
+ iterator erase(const_iterator position);
+ reverse_iterator erase(const_reverse_iterator position);
+
+ void clear();
+ void reset_lose_memory();
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ public:
+ // list like functionality which is in consideration for implementation:
+ // iterator insert(const_iterator position, const value_type& value);
+ // void remove(const mapped_type& x);
+
+ public:
+ // list like functionality which may be implemented, but is discouraged from implementation:
+ // due to the liklihood that they would require O(n) time to execute.
+ // template <typename Predicate>
+ // void remove_if(Predicate);
+ // void reverse();
+ // void sort();
+ // template<typename Compare>
+ // void sort(Compare compare);
+
+ public:
+ // map like functionality which list_map does not support, due to abmiguity with list like functionality:
+ #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ template <typename InputIterator>
+ list_map(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR) = delete;
+
+ insert_return_type insert(const value_type& value) = delete;
+ iterator insert(const_iterator position, const value_type& value) = delete;
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last) = delete;
+
+ insert_return_type insert(const key_type& key) = delete;
+
+ iterator erase(const_iterator first, const_iterator last) = delete;
+ reverse_iterator erase(reverse_iterator first, reverse_iterator last) = delete;
+
+ void erase(const key_type* first, const key_type* last) = delete;
+
+ iterator lower_bound(const key_type& key) = delete;
+ const_iterator lower_bound(const key_type& key) const = delete;
+
+ iterator upper_bound(const key_type& key) = delete;
+ const_iterator upper_bound(const key_type& key) const = delete;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& key) = delete;
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& key) const = delete;
+
+ mapped_type& operator[](const key_type& key) = delete; // Of map, multimap, set, and multimap, only map has operator[].
+ #endif
+
+ public:
+ // list like functionality which list_map does not support, due to ambiguity with map like functionality:
+ #if 0
+ reference push_front() = delete;
+ void* push_front_uninitialized() = delete;
+
+ reference push_back() = delete;
+ void* push_back_uninitialized() = delete;
+
+ iterator insert(const_iterator position) = delete;
+
+ void insert(const_iterator position, size_type n, const value_type& value) = delete;
+
+ template <typename InputIterator>
+ void insert(const_iterator position, InputIterator first, InputIterator last) = delete;
+
+ iterator erase(const_iterator first, const_iterator last) = delete;
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last) = delete;
+
+ void splice(const_iterator position, this_type& x) = delete
+ void splice(const_iterator position, this_type& x, const_iterator i) = delete;
+ void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last) = delete;
+
+ void merge(this_type& x) = delete;
+
+ template <typename Compare>
+ void merge(this_type& x, Compare compare) = delete;
+
+ void unique() = delete; // Uniqueness is enforced by map functionality
+
+ template <typename BinaryPredicate>
+ void unique(BinaryPredicate) = delete; // Uniqueness is enforced by map functionality
+ #endif
+
+ }; // list_map
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // list_map_data
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value>
+ inline list_map_data<Value>::list_map_data(const Value& value)
+ : mValue(value)
+ {
+ mpNext = NULL; // GCC 4.8 is generating warnings about referencing these values in list_map::push_front unless we
+ mpPrev = NULL; // initialize them here. The compiler seems to be mistaken, as our code isn't actually using them unintialized.
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // list_map_iterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline list_map_iterator<T, Pointer, Reference>::list_map_iterator()
+ : mpNode(NULL)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline list_map_iterator<T, Pointer, Reference>::list_map_iterator(const base_node_type* pNode)
+ : mpNode(static_cast<node_type*>(const_cast<base_node_type*>(pNode)))
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline list_map_iterator<T, Pointer, Reference>::list_map_iterator(const iterator& x)
+ : mpNode(const_cast<node_type*>(x.mpNode))
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::reference
+ list_map_iterator<T, Pointer, Reference>::operator*() const
+ {
+ return mpNode->mValue;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::pointer
+ list_map_iterator<T, Pointer, Reference>::operator->() const
+ {
+ return &mpNode->mValue;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::this_type&
+ list_map_iterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::this_type
+ list_map_iterator<T, Pointer, Reference>::operator++(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::this_type&
+ list_map_iterator<T, Pointer, Reference>::operator--()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpPrev);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::this_type
+ list_map_iterator<T, Pointer, Reference>::operator--(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpPrev);
+ return temp;
+ }
+
+
+ // We provide additional template paremeters here to support comparisons between const and non-const iterators.
+ // See C++ defect report #179, or EASTL/list.h for more information.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const list_map_iterator<T, PointerA, ReferenceA>& a,
+ const list_map_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const list_map_iterator<T, PointerA, ReferenceA>& a,
+ const list_map_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const list_map_iterator<T, Pointer, Reference>& a,
+ const list_map_iterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // list_map
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline list_map<Key, T, Compare, Allocator>::list_map(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ mNode.mpNext = &mNode;
+ mNode.mpPrev = &mNode;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline list_map<Key, T, Compare, Allocator>::list_map(const Compare& compare, const allocator_type& allocator)
+ : base_type(compare, allocator)
+ {
+ mNode.mpNext = &mNode;
+ mNode.mpPrev = &mNode;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::begin() EA_NOEXCEPT
+ {
+ return iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::begin() const EA_NOEXCEPT
+ {
+ return const_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::cbegin() const EA_NOEXCEPT
+ {
+ return const_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::end() EA_NOEXCEPT
+ {
+ return iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::end() const EA_NOEXCEPT
+ {
+ return const_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::cend() const EA_NOEXCEPT
+ {
+ return const_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reverse_iterator
+ list_map<Key, T, Compare, Allocator>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reverse_iterator
+ list_map<Key, T, Compare, Allocator>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reverse_iterator
+ list_map<Key, T, Compare, Allocator>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reverse_iterator
+ list_map<Key, T, Compare, Allocator>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reverse_iterator
+ list_map<Key, T, Compare, Allocator>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reverse_iterator
+ list_map<Key, T, Compare, Allocator>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reference
+ list_map<Key, T, Compare, Allocator>::front()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<internal_value_type*>(mNode.mpNext) == &mNode))
+ EASTL_FAIL_MSG("list_map::front -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<internal_value_type*>(mNode.mpNext)->mValue;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reference
+ list_map<Key, T, Compare, Allocator>::front() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<internal_value_type*>(mNode.mpNext) == &mNode))
+ EASTL_FAIL_MSG("list_map::front -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<internal_value_type*>(mNode.mpNext)->mValue;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reference
+ list_map<Key, T, Compare, Allocator>::back()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<internal_value_type*>(mNode.mpNext) == &mNode))
+ EASTL_FAIL_MSG("list_map::back -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<internal_value_type*>(mNode.mpPrev)->mValue;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reference
+ list_map<Key, T, Compare, Allocator>::back() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<internal_value_type*>(mNode.mpNext) == &mNode))
+ EASTL_FAIL_MSG("list_map::back -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<internal_value_type*>(mNode.mpPrev)->mValue;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::push_front(const value_type& value)
+ {
+ internal_value_type tempValue(value);
+ typename base_type::insert_return_type baseReturn = base_type::insert(tempValue);
+
+ // Did the insert succeed?
+ if (baseReturn.second)
+ {
+ internal_value_type* pNode = &(*baseReturn.first);
+
+ pNode->mpNext = mNode.mpNext;
+ pNode->mpPrev = &mNode;
+
+ mNode.mpNext->mpPrev = pNode;
+ mNode.mpNext = pNode;
+
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::push_back(const value_type& value)
+ {
+ internal_value_type tempValue(value);
+ typename base_type::insert_return_type baseReturn = base_type::insert(tempValue);
+
+ // Did the insert succeed?
+ if (baseReturn.second)
+ {
+ internal_value_type* pNode = &(*baseReturn.first);
+
+ pNode->mpPrev = mNode.mpPrev;
+ pNode->mpNext = &mNode;
+
+ mNode.mpPrev->mpNext = pNode;
+ mNode.mpPrev = pNode;
+
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::push_front(const key_type& key, const mapped_type& value)
+ {
+ return push_front(eastl::make_pair(key, value));
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::push_back(const key_type& key, const mapped_type& value)
+ {
+ return push_back(eastl::make_pair(key, value));
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ void list_map<Key, T, Compare, Allocator>::pop_front()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(empty()))
+ EASTL_FAIL_MSG("list_map::pop_front -- empty container");
+ #endif
+
+ erase(static_cast<internal_value_type*>(mNode.mpNext)->mValue.first);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ void list_map<Key, T, Compare, Allocator>::pop_back()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(empty()))
+ EASTL_FAIL_MSG("list_map::pop_back -- empty container");
+ #endif
+
+ erase(static_cast<internal_value_type*>(mNode.mpPrev)->mValue.first);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::find(const key_type& key)
+ {
+ typename base_type::iterator baseIter = base_type::find(key);
+ if (baseIter != base_type::end())
+ {
+ return iterator(&(*baseIter));
+ }
+ else
+ {
+ return end();
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::find(const key_type& key) const
+ {
+ typename base_type::const_iterator baseIter = base_type::find(key);
+ if (baseIter != base_type::end())
+ {
+ return const_iterator(&(*baseIter));
+ }
+ else
+ {
+ return end();
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ template <typename U, typename Compare2>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::find_as(const U& u, Compare2 compare2)
+ {
+ typename base_type::iterator baseIter = base_type::find_as(u, compare2);
+ if (baseIter != base_type::end())
+ {
+ return iterator(&(*baseIter));
+ }
+ else
+ {
+ return end();
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ template <typename U, typename Compare2>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::find_as(const U& u, Compare2 compare2) const
+ {
+ typename base_type::const_iterator baseIter = base_type::find_as(u, compare2);
+ if (baseIter != base_type::end())
+ {
+ return const_iterator(&(*baseIter));
+ }
+ else
+ {
+ return end();
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::size_type
+ list_map<Key, T, Compare, Allocator>::count(const key_type& key) const
+ {
+ const typename base_type::const_iterator it = base_type::find(key);
+ return (it != base_type::end()) ? 1 : 0;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::size_type
+ list_map<Key, T, Compare, Allocator>::erase(const key_type& key)
+ {
+ typename base_type::iterator baseIter = base_type::find(key);
+ if (baseIter != base_type::end())
+ {
+ internal_value_type* node = &(*baseIter);
+
+ node->mpNext->mpPrev = node->mpPrev;
+ node->mpPrev->mpNext = node->mpNext;
+
+ base_type::erase(baseIter);
+
+ return 1;
+ }
+ return 0;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::erase(const_iterator position)
+ {
+ iterator posIter(position.mpNode); // Convert from const.
+ iterator eraseIter(posIter++);
+ erase(eraseIter->first);
+ return posIter;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reverse_iterator
+ list_map<Key, T, Compare, Allocator>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ void list_map<Key, T, Compare, Allocator>::clear()
+ {
+ base_type::clear();
+
+ mNode.mpNext = &mNode;
+ mNode.mpPrev = &mNode;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ void list_map<Key, T, Compare, Allocator>::reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+
+ mNode.mpNext = &mNode;
+ mNode.mpPrev = &mNode;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::validate() const
+ {
+ if (!base_type::validate())
+ {
+ return false;
+ }
+
+ size_type nodeCount(0);
+ list_map_data_base* node = mNode.mpNext;
+ while (node != &mNode)
+ {
+ internal_value_type* data = static_cast<internal_value_type*>(node);
+ if (base_type::find(data->mValue.first) == base_type::end())
+ {
+ return false;
+ }
+ node = node->mpNext;
+ ++nodeCount;
+ }
+ if (nodeCount != size())
+ {
+ return false;
+ }
+ nodeCount = 0;
+ node = mNode.mpPrev;
+ while (node != &mNode)
+ {
+ internal_value_type* data = static_cast<internal_value_type*>(node);
+ if (base_type::find(data->mValue.first) == base_type::end())
+ {
+ return false;
+ }
+ node = node->mpPrev;
+ ++nodeCount;
+ }
+ if (nodeCount != size())
+ {
+ return false;
+ }
+
+ return true;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ int list_map<Key, T, Compare, Allocator>::validate_iterator(const_iterator iter) const
+ {
+ for (const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if (temp == iter)
+ {
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+ }
+
+ if (iter == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/EASTL/include/EASTL/bonus/lru_cache.h b/EASTL/include/EASTL/bonus/lru_cache.h
new file mode 100644
index 0000000..a8d7c33
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/lru_cache.h
@@ -0,0 +1,424 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// lru_cache is a container that simplifies caching of objects in a map.
+// Basically, you give the container a key, like a string, and the data you want.
+// The container provides callback mechanisms to generate data if it's missing
+// as well as delete data when it's purged from the cache. This container
+// uses a least recently used method: whatever the oldest item is will be
+// replaced with a new entry.
+//
+// Algorithmically, the container is a combination of a map and a list.
+// The list stores the age of the entries by moving the entry to the head
+// of the list on each access, either by a call to get() or to touch().
+// The map is just the map as one would expect.
+//
+// This is useful for caching off data that is expensive to generate,
+// for example text to speech wave files that are dynamically generated,
+// but that will need to be reused, as is the case in narration of menu
+// entries as a user scrolls through the entries.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LRUCACHE_H
+#define EASTL_LRUCACHE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+#pragma once
+#endif
+
+#include <EASTL/list.h>
+#include <EASTL/unordered_map.h>
+#include <EASTL/optional.h>
+
+namespace eastl
+{
+ /// EASTL_LRUCACHE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_LRUCACHE_DEFAULT_NAME
+ #define EASTL_LRUCACHE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " lru_cache" // Unless the user overrides something, this is "EASTL lru_cache".
+ #endif
+
+
+ /// EASTL_LRUCACHE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_LRUCACHE_DEFAULT_ALLOCATOR
+ #define EASTL_LRUCACHE_DEFAULT_ALLOCATOR allocator_type(EASTL_LRUCACHE_DEFAULT_NAME)
+ #endif
+
+ /// lru_cache
+ ///
+ /// Implements a caching map based off of a key and data.
+ /// LRUList parameter is any container that guarantees the validity of its iterator even after a modification (e.g. list)
+ /// LRUMap is any mapping container that can map a key to some data. By default, we use unordered_set, but it might be better
+ /// to use hash_map or some other structure depending on your key/data combination. For example, you may want to swap the
+ /// map backing if using strings as keys or if the data objects are small. In any case, unordered_set is a good default and should
+ /// work well enough since the purpose of this class is to cache results of expensive, order of milliseconds, operations
+ ///
+ /// Algorithmic Performance (default data structures):
+ /// touch() -> O(1)
+ /// insert() / update(), get() / operator[] -> equivalent to unordered_set (O(1) on average, O(n) worst)
+ /// size() -> O(1)
+ ///
+ /// All accesses to a given key (insert, update, get) will push that key to most recently used.
+ /// If the data objects are shared between threads, it would be best to use a smartptr to manage the lifetime of the data.
+ /// as it could be removed from the cache while in use by another thread.
+ template <typename Key,
+ typename Value,
+ typename Allocator = EASTLAllocatorType,
+ typename list_type = eastl::list<Key, Allocator>,
+ typename map_type = eastl::unordered_map<Key,
+ eastl::pair<Value, typename list_type::iterator>,
+ eastl::hash<Key>,
+ eastl::equal_to<Key>,
+ Allocator>>
+ class lru_cache
+ {
+ public:
+ using key_type = Key;
+ using value_type = Value;
+ using allocator_type = Allocator;
+ using size_type = eastl_size_t;
+ using list_iterator = typename list_type::iterator;
+ using map_iterator = typename map_type::iterator;
+ using data_container_type = eastl::pair<value_type, list_iterator>;
+ using iterator = typename map_type::iterator;
+ using const_iterator = typename map_type::const_iterator;
+ using this_type = lru_cache<key_type, value_type, Allocator, list_type, map_type>;
+ using create_callback_type = eastl::function<value_type(key_type)>;
+ using delete_callback_type = eastl::function<void(const value_type &)>;
+
+ /// lru_cache constructor
+ ///
+ /// Creates a Key / Value map that only stores size Value objects until it deletes them.
+ /// For complex objects or operations, the creator and deletor callbacks can be used.
+ /// This works just like a regular map object: on access, the Value will be created if it doesn't exist, returned otherwise.
+ explicit lru_cache(size_type size,
+ const allocator_type& allocator = EASTL_LRUCACHE_DEFAULT_ALLOCATOR,
+ create_callback_type creator = nullptr,
+ delete_callback_type deletor = nullptr)
+ : m_list(allocator)
+ , m_map(allocator)
+ , m_capacity(size)
+ , m_create_callback(creator)
+ , m_delete_callback(deletor)
+ {
+ }
+
+ /// lru_cache destructor
+ ///
+ /// Iterates across every entry in the map and calls the deletor before calling the standard destructors
+ ~lru_cache()
+ {
+ // Destruct everything we have cached
+ for (auto& iter : m_map)
+ {
+ if (m_delete_callback)
+ m_delete_callback(iter.second.first);
+ }
+ }
+
+ lru_cache(std::initializer_list<eastl::pair<Key, Value>> il)
+ : lru_cache(static_cast<size_type>(il.size()))
+ {
+ for(auto& p : il)
+ insert_or_assign(p.first, p.second);
+ }
+
+ // TODO(rparolin): Why do we prevent copies? And what about moves?
+ lru_cache(const this_type&) = delete;
+ this_type &operator=(const this_type&) = delete;
+
+ /// insert
+ ///
+ /// insert key k with value v.
+ /// If key already exists, no change is made and the return value is false.
+ /// If the key doesn't exist, the data is added to the map and the return value is true.
+ bool insert(const key_type& k, const value_type& v)
+ {
+ if (m_map.find(k) == m_map.end())
+ {
+ make_space();
+
+ m_list.push_front(k);
+ m_map[k] = data_container_type(v, m_list.begin());
+
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ /// emplace
+ ///
+ /// Places a new object in place k created with args
+ /// If the key already exists, it is replaced.
+ template <typename... Args>
+ void emplace(const key_type& k, Args&&... args)
+ {
+ make_space();
+
+ m_list.push_front(k);
+ m_map.emplace(k, data_container_type(eastl::forward<Args>(args)..., m_list.begin()));
+ }
+
+ /// insert_or_assign
+ ///
+ /// Same as add, but replaces the data at key k, if it exists, with the new entry v
+ /// Note that the deletor for the old v will be called before it's replaced with the new value of v
+ void insert_or_assign(const key_type& k, const value_type& v)
+ {
+ auto iter = m_map.find(k);
+
+ if (m_map.find(k) != m_map.end())
+ {
+ assign(iter, v);
+ }
+ else
+ {
+ insert(k, v);
+ }
+ }
+
+ /// contains
+ ///
+ /// Returns true if key k exists in the cache
+ bool contains(const key_type& k) const
+ {
+ return m_map.find(k) != m_map.end();
+ }
+
+ /// at
+ ///
+ /// Retrives the data for key k, not valid if k does not exist
+ eastl::optional<value_type> at(const key_type& k)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ return iter->second.first;
+ }
+ else
+ {
+ return eastl::nullopt;
+ }
+ }
+
+ /// get
+ ///
+ /// Retrives the data for key k. If no data exists, it will be created by calling the
+ /// creator.
+ value_type& get(const key_type& k)
+ {
+ auto iter = m_map.find(k);
+
+ // The entry exists in the cache
+ if (iter != m_map.end())
+ {
+ touch(k);
+ return iter->second.first;
+ }
+ else // The entry doesn't exist in the cache, so create one
+ {
+ // Add the entry to the map
+ insert(k, m_create_callback ? m_create_callback(k) : value_type());
+
+ // return the new data
+ return m_map[k].first;
+ }
+ }
+
+ /// Equivalent to get(k)
+ value_type& operator[](const key_type& k) { return get(k); }
+
+ /// erase
+ ///
+ /// erases key k from the cache.
+ /// If k does not exist, returns false. If k exists, returns true.
+ bool erase(const key_type& k)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ m_list.erase(iter->second.second);
+
+ // Delete the actual entry
+ map_erase(iter);
+
+ return true;
+ }
+
+ return false;
+ }
+
+ /// erase_oldest
+ ///
+ /// Removes the oldest entry from the cache.
+ void erase_oldest()
+ {
+ auto key = m_list.back();
+ m_list.pop_back();
+
+ // Delete the actual entry
+ auto iter = m_map.find(key);
+ map_erase(iter);
+ }
+
+ /// touch
+ ///
+ /// Touches key k, marking it as most recently used.
+ /// If k does not exist, returns false. If the touch was successful, returns true.
+ bool touch(const key_type& k)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ touch(iter);
+ return true;
+ }
+
+ return false;
+ }
+
+ /// touch
+ ///
+ /// Touches key at iterator iter, moving it to most recently used position
+ void touch(iterator& iter)
+ {
+ auto listRef = iter->second.second;
+
+ m_list.erase(listRef);
+ m_list.push_front(iter->first);
+ iter->second.second = m_list.begin();
+ }
+
+ /// assign
+ ///
+ /// Updates key k with data v.
+ /// If key k does not exist, returns false and no changes are made.
+ /// If key k exists, existing data has its deletor called and key k's data is replaced with new v data
+ bool assign(const key_type& k, const value_type& v)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ assign(iter, v);
+ return true;
+ }
+
+ return false;
+ }
+
+ /// assign
+ ///
+ /// Updates data at spot iter with data v.
+ void assign(iterator& iter, const value_type& v)
+ {
+ if (m_delete_callback)
+ m_delete_callback(iter->second.first);
+ touch(iter);
+ iter->second.first = v;
+ }
+
+ // standard container functions
+ iterator begin() EA_NOEXCEPT { return m_map.begin(); }
+ iterator end() EA_NOEXCEPT { return m_map.end(); }
+ iterator rbegin() EA_NOEXCEPT { return m_map.rbegin(); }
+ iterator rend() EA_NOEXCEPT { return m_map.rend(); }
+ const_iterator begin() const EA_NOEXCEPT { return m_map.begin(); }
+ const_iterator cbegin() const EA_NOEXCEPT { return m_map.cbegin(); }
+ const_iterator crbegin() const EA_NOEXCEPT { return m_map.crbegin(); }
+ const_iterator end() const EA_NOEXCEPT { return m_map.end(); }
+ const_iterator cend() const EA_NOEXCEPT { return m_map.cend(); }
+ const_iterator crend() const EA_NOEXCEPT { return m_map.crend(); }
+
+ bool empty() const EA_NOEXCEPT { return m_map.empty(); }
+ size_type size() const EA_NOEXCEPT { return m_map.size(); }
+ size_type capacity() const EA_NOEXCEPT { return m_capacity; }
+
+ void clear() EA_NOEXCEPT
+ {
+ // Since we have a delete callback, we want to reuse the trim function by cheating the max
+ // size to clear all the entries to avoid duplicating code.
+ auto old_max = m_capacity;
+
+ m_capacity = 0;
+ trim();
+ m_capacity = old_max;
+ }
+
+ /// resize
+ ///
+ /// Resizes the cache. Can be used to either expand or contract the cache.
+ /// In the case of a contraction, the oldest entries will be evicted with their respective
+ /// deletors called before completing.
+ void resize(size_type newSize)
+ {
+ m_capacity = newSize;
+ trim();
+ }
+
+ void setCreateCallback(create_callback_type callback) { m_create_callback = callback; }
+ void setDeleteCallback(delete_callback_type callback) { m_delete_callback = callback; }
+
+ // EASTL extensions
+ const allocator_type& get_allocator() const EA_NOEXCEPT { return m_map.get_allocator(); }
+ allocator_type& get_allocator() EA_NOEXCEPT { return m_map.get_allocator(); }
+ void set_allocator(const allocator_type& allocator) { m_map.set_allocator(allocator); m_list.set_allocator(allocator); }
+
+ /// Does not reset the callbacks
+ void reset_lose_memory() EA_NOEXCEPT { m_map.reset_lose_memory(); m_list.reset_lose_memory(); }
+
+ private:
+ inline void map_erase(map_iterator pos)
+ {
+ if (m_delete_callback)
+ m_delete_callback(pos->second.first);
+ m_map.erase(pos);
+ }
+
+ bool trim()
+ {
+ if (size() <= m_capacity)
+ {
+ return false; // No trim necessary
+ }
+
+ // We need to trim
+ do
+ {
+ erase_oldest();
+ } while (m_list.size() > m_capacity);
+
+ return true;
+ }
+
+ void make_space()
+ {
+ if (size() == m_capacity)
+ {
+ erase_oldest();
+ }
+ }
+
+ private:
+ list_type m_list;
+ map_type m_map;
+ size_type m_capacity;
+ create_callback_type m_create_callback;
+ delete_callback_type m_delete_callback;
+ };
+}
+
+
+
+#endif
diff --git a/EASTL/include/EASTL/bonus/overloaded.h b/EASTL/include/EASTL/bonus/overloaded.h
new file mode 100644
index 0000000..55ca158
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/overloaded.h
@@ -0,0 +1,81 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_OVERLOADED_H
+#define EASTL_OVERLOADED_H
+
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed
+ // improvements in apps as a result.
+#endif
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////
+ /// overloaded
+ ///
+ /// A helper class that permits you to combine multiple function objects into one.
+ /// Typically, this helper is really handy when visiting an eastl::variant with multiple lambdas.
+ /// Example:
+ ///
+ /// eastl::variant<int, string> v{42};
+ ///
+ /// eastl::visit(
+ /// eastl::overloaded{
+ /// [](const int& x) { std::cout << "Visited an integer: " << x << "\n"; }, // Will reach that lambda with x == 42.
+ /// [](const string& s) { std::cout << "Visited an string: " << s << "\n"; }
+ /// },
+ /// v
+ /// );
+ ///////////////////////////////////////////////////////////////////////////
+ template <class... T>
+ struct overloaded;
+
+ template <class T>
+ struct overloaded<T> : T
+ {
+ template <class U>
+ EA_CPP14_CONSTEXPR overloaded(U&& u) : T(eastl::forward<U>(u))
+ {
+ }
+
+ using T::operator();
+ };
+
+ template <class T, class... R>
+ struct overloaded<T, R...> : T, overloaded<R...>
+ {
+ template <class U, class... V>
+ EA_CPP14_CONSTEXPR overloaded(U&& u, V&&... v) : T(eastl::forward<U>(u)), overloaded<R...>(eastl::forward<V>(v)...)
+ {
+ }
+
+ using T::operator();
+ using overloaded<R...>::operator();
+ };
+
+ #ifdef __cpp_deduction_guides
+ template <class... T>
+ overloaded(T...) -> overloaded<T...>;
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// make_overloaded
+ ///
+ /// Helper function to create an overloaded instance when lacking deduction guides.
+ /// make_overloaded(f1, f2, f3) == overloaded{f1, f2, f3}
+ ///////////////////////////////////////////////////////////////////////////
+ template <class... T>
+ EA_CPP14_CONSTEXPR overloaded<typename eastl::remove_cvref<T>::type...> make_overloaded(T&&... t)
+ {
+ return overloaded<typename eastl::remove_cvref<T>::type...>{eastl::forward<T>(t)...};
+ }
+
+} // namespace eastl
+
+#endif // EASTL_OVERLOADED_H \ No newline at end of file
diff --git a/EASTL/include/EASTL/bonus/ring_buffer.h b/EASTL/include/EASTL/bonus/ring_buffer.h
new file mode 100644
index 0000000..fcd8fd2
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/ring_buffer.h
@@ -0,0 +1,1581 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// A ring buffer is a FIFO (first-in, first-out) container which acts
+// much like a queue. The difference is that a ring buffer is implemented
+// via chasing pointers around a given container instead of like queue
+// adds to the writes to the end of the container are reads from the begin.
+// The benefit of a ring buffer is that memory allocations don't occur
+// and new elements are neither added nor removed from the container.
+// Elements in the container are simply assigned values in circles around
+// the container.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_RING_BUFFER_H
+#define EASTL_RING_BUFFER_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/vector.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// EASTL_RING_BUFFER_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_RING_BUFFER_DEFAULT_NAME
+ #define EASTL_RING_BUFFER_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " ring_buffer" // Unless the user overrides something, this is "EASTL ring_buffer".
+ #endif
+
+ /// EASTL_RING_BUFFER_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_RING_BUFFER_DEFAULT_ALLOCATOR
+ #define EASTL_RING_BUFFER_DEFAULT_ALLOCATOR allocator_type(EASTL_RING_BUFFER_DEFAULT_NAME)
+ #endif
+
+
+ /// ring_buffer_iterator
+ ///
+ /// We force this iterator to act like a random access iterator even if
+ /// the underlying container doesn't support random access iteration.
+ /// Any BidirectionalIterator can be a RandomAccessIterator; it just
+ /// might be inefficient in some cases.
+ ///
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ struct ring_buffer_iterator
+ {
+ public:
+ typedef ring_buffer_iterator<T, Pointer, Reference, Container> this_type;
+ typedef T value_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef typename Container::size_type size_type;
+ typedef typename Container::difference_type difference_type;
+ typedef typename Container::iterator container_iterator;
+ typedef typename Container::const_iterator container_const_iterator;
+ typedef ring_buffer_iterator<T, T*, T&, Container> iterator;
+ typedef ring_buffer_iterator<T, const T*, const T&, Container> const_iterator;
+ typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+
+ public:
+ Container* mpContainer;
+ container_iterator mContainerIterator;
+
+ public:
+ ring_buffer_iterator();
+ ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator);
+ ring_buffer_iterator(const iterator& x);
+
+ ring_buffer_iterator& operator=(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ this_type& operator--();
+ this_type operator--(int);
+
+ this_type& operator+=(difference_type n);
+ this_type& operator-=(difference_type n);
+
+ this_type operator+(difference_type n) const;
+ this_type operator-(difference_type n) const;
+
+ protected:
+ void increment(difference_type n, EASTL_ITC_NS::input_iterator_tag);
+ void increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag);
+
+ }; // struct ring_buffer_iterator
+
+
+
+ /// ring_buffer
+ ///
+ /// Implements a ring buffer via a given container type, which would
+ /// typically be a vector or array, though any container which supports
+ /// bidirectional iteration would work.
+ ///
+ /// A ring buffer is a FIFO (first-in, first-out) container which acts
+ /// much like a queue. The difference is that a ring buffer is implemented
+ /// via chasing pointers around a container and moving the read and write
+ /// positions forward (and possibly wrapping around) as the container is
+ /// read and written via pop_front and push_back.
+ ///
+ /// The benefit of a ring buffer is that memory allocations don't occur
+ /// and new elements are neither added nor removed from the container.
+ /// Elements in the container are simply assigned values in circles around
+ /// the container.
+ ///
+ /// ring_buffer is different from other containers -- including adapter
+ /// containers -- in how iteration is done. Iteration of a ring buffer
+ /// starts at the current begin position, proceeds to the end of the underlying
+ /// container, and continues at the begin of the underlying container until
+ /// the ring buffer's current end position. Thus a ring_buffer does
+ /// indeed have a begin and an end, though the values of begin and end
+ /// chase each other around the container. An empty ring_buffer is one
+ /// in which end == begin, and a full ring_buffer is one in which
+ /// end + 1 == begin.
+ ///
+ /// Example of a ring buffer layout, where + indicates queued items:
+ /// ++++++++++--------------------------------+++++++++
+ /// ^ ^
+ /// end begin
+ ///
+ /// Empty ring buffer:
+ /// ---------------------------------------------------
+ /// ^
+ /// begin / end
+ ///
+ /// Full ring buffer. Note that one item is necessarily unused; it is
+ /// analagous to a '\0' at the end of a C string:
+ /// +++++++++++++++++++++++++++++++++++++++++-+++++++++
+ /// ^^
+ /// end begin
+ ///
+ /// A push_back operation on a ring buffer assigns the new value to end.
+ /// If there is no more space in the buffer, this will result in begin
+ /// being overwritten and the begin position being moved foward one position.
+ /// The user can use the full() function to detect this condition.
+ /// Note that elements in a ring buffer are not created or destroyed as
+ /// their are added and removed; they are merely assigned. Only on
+ /// container construction and destruction are any elements created and
+ /// destroyed.
+ ///
+ /// The ring buffer can be used in either direction. By this we mean that
+ /// you can use push_back to add items and pop_front to remove them; or you can
+ /// use push_front to add items and pop_back to remove them. You aren't
+ /// limited to these operations; you can push or pop from either side
+ /// arbitrarily and you can insert or erase anywhere in the container.
+ ///
+ /// The ring buffer requires the user to specify a Container type, which
+ /// by default is vector. However, any container with bidirectional iterators
+ /// will work, such as list, deque, string or any of the fixed_* versions
+ /// of these containers, such as fixed_string. Since ring buffer works via copying
+ /// elements instead of allocating and freeing nodes, inserting in the middle
+ /// of a ring buffer based on list (instead of vector) is no more efficient.
+ ///
+ /// To use the ring buffer, its container must be resized to the desired
+ /// ring buffer size. Changing the size of a ring buffer may cause ring
+ /// buffer iterators to invalidate.
+ ///
+ /// An alternative to using a ring buffer is to use a list with a user-created
+ /// node pool and custom allocator. There are various tradeoffs that result from this.
+ ///
+ /// Example usage:
+ /// ring_buffer< int, list<int> > rb(100);
+ /// rb.push_back(1);
+ ///
+ /// Example usage:
+ /// // Example of creating an on-screen debug log that shows 16
+ /// // strings at a time and scrolls older strings away.
+ ///
+ /// // Create ring buffer of 16 strings.
+ /// ring_buffer< string, vector<string> > debugLogText(16);
+ ///
+ /// // Reserve 128 chars for each line. This can make it so that no
+ /// // runtime memory allocations occur.
+ /// for(vector<string>::iterator it = debugLogText.get_container().begin(),
+ /// itEnd = debugLogText.get_container().end(); it != itEnd; ++it)
+ /// {
+ /// (*it).reserve(128);
+ /// }
+ ///
+ /// // Add a new string, using push_front() and front() instead of
+ /// // push_front(str) in order to avoid creating a temporary str.
+ /// debugLogText.push_front();
+ /// debugLogText.front() = "Player fired weapon";
+ ///
+ template <typename T, typename Container = eastl::vector<T>, typename Allocator = typename Container::allocator_type>
+ class ring_buffer
+ {
+ public:
+ typedef ring_buffer<T, Container, Allocator> this_type;
+ typedef Container container_type;
+ typedef Allocator allocator_type;
+
+ typedef typename Container::value_type value_type;
+ typedef typename Container::reference reference;
+ typedef typename Container::const_reference const_reference;
+ typedef typename Container::size_type size_type;
+ typedef typename Container::difference_type difference_type;
+ typedef typename Container::iterator container_iterator;
+ typedef typename Container::const_iterator container_const_iterator;
+ typedef ring_buffer_iterator<T, T*, T&, Container> iterator;
+ typedef ring_buffer_iterator<T, const T*, const T&, Container> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ public: // We declare public so that global comparison operators can be implemented without adding an inline level and without tripping up GCC 2.x friend declaration failures. GCC (through at least v4.0) is poor at inlining and performance wins over correctness.
+ Container c; // We follow the naming convention established for stack, queue, priority_queue and name this 'c'. This variable must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element.
+
+ protected:
+ container_iterator mBegin; // We keep track of where our begin and end are by using Container iterators.
+ container_iterator mEnd;
+ size_type mSize;
+
+ public:
+ // There currently isn't a ring_buffer constructor that specifies an initial size, unlike other containers.
+ explicit ring_buffer(size_type cap = 0); // Construct with an initial capacity (but size of 0).
+ explicit ring_buffer(size_type cap, const allocator_type& allocator);
+ explicit ring_buffer(const Container& x);
+ explicit ring_buffer(const allocator_type& allocator);
+ ring_buffer(const this_type& x);
+ ring_buffer(this_type&& x);
+ ring_buffer(this_type&& x, const allocator_type& allocator);
+ ring_buffer(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_RING_BUFFER_DEFAULT_ALLOCATOR); // This function sets the capacity to be equal to the size of the initializer list.
+
+ // No destructor necessary. Default will do.
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ template <typename InputIterator>
+ void assign(InputIterator first, InputIterator last);
+
+ void swap(this_type& x);
+
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ bool empty() const EA_NOEXCEPT;
+ bool full() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+ size_type capacity() const EA_NOEXCEPT;
+
+ void resize(size_type n);
+ void set_capacity(size_type n); // Sets the capacity to the given value, including values less than the current capacity. Adjusts the size downward if n < size, by throwing out the oldest elements in the buffer.
+ void reserve(size_type n); // Reserve a given capacity. Doesn't decrease the capacity; it only increases it (for compatibility with other containers' behavior).
+
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ void push_back(const value_type& value);
+ reference push_back();
+
+ void push_front(const value_type& value);
+ reference push_front();
+
+ void pop_back();
+ void pop_front();
+
+ reference operator[](size_type n);
+ const_reference operator[](size_type n) const;
+
+ // To consider:
+ // size_type read(value_type* pDestination, size_type nCount);
+ // size_type read(iterator** pPosition1, iterator** pPosition2, size_type& nCount1, size_type& nCount2);
+
+ /* To do:
+ template <class... Args>
+ void emplace_front(Args&&... args);
+
+ template <class... Args>
+ void emplace_back(Args&&... args);
+
+ template <class... Args>
+ iterator emplace(const_iterator position, Args&&... args);
+ */
+
+ iterator insert(const_iterator position, const value_type& value);
+ void insert(const_iterator position, size_type n, const value_type& value);
+ void insert(const_iterator position, std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void insert(const_iterator position, InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ void clear();
+
+ container_type& get_container();
+ const container_type& get_container() const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ //size_type DoGetSize(EASTL_ITC_NS::input_iterator_tag) const;
+ //size_type DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const;
+
+ }; // class ring_buffer
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ring_buffer_iterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator()
+ : mpContainer(NULL), mContainerIterator()
+ {
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator)
+ : mpContainer(pContainer), mContainerIterator(containerIterator)
+ {
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator(const iterator& x)
+ : mpContainer(x.mpContainer), mContainerIterator(x.mContainerIterator)
+ {
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ ring_buffer_iterator<T, Pointer, Reference, Container>&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator=(const iterator& x)
+ {
+ mpContainer = x.mpContainer;
+ mContainerIterator = x.mContainerIterator;
+ return *this;
+ }
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::reference
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator*() const
+ {
+ return *mContainerIterator;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::pointer
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator->() const
+ {
+ return &*mContainerIterator;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator++()
+ {
+ if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end()))
+ mContainerIterator = mpContainer->begin();
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator++(int)
+ {
+ const this_type temp(*this);
+ if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end()))
+ mContainerIterator = mpContainer->begin();
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator--()
+ {
+ if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin()))
+ mContainerIterator = mpContainer->end();
+ --mContainerIterator;
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator--(int)
+ {
+ const this_type temp(*this);
+ if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin()))
+ mContainerIterator = mpContainer->end();
+ --mContainerIterator;
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator+=(difference_type n)
+ {
+ typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
+ increment(n, IC());
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator-=(difference_type n)
+ {
+ typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
+ increment(-n, IC());
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator+(difference_type n) const
+ {
+ return this_type(*this).operator+=(n);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator-(difference_type n) const
+ {
+ return this_type(*this).operator+=(-n);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ void ring_buffer_iterator<T, Pointer, Reference, Container>::increment(difference_type n, EASTL_ITC_NS::input_iterator_tag)
+ {
+ // n cannot be negative, as input iterators don't support reverse iteration.
+ while(n-- > 0)
+ operator++();
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ void ring_buffer_iterator<T, Pointer, Reference, Container>::increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ // We make the assumption here that the user is incrementing from a valid
+ // starting position to a valid ending position. Thus *this + n yields a
+ // valid iterator, including if n happens to be a negative value.
+
+ if(n >= 0)
+ {
+ const difference_type d = mpContainer->end() - mContainerIterator;
+
+ if(n < d)
+ mContainerIterator += n;
+ else
+ mContainerIterator = mpContainer->begin() + (n - d);
+ }
+ else
+ {
+ // Recall that n and d here will be negative and so the logic here works as intended.
+ const difference_type d = mpContainer->begin() - mContainerIterator;
+
+ if(n >= d)
+ mContainerIterator += n;
+ else
+ mContainerIterator = mpContainer->end() + (n - d);
+ }
+ }
+
+
+ // Random access iterators must support operator + and operator -.
+ // You can only add an integer to an iterator, and you cannot add two iterators.
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ inline ring_buffer_iterator<T, Pointer, Reference, Container>
+ operator+(ptrdiff_t n, const ring_buffer_iterator<T, Pointer, Reference, Container>& x)
+ {
+ return x + n; // Implement (n + x) in terms of (x + n).
+ }
+
+
+ // You can only add an integer to an iterator, but you can subtract two iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, typename Container>
+ inline typename ring_buffer_iterator<T, PointerA, ReferenceA, Container>::difference_type
+ operator-(const ring_buffer_iterator<T, PointerA, ReferenceA, Container>& a,
+ const ring_buffer_iterator<T, PointerB, ReferenceB, Container>& b)
+ {
+ typedef typename ring_buffer_iterator<T, PointerA, ReferenceA, Container>::difference_type difference_type;
+
+ // To do: If container_iterator is a random access iterator, then do a simple calculation.
+ // Otherwise, we have little choice but to iterate from a to b and count as we go.
+ // See the ring_buffer::size function for an implementation of this.
+
+ // Iteration implementation:
+ difference_type d = 0;
+
+ for(ring_buffer_iterator<T, PointerA, ReferenceA, Container> temp(b); temp != a; ++temp)
+ ++d;
+
+ return d;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template paremeters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename ContainerA,
+ typename PointerB, typename ReferenceB, typename ContainerB>
+ inline bool operator==(const ring_buffer_iterator<T, PointerA, ReferenceA, ContainerA>& a,
+ const ring_buffer_iterator<T, PointerB, ReferenceB, ContainerB>& b)
+ {
+ // Perhaps we should compare the container pointer as well.
+ // However, for valid iterators this shouldn't be necessary.
+ return a.mContainerIterator == b.mContainerIterator;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename ContainerA,
+ typename PointerB, typename ReferenceB, typename ContainerB>
+ inline bool operator!=(const ring_buffer_iterator<T, PointerA, ReferenceA, ContainerA>& a,
+ const ring_buffer_iterator<T, PointerB, ReferenceB, ContainerB>& b)
+ {
+ // Perhaps we should compare the container pointer as well.
+ // However, for valid iterators this shouldn't be necessary.
+ return !(a.mContainerIterator == b.mContainerIterator);
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ inline bool operator!=(const ring_buffer_iterator<T, Pointer, Reference, Container>& a,
+ const ring_buffer_iterator<T, Pointer, Reference, Container>& b)
+ {
+ return !(a.mContainerIterator == b.mContainerIterator);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ring_buffer
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(size_type cap)
+ : c() // Default construction with default allocator for the container.
+ {
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+
+ // We add one because the element at mEnd is necessarily unused.
+ c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(size_type cap, const allocator_type& allocator)
+ : c(allocator)
+ {
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+
+ // We add one because the element at mEnd is necessarily unused.
+ c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(const Container& x)
+ : c(x) // This copies elements from x, but unless the user is doing some tricks, the only thing that matters is that c.size() == x.size().
+ {
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+ if(c.empty())
+ c.resize(1);
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(const allocator_type& allocator)
+ : c(allocator)
+ {
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+
+ // We add one because the element at mEnd is necessarily unused.
+ c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(const this_type& x)
+ : c(x.c)
+ {
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = x.mSize;
+
+ eastl::advance(mBegin, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound.
+ eastl::advance(mEnd, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mEnd));
+ }
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(this_type&& x)
+ : c() // Default construction with default allocator for the container.
+ {
+ c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+
+ swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and can be only destructible.
+ }
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(this_type&& x, const allocator_type& allocator)
+ : c(allocator)
+ {
+ c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+
+ if(c.get_allocator() == x.c.get_allocator())
+ swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and can be only destructible.
+ else
+ operator=(x);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(std::initializer_list<value_type> ilist, const allocator_type& allocator)
+ : c(allocator)
+ {
+ c.resize((eastl_size_t)ilist.size() + 1);
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+
+ assign(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::this_type&
+ ring_buffer<T, Container, Allocator>::operator=(const this_type& x)
+ {
+ if(&x != this)
+ {
+ c = x.c;
+
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = x.mSize;
+
+ eastl::advance(mBegin, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound.
+ eastl::advance(mEnd, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mEnd));
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::this_type&
+ ring_buffer<T, Container, Allocator>::operator=(this_type&& x)
+ {
+ swap(x);
+ return *this;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::this_type&
+ ring_buffer<T, Container, Allocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ assign(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ template <typename InputIterator>
+ void ring_buffer<T, Container, Allocator>::assign(InputIterator first, InputIterator last)
+ {
+ // To consider: We can make specializations of this for pointer-based
+ // iterators to PODs and turn the action into a memcpy.
+ clear();
+
+ for(; first != last; ++first)
+ push_back(*first);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::swap(this_type& x)
+ {
+ if(&x != this)
+ {
+ const difference_type dBegin = eastl::distance(c.begin(), mBegin); // We can do a simple distance algorithm here, as there will be no wraparound.
+ const difference_type dEnd = eastl::distance(c.begin(), mEnd);
+
+ const difference_type dxBegin = eastl::distance(x.c.begin(), x.mBegin);
+ const difference_type dxEnd = eastl::distance(x.c.begin(), x.mEnd);
+
+ eastl::swap(c, x.c);
+ eastl::swap(mSize, x.mSize);
+
+ mBegin = c.begin();
+ eastl::advance(mBegin, dxBegin); // We can do a simple advance algorithm here, as there will be no wraparound.
+
+ mEnd = c.begin();
+ eastl::advance(mEnd, dxEnd);
+
+ x.mBegin = x.c.begin();
+ eastl::advance(x.mBegin, dBegin);
+
+ x.mEnd = x.c.begin();
+ eastl::advance(x.mEnd, dEnd);
+ }
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::begin() EA_NOEXCEPT
+ {
+ return iterator(&c, mBegin);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_iterator
+ ring_buffer<T, Container, Allocator>::begin() const EA_NOEXCEPT
+ {
+ return const_iterator(const_cast<Container*>(&c), mBegin); // We trust that the const_iterator will respect const-ness.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_iterator
+ ring_buffer<T, Container, Allocator>::cbegin() const EA_NOEXCEPT
+ {
+ return const_iterator(const_cast<Container*>(&c), mBegin); // We trust that the const_iterator will respect const-ness.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::end() EA_NOEXCEPT
+ {
+ return iterator(&c, mEnd);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_iterator
+ ring_buffer<T, Container, Allocator>::end() const EA_NOEXCEPT
+ {
+ return const_iterator(const_cast<Container*>(&c), mEnd); // We trust that the const_iterator will respect const-ness.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_iterator
+ ring_buffer<T, Container, Allocator>::cend() const EA_NOEXCEPT
+ {
+ return const_iterator(const_cast<Container*>(&c), mEnd); // We trust that the const_iterator will respect const-ness.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reverse_iterator
+ ring_buffer<T, Container, Allocator>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(iterator(&c, mEnd));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
+ ring_buffer<T, Container, Allocator>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mEnd));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
+ ring_buffer<T, Container, Allocator>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mEnd));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reverse_iterator
+ ring_buffer<T, Container, Allocator>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(iterator(&c, mBegin));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
+ ring_buffer<T, Container, Allocator>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mBegin));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
+ ring_buffer<T, Container, Allocator>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mBegin));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ bool ring_buffer<T, Container, Allocator>::empty() const EA_NOEXCEPT
+ {
+ return mBegin == mEnd;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ bool ring_buffer<T, Container, Allocator>::full() const EA_NOEXCEPT
+ {
+ // Implementation that relies on c.size() being a fast operation:
+ // return mSize == (c.size() - 1); // (c.size() - 1) == capacity(); we are attempting to reduce function calls.
+
+ // Version that has constant speed guarantees, but is still pretty fast.
+ const_iterator afterEnd(end());
+ ++afterEnd;
+ return afterEnd.mContainerIterator == mBegin;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::size_type
+ ring_buffer<T, Container, Allocator>::size() const EA_NOEXCEPT
+ {
+ return mSize;
+
+ // Alternatives:
+ // return eastl::distance(begin(), end());
+ // return end() - begin(); // This is more direct than using distance().
+ //typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
+ //return DoGetSize(IC()); // This is more direct than using iterator math.
+ }
+
+
+ /*
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::size_type
+ ring_buffer<T, Container, Allocator>::DoGetSize(EASTL_ITC_NS::input_iterator_tag) const
+ {
+ // We could alternatively just use eastl::distance() here, but we happen to
+ // know that such code would boil down to what we have here, and we might
+ // as well remove function calls where possible.
+ difference_type d = 0;
+
+ for(const_iterator temp(begin()), tempEnd(end()); temp != tempEnd; ++temp)
+ ++d;
+
+ return (size_type)d;
+ }
+ */
+
+ /*
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::size_type
+ ring_buffer<T, Container, Allocator>::DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const
+ {
+ // A simpler but less efficient implementation fo this function would be:
+ // return eastl::distance(mBegin, mEnd);
+ //
+ // The calculation of distance here takes advantage of the fact that random
+ // access iterators' distances can be calculated by simple pointer calculation.
+ // Thus the code below boils down to a few subtractions when using a vector,
+ // string, or array as the Container type.
+ //
+ const difference_type dBegin = eastl::distance(const_cast<Container&>(c).begin(), mBegin); // const_cast here solves a little compiler
+ const difference_type dEnd = eastl::distance(const_cast<Container&>(c).begin(), mEnd); // argument matching problem.
+
+ if(dEnd >= dBegin)
+ return dEnd - dBegin;
+
+ return c.size() - (dBegin - dEnd);
+ }
+ */
+
+
+ namespace Internal
+ {
+ ///////////////////////////////////////////////////////////////
+ // has_overflow_allocator
+ //
+ // returns true_type when the specified container type is an
+ // eastl::fixed_* container and therefore has an overflow
+ // allocator type.
+ //
+ template <typename T, typename = void>
+ struct has_overflow_allocator : false_type {};
+
+ template <typename T>
+ struct has_overflow_allocator<T, void_t<decltype(declval<T>().get_overflow_allocator())>> : true_type {};
+
+
+ ///////////////////////////////////////////////////////////////
+ // GetFixedContainerCtorAllocator
+ //
+ // eastl::fixed_* containers are only constructible via their
+ // overflow allocator type. This helper select the appropriate
+ // allocator from the specified container.
+ //
+ template <typename Container, bool UseOverflowAllocator = has_overflow_allocator<Container>()()>
+ struct GetFixedContainerCtorAllocator
+ {
+ auto& operator()(Container& c) { return c.get_overflow_allocator(); }
+ };
+
+ template <typename Container>
+ struct GetFixedContainerCtorAllocator<Container, false>
+ {
+ auto& operator()(Container& c) { return c.get_allocator(); }
+ };
+ } // namespace Internal
+
+
+ ///////////////////////////////////////////////////////////////
+ // ContainerTemporary
+ //
+ // Helper type which prevents utilizing excessive stack space
+ // when creating temporaries when swapping/copying the underlying
+ // ring_buffer container type.
+ //
+ template <typename Container, bool UseHeapTemporary = (sizeof(Container) >= EASTL_MAX_STACK_USAGE)>
+ struct ContainerTemporary
+ {
+ Container mContainer;
+
+ ContainerTemporary(Container& parentContainer)
+ : mContainer(Internal::GetFixedContainerCtorAllocator<Container>{}(parentContainer))
+ {
+ }
+
+ Container& get() { return mContainer; }
+ };
+
+ template <typename Container>
+ struct ContainerTemporary<Container, true>
+ {
+ typename Container::allocator_type* mAllocator;
+ Container* mContainer;
+
+ ContainerTemporary(Container& parentContainer)
+ : mAllocator(&parentContainer.get_allocator())
+ , mContainer(new (mAllocator->allocate(sizeof(Container))) Container)
+ {
+ }
+
+ ~ContainerTemporary()
+ {
+ mContainer->~Container();
+ mAllocator->deallocate(mContainer, sizeof(Container));
+ }
+
+ Container& get() { return *mContainer; }
+ };
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::resize(size_type n)
+ {
+ // Note that if n > size(), we just move the end position out to
+ // the begin + n, with the data being the old end and the new end
+ // being stale values from the past. This is by design, as the concept
+ // of arbitrarily resizing a ring buffer like this is currently deemed
+ // to be vague in what it intends to do. We can only assume that the
+ // user knows what he is doing and will deal with the stale values.
+ EASTL_ASSERT(c.size() >= 1);
+ const size_type cap = (c.size() - 1);
+
+ mSize = n;
+
+ if(n > cap) // If we need to grow in capacity...
+ {
+ // Given that a growing operation will always result in memory allocation,
+ // we currently implement this function via the usage of a temp container.
+ // This makes for a simple implementation, but in some cases it is less
+ // efficient. In particular, if the container is a node-based container like
+ // a (linked) list, this function would be faster if we simply added nodes
+ // to ourself. We would do this by inserting the nodes to be after end()
+ // and adjusting the begin() position if it was after end().
+
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+
+ ContainerTemporary<Container> cTemp(c);
+ cTemp.get().resize(n + 1);
+ eastl::copy(begin(), end(), cTemp.get().begin());
+ eastl::swap(c, cTemp.get());
+
+ mBegin = c.begin();
+ mEnd = mBegin;
+ eastl::advance(mEnd, n); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
+ }
+ else // We could do a check here for n != size(), but that would be costly and people don't usually resize things to their same size.
+ {
+ mEnd = mBegin;
+
+ // eastl::advance(mEnd, n); // We *cannot* use this because there may be wraparound involved.
+
+ // To consider: Possibly we should implement some more detailed logic to optimize the code here.
+ // We'd need to do different behaviour dending on whether the container iterator type is a
+ // random access iterator or otherwise.
+
+ while(n--)
+ {
+ if(EASTL_UNLIKELY(++mEnd == c.end()))
+ mEnd = c.begin();
+ }
+ }
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::size_type
+ ring_buffer<T, Container, Allocator>::capacity() const EA_NOEXCEPT
+ {
+ EASTL_ASSERT(c.size() >= 1); // This is required because even an empty ring_buffer has one unused termination element, somewhat like a \0 at the end of a C string.
+
+ return (c.size() - 1); // Need to subtract one because the position at mEnd is unused.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::set_capacity(size_type n)
+ {
+ const size_type capacity = (c.size() - 1);
+
+ if(n != capacity) // If we need to change capacity...
+ {
+ ContainerTemporary<Container> cTemp(c);
+ cTemp.get().resize(n + 1);
+
+ iterator itCopyBegin = begin();
+
+ if(n < mSize) // If we are shrinking the capacity, to less than our size...
+ {
+ eastl::advance(itCopyBegin, mSize - n);
+ mSize = n;
+ }
+
+ eastl::copy(itCopyBegin, end(), cTemp.get().begin()); // The begin-end range may in fact be larger than n, in which case values will be overwritten.
+ eastl::swap(c, cTemp.get());
+
+ mBegin = c.begin();
+ mEnd = mBegin;
+ eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
+ }
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::reserve(size_type n)
+ {
+ // We follow the pattern of vector and only do something if n > capacity.
+ EASTL_ASSERT(c.size() >= 1);
+
+ if(n > (c.size() - 1)) // If we need to grow in capacity... // (c.size() - 1) == capacity(); we are attempting to reduce function calls.
+ {
+ ContainerTemporary<Container> cTemp(c);
+ cTemp.get().resize(n + 1);
+ eastl::copy(begin(), end(), cTemp.get().begin());
+ eastl::swap(c, cTemp.get());
+
+ mBegin = c.begin();
+ mEnd = mBegin;
+ eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
+ }
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::front()
+ {
+ return *mBegin;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reference
+ ring_buffer<T, Container, Allocator>::front() const
+ {
+ return *mBegin;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::back()
+ {
+ // return *(end() - 1); // Can't use this because not all iterators support operator-.
+
+ iterator temp(end()); // To do: Find a way to construct this temporary in the return statement.
+ return *(--temp); // We can do it by making all our containers' iterators support operator-.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reference
+ ring_buffer<T, Container, Allocator>::back() const
+ {
+ // return *(end() - 1); // Can't use this because not all iterators support operator-.
+
+ const_iterator temp(end()); // To do: Find a way to construct this temporary in the return statement.
+ return *(--temp); // We can do it by making all our containers' iterators support operator-.
+ }
+
+
+ /// A push_back operation on a ring buffer assigns the new value to end.
+ /// If there is no more space in the buffer, this will result in begin
+ /// being overwritten and the begin position being moved foward one position.
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::push_back(const value_type& value)
+ {
+ *mEnd = value;
+
+ if(++mEnd == c.end())
+ mEnd = c.begin();
+
+ if(mEnd == mBegin)
+ {
+ if(++mBegin == c.end())
+ mBegin = c.begin();
+ }
+ else
+ ++mSize;
+ }
+
+
+ /// A push_back operation on a ring buffer assigns the new value to end.
+ /// If there is no more space in the buffer, this will result in begin
+ /// being overwritten and the begin position being moved foward one position.
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::push_back()
+ {
+ // We don't do the following assignment, as the value at mEnd is already constructed;
+ // it is merely possibly not default-constructed. However, the spirit of push_back
+ // is that the user intends to do an assignment or data modification after the
+ // push_back call. The user can always execute *back() = value_type() if he wants.
+ //*mEnd = value_type();
+
+ if(++mEnd == c.end())
+ mEnd = c.begin();
+
+ if(mEnd == mBegin)
+ {
+ if(++mBegin == c.end())
+ mBegin = c.begin();
+ }
+ else
+ ++mSize;
+
+ return back();
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::pop_back()
+ {
+ EASTL_ASSERT(mEnd != mBegin); // We assume that size() > 0 and thus that there is something to pop.
+
+ if(EASTL_UNLIKELY(mEnd == c.begin()))
+ mEnd = c.end();
+ --mEnd;
+ --mSize;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::push_front(const value_type& value)
+ {
+ if(EASTL_UNLIKELY(mBegin == c.begin()))
+ mBegin = c.end();
+
+ if(--mBegin == mEnd)
+ {
+ if(EASTL_UNLIKELY(mEnd == c.begin()))
+ mEnd = c.end();
+ --mEnd;
+ }
+ else
+ ++mSize;
+
+ *mBegin = value;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::push_front()
+ {
+ if(EASTL_UNLIKELY(mBegin == c.begin()))
+ mBegin = c.end();
+
+ if(--mBegin == mEnd)
+ {
+ if(EASTL_UNLIKELY(mEnd == c.begin()))
+ mEnd = c.end();
+ --mEnd;
+ }
+ else
+ ++mSize;
+
+ // See comments above in push_back for why we don't execute this:
+ // *mBegin = value_type();
+
+ return *mBegin; // Same as return front();
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::pop_front()
+ {
+ EASTL_ASSERT(mBegin != mEnd); // We assume that mEnd > mBegin and thus that there is something to pop.
+
+ if(++mBegin == c.end())
+ mBegin = c.begin();
+ --mSize;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::operator[](size_type n)
+ {
+ // return *(begin() + n); // Can't use this because not all iterators support operator+.
+
+ // This should compile to code that is nearly as efficient as that above.
+ // The primary difference is the possible generation of a temporary in this case.
+ iterator temp(begin());
+ eastl::advance(temp, n);
+ return *(temp.mContainerIterator);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reference
+ ring_buffer<T, Container, Allocator>::operator[](size_type n) const
+ {
+ // return *(begin() + n); // Can't use this because not all iterators support operator+.
+
+ // This should compile to code that is nearly as efficient as that above.
+ // The primary difference is the possible generation of a temporary in this case.
+ const_iterator temp(begin());
+ eastl::advance(temp, n);
+ return *(temp.mContainerIterator);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::insert(const_iterator position, const value_type& value)
+ {
+ // To consider: It would be faster if we could tell that position was in the first
+ // half of the container and instead of moving things after the position back,
+ // we could move things before the position forward.
+
+ iterator afterEnd(end());
+ iterator beforeEnd(afterEnd);
+
+ ++afterEnd;
+
+ if(afterEnd.mContainerIterator == mBegin) // If we are at full capacity...
+ --beforeEnd;
+ else
+ push_back();
+
+ iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator.
+ eastl::copy_backward(itPosition, beforeEnd, end());
+ *itPosition = value;
+
+ return itPosition;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::insert(const_iterator position, size_type n, const value_type& value)
+ {
+ // To do: This can be improved with a smarter version. However,
+ // this is a little tricky because we need to deal with the case
+ // whereby n is greater than the size of the container itself.
+ while(n--)
+ insert(position, value);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::insert(const_iterator position, std::initializer_list<value_type> ilist)
+ {
+ insert(position, ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ template <typename InputIterator>
+ void ring_buffer<T, Container, Allocator>::insert(const_iterator position, InputIterator first, InputIterator last)
+ {
+ // To do: This can possibly be improved with a smarter version.
+ // However, this can be tricky if distance(first, last) is greater
+ // than the size of the container itself.
+ for(; first != last; ++first, ++position)
+ insert(position, *first);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::erase(const_iterator position)
+ {
+ iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator.
+ iterator iNext(itPosition);
+
+ eastl::copy(++iNext, end(), itPosition);
+ pop_back();
+
+ return itPosition;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::erase(const_iterator first, const_iterator last)
+ {
+ iterator itFirst(first.mpContainer, first.mContainerIterator); // We merely copy from const_iterator to iterator.
+ iterator itLast(last.mpContainer, last.mContainerIterator);
+
+ typename iterator::difference_type d = eastl::distance(itFirst, itLast);
+
+ eastl::copy(itLast, end(), itFirst);
+
+ while(d--) // To do: improve this implementation.
+ pop_back();
+
+ return itFirst;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reverse_iterator
+ ring_buffer<T, Container, Allocator>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reverse_iterator
+ ring_buffer<T, Container, Allocator>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::clear()
+ {
+ // Don't clear the container; we use its valid data for our elements.
+ mBegin = c.begin();
+ mEnd = c.begin();
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::container_type&
+ ring_buffer<T, Container, Allocator>::get_container()
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ const typename ring_buffer<T, Container, Allocator>::container_type&
+ ring_buffer<T, Container, Allocator>::get_container() const
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool ring_buffer<T, Container, Allocator>::validate() const
+ {
+ if(!c.validate()) // This requires that the container implement the validate function. That pretty much
+ return false; // means that the container is an EASTL container and not a std STL container.
+
+ if(c.empty()) // c must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element.
+ return false;
+
+ if(size() > capacity())
+ return false;
+
+ if((validate_iterator(begin()) & (isf_valid | isf_current)) != (isf_valid | isf_current))
+ return false;
+
+ if((validate_iterator(end()) & (isf_valid | isf_current)) != (isf_valid | isf_current))
+ return false;
+
+ // Verify that the size calculation is consistent.
+ size_type n = 0;
+ for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i)
+ ++n;
+ if(n != mSize)
+ return false;
+
+ return true;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline int ring_buffer<T, Container, Allocator>::validate_iterator(const_iterator i) const
+ {
+ // To do: Replace this with a more efficient implementation if possible.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator==(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return (a.size() == b.size()) && (a.c == b.c);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator<(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ const typename ring_buffer<T, Container, Allocator>::size_type sizeA = a.size();
+ const typename ring_buffer<T, Container, Allocator>::size_type sizeB = b.size();
+
+ if(sizeA == sizeB)
+ return (a.c < b.c);
+ return sizeA < sizeB;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator!=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator>(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return (b < a);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator<=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator>=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline void swap(ring_buffer<T, Container, Allocator>& a, ring_buffer<T, Container, Allocator>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/bonus/sort_extra.h b/EASTL/include/EASTL/bonus/sort_extra.h
new file mode 100644
index 0000000..5f9a0c4
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/sort_extra.h
@@ -0,0 +1,204 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements additional sort algorithms beyond the basic set.
+// Included here are:
+// selection_sort -- Unstable.
+// shaker_sort -- Stable.
+// bucket_sort -- Stable.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SORT_EXTRA_H
+#define EASTL_SORT_EXTRA_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/functional.h>
+#include <EASTL/heap.h>
+#include <EASTL/sort.h> // For backwards compatibility due to sorts moved from here to sort.h.
+#include <EASTL/allocator.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// selection_sort
+ ///
+ /// Implements the SelectionSort algorithm.
+ ///
+ template <typename ForwardIterator, typename StrictWeakOrdering>
+ void selection_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+ {
+ ForwardIterator iCurrent, iMin;
+
+ for(; first != last; ++first)
+ {
+ iCurrent = first;
+ iMin = iCurrent;
+
+ for(++iCurrent; iCurrent != last; ++iCurrent)
+ {
+ if(compare(*iCurrent, *iMin))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iMin, *iCurrent)); // Validate that the compare function is sane.
+ iMin = iCurrent;
+ }
+ }
+
+ if(first != iMin)
+ eastl::iter_swap(first, iMin);
+ }
+ } // selection_sort
+
+ template <typename ForwardIterator>
+ inline void selection_sort(ForwardIterator first, ForwardIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<ForwardIterator>::value_type> Less;
+
+ eastl::selection_sort<ForwardIterator, Less>(first, last, Less());
+ }
+
+
+
+ /// shaker_sort
+ ///
+ /// Implements the ShakerSort algorithm, which is a sorting algorithm which
+ /// improves on bubble_sort by sweeping both from left to right and right
+ /// to left, resulting in less iteration.
+ ///
+ template <typename BidirectionalIterator, typename StrictWeakOrdering>
+ void shaker_sort(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare)
+ {
+ if(first != last)
+ {
+ BidirectionalIterator iCurrent, iNext, iLastModified;
+
+ --last;
+
+ while(first != last)
+ {
+ iLastModified = first;
+
+ for(iCurrent = first; iCurrent != last; iCurrent = iNext)
+ {
+ iNext = iCurrent;
+ ++iNext;
+
+ if(compare(*iNext, *iCurrent))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+ iLastModified = iCurrent;
+ eastl::iter_swap(iCurrent, iNext);
+ }
+ }
+
+ last = iLastModified;
+
+ if(first != last)
+ {
+ for(iCurrent = last; iCurrent != first; iCurrent = iNext)
+ {
+ iNext = iCurrent;
+ --iNext;
+
+ if(compare(*iCurrent, *iNext))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iNext, *iCurrent)); // Validate that the compare function is sane.
+ iLastModified = iCurrent;
+ eastl::iter_swap(iNext, iCurrent);
+ }
+ }
+ first = iLastModified;
+ }
+ }
+ }
+ } // shaker_sort
+
+ template <typename BidirectionalIterator>
+ inline void shaker_sort(BidirectionalIterator first, BidirectionalIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<BidirectionalIterator>::value_type> Less;
+
+ eastl::shaker_sort<BidirectionalIterator, Less>(first, last, Less());
+ }
+
+
+
+ /// bucket_sort
+ ///
+ /// Implements the BucketSort algorithm.
+ ///
+ /// Example usage:
+ /// const size_t kElementRange = 32;
+ /// vector<int> intArray(1000);
+ ///
+ /// for(int i = 0; i < 1000; i++)
+ /// intArray[i] = rand() % kElementRange;
+ ///
+ /// vector< vector<int> > bucketArray(kElementRange);
+ /// bucket_sort(intArray.begin(), intArray.end(), bucketArray, eastl::hash_use_self<int>());
+ ///
+ template <typename T>
+ struct hash_use_self
+ {
+ T operator()(const T& x) const
+ { return x; }
+ };
+
+ // Requires buckeyArray to be an array of arrays with a size equal to the range of values
+ // returned by the hash function. The hash function is required to return a unique value
+ // for each uniquely sorted element. Usually the way this is done is the elements are
+ // integers of a limited range (e.g. 0-64) and the hash function returns the element value
+ // itself. If you had a case where all elements were always even numbers (e.g. 0-128),
+ // you could use a custom hash function that returns (element value / 2).
+ //
+ // The user is required to provide an empty bucketArray to this function. This function returns
+ // with the bucketArray non-empty. This function doesn't clear the bucketArray because that takes
+ // time and the user might not need it to be cleared, at least at that time.
+ //
+ template <typename ForwardIterator, typename ContainerArray, typename HashFunction>
+ void bucket_sort(ForwardIterator first, ForwardIterator last, ContainerArray& bucketArray, HashFunction hash /*= hash_use_self*/)
+ {
+ for(ForwardIterator iInput = first; iInput != last; ++iInput)
+ bucketArray[hash(*iInput)].push_back(*iInput);
+
+ for(typename ContainerArray::const_iterator iBucket = bucketArray.begin(); iBucket != bucketArray.end(); ++iBucket)
+ first = eastl::copy((*iBucket).begin(), (*iBucket).end(), first);
+ }
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/bonus/tuple_vector.h b/EASTL/include/EASTL/bonus/tuple_vector.h
new file mode 100644
index 0000000..6ade75a
--- /dev/null
+++ b/EASTL/include/EASTL/bonus/tuple_vector.h
@@ -0,0 +1,1598 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// tuple_vector is a data container that is designed to abstract and simplify
+// the handling of a "structure of arrays" layout of data in memory. In
+// particular, it mimics the interface of vector, including functionality to do
+// inserts, erases, push_backs, and random-access. It also provides a
+// RandomAccessIterator and corresponding functionality, making it compatible
+// with most STL (and STL-esque) algorithms such as ranged-for loops, find_if,
+// remove_if, or sort.
+
+// When used or applied properly, this container can improve performance of
+// some algorithms through cache-coherent data accesses or allowing for
+// sensible SIMD programming, while keeping the structure of a single
+// container, to permit a developer to continue to use existing algorithms in
+// STL and the like.
+//
+// Consult doc/Bonus/tuple_vector_readme.md for more information.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_TUPLEVECTOR_H
+#define EASTL_TUPLEVECTOR_H
+
+#include <EASTL/bonus/compressed_pair.h>
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/memory.h>
+#include <EASTL/tuple.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+EA_DISABLE_VC_WARNING(4244) // warning C4244: 'conversion from '___' to '___', possible loss of data
+EA_DISABLE_VC_WARNING(4623) // warning C4623: default constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4625) // warning C4625: copy constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4510) // warning C4510: default constructor could not be generated
+
+namespace eastl
+{
+ /// EASTL_TUPLE_VECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_TUPLE_VECTOR_DEFAULT_NAME
+ #define EASTL_TUPLE_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " tuple-vector" // Unless the user overrides something, this is "EASTL tuple-vector".
+ #endif
+
+
+ /// EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR
+ #define EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_TUPLE_VECTOR_DEFAULT_NAME)
+ #endif
+
+namespace TupleVecInternal
+{
+
+// forward declarations
+template <eastl_size_t I, typename... Ts>
+struct tuplevec_element;
+
+template <eastl_size_t I, typename... Ts>
+using tuplevec_element_t = typename tuplevec_element<I, Ts...>::type;
+
+template <typename... Ts>
+struct TupleTypes {};
+
+template <typename Allocator, typename Indices, typename... Ts>
+class TupleVecImpl;
+
+template <typename... Ts>
+struct TupleRecurser;
+
+template <eastl_size_t I, typename... Ts>
+struct TupleIndexRecurser;
+
+template <eastl_size_t I, typename T>
+struct TupleVecLeaf;
+
+template <typename Indices, typename... Ts>
+struct TupleVecIter;
+
+// tuplevec_element helper to be able to isolate a type given an index
+template <eastl_size_t I>
+struct tuplevec_element<I>
+{
+ static_assert(I != I, "tuplevec_element index out of range");
+};
+
+template <typename T, typename... Ts>
+struct tuplevec_element<0, T, Ts...>
+{
+ tuplevec_element() = delete; // tuplevec_element should only be used for compile-time assistance, and never be instantiated
+ typedef T type;
+};
+
+template <eastl_size_t I, typename T, typename... Ts>
+struct tuplevec_element<I, T, Ts...>
+{
+ typedef tuplevec_element_t<I - 1, Ts...> type;
+};
+
+// attempt to isolate index given a type
+template <typename T, typename TupleVector>
+struct tuplevec_index
+{
+};
+
+template <typename T>
+struct tuplevec_index<T, TupleTypes<>>
+{
+ typedef void DuplicateTypeCheck;
+ tuplevec_index() = delete; // tuplevec_index should only be used for compile-time assistance, and never be instantiated
+ static const eastl_size_t index = 0;
+};
+
+template <typename T, typename... TsRest>
+struct tuplevec_index<T, TupleTypes<T, TsRest...>>
+{
+ typedef int DuplicateTypeCheck;
+ static_assert(is_void<typename tuplevec_index<T, TupleTypes<TsRest...>>::DuplicateTypeCheck>::value, "duplicate type T in tuple_vector::get<T>(); unique types must be provided in declaration, or only use get<eastl_size_t>()");
+
+ static const eastl_size_t index = 0;
+};
+
+template <typename T, typename Ts, typename... TsRest>
+struct tuplevec_index<T, TupleTypes<Ts, TsRest...>>
+{
+ typedef typename tuplevec_index<T, TupleTypes<TsRest...>>::DuplicateTypeCheck DuplicateTypeCheck;
+ static const eastl_size_t index = tuplevec_index<T, TupleTypes<TsRest...>>::index + 1;
+};
+
+template <typename Allocator, typename T, typename Indices, typename... Ts>
+struct tuplevec_index<T, TupleVecImpl<Allocator, Indices, Ts...>> : public tuplevec_index<T, TupleTypes<Ts...>>
+{
+};
+
+
+// helper to calculate the layout of the allocations for the tuple of types (esp. to take alignment into account)
+template <>
+struct TupleRecurser<>
+{
+ typedef eastl_size_t size_type;
+
+ // This class should never be instantiated. This is just a helper for working with static functions when anonymous functions don't work
+ // and provide some other utilities
+ TupleRecurser() = delete;
+
+ static EA_CONSTEXPR size_type GetTotalAlignment()
+ {
+ return 0;
+ }
+
+ static EA_CONSTEXPR size_type GetTotalAllocationSize(size_type capacity, size_type offset)
+ {
+ EA_UNUSED(capacity);
+ return offset;
+ }
+
+ template<typename Allocator, size_type I, typename Indices, typename... VecTypes>
+ static pair<void*, size_type> DoAllocate(TupleVecImpl<Allocator, Indices, VecTypes...> &vec, void** ppNewLeaf, size_type capacity, size_type offset)
+ {
+ EA_UNUSED(ppNewLeaf);
+
+ // If n is zero, then we allocate no memory and just return NULL.
+ // This is fine, as our default ctor initializes with NULL pointers.
+ size_type alignment = TupleRecurser<VecTypes...>::GetTotalAlignment();
+ void* ptr = capacity ? allocate_memory(vec.get_allocator(), offset, alignment, 0) : nullptr;
+
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((size_t)ptr & (alignment - 1)) != 0)
+ {
+ EASTL_FAIL_MSG("tuple_vector::DoAllocate -- memory not alignment at requested alignment");
+ }
+ #endif
+
+ return make_pair(ptr, offset);
+ }
+
+ template<typename TupleVecImplType, size_type I>
+ static void SetNewData(TupleVecImplType &vec, void* pData, size_type capacity, size_type offset)
+ {
+ EA_UNUSED(vec);
+ EA_UNUSED(pData);
+ EA_UNUSED(capacity);
+ EA_UNUSED(offset);
+ }
+};
+
+template <typename T, typename... Ts>
+struct TupleRecurser<T, Ts...> : TupleRecurser<Ts...>
+{
+ typedef eastl_size_t size_type;
+
+ static EA_CONSTEXPR size_type GetTotalAlignment()
+ {
+ return max(static_cast<size_type>(alignof(T)), TupleRecurser<Ts...>::GetTotalAlignment());
+ }
+
+ static EA_CONSTEXPR size_type GetTotalAllocationSize(size_type capacity, size_type offset)
+ {
+ return TupleRecurser<Ts...>::GetTotalAllocationSize(capacity, CalculateAllocationSize(offset, capacity));
+ }
+
+ template<typename Allocator, size_type I, typename Indices, typename... VecTypes>
+ static pair<void*, size_type> DoAllocate(TupleVecImpl<Allocator, Indices, VecTypes...> &vec, void** ppNewLeaf, size_type capacity, size_type offset)
+ {
+ size_type allocationOffset = CalculatAllocationOffset(offset);
+ size_type allocationSize = CalculateAllocationSize(offset, capacity);
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<Allocator, I + 1, Indices, VecTypes...>(
+ vec, ppNewLeaf, capacity, allocationSize);
+ ppNewLeaf[I] = (void*)((uintptr_t)(allocation.first) + allocationOffset);
+ return allocation;
+ }
+
+ template<typename TupleVecImplType, size_type I>
+ static void SetNewData(TupleVecImplType &vec, void* pData, size_type capacity, size_type offset)
+ {
+ size_type allocationOffset = CalculatAllocationOffset(offset);
+ size_type allocationSize = CalculateAllocationSize(offset, capacity);
+ vec.TupleVecLeaf<I, T>::mpData = (T*)((uintptr_t)pData + allocationOffset);
+ TupleRecurser<Ts...>::template SetNewData<TupleVecImplType, I + 1>(vec, pData, capacity, allocationSize);
+ }
+
+private:
+ static EA_CONSTEXPR size_type CalculateAllocationSize(size_type offset, size_type capacity)
+ {
+ return CalculatAllocationOffset(offset) + sizeof(T) * capacity;
+ }
+
+ static EA_CONSTEXPR size_type CalculatAllocationOffset(size_type offset) { return (offset + alignof(T) - 1) & (~alignof(T) + 1); }
+};
+
+template <eastl_size_t I, typename T>
+struct TupleVecLeaf
+{
+ typedef eastl_size_t size_type;
+
+ void DoUninitializedMoveAndDestruct(const size_type begin, const size_type end, T* pDest)
+ {
+ T* pBegin = mpData + begin;
+ T* pEnd = mpData + end;
+ eastl::uninitialized_move_ptr_if_noexcept(pBegin, pEnd, pDest);
+ eastl::destruct(pBegin, pEnd);
+ }
+
+ void DoInsertAndFill(size_type pos, size_type n, size_type numElements, const T& arg)
+ {
+ T* pDest = mpData + pos;
+ T* pDataEnd = mpData + numElements;
+ const T temp = arg;
+ const size_type nExtra = (numElements - pos);
+ if (n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)...
+ {
+ eastl::uninitialized_move_ptr(pDataEnd - n, pDataEnd, pDataEnd);
+ eastl::move_backward(pDest, pDataEnd - n, pDataEnd); // We need move_backward because of potential overlap issues.
+ eastl::fill(pDest, pDest + n, temp);
+ }
+ else
+ {
+ eastl::uninitialized_fill_n_ptr(pDataEnd, n - nExtra, temp);
+ eastl::uninitialized_move_ptr(pDest, pDataEnd, pDataEnd + n - nExtra);
+ eastl::fill(pDest, pDataEnd, temp);
+ }
+ }
+
+ void DoInsertRange(T* pSrcBegin, T* pSrcEnd, T* pDestBegin, size_type numDataElements)
+ {
+ size_type pos = pDestBegin - mpData;
+ size_type n = pSrcEnd - pSrcBegin;
+ T* pDataEnd = mpData + numDataElements;
+ const size_type nExtra = numDataElements - pos;
+ if (n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)...
+ {
+ eastl::uninitialized_move_ptr(pDataEnd - n, pDataEnd, pDataEnd);
+ eastl::move_backward(pDestBegin, pDataEnd - n, pDataEnd); // We need move_backward because of potential overlap issues.
+ eastl::copy(pSrcBegin, pSrcEnd, pDestBegin);
+ }
+ else
+ {
+ eastl::uninitialized_copy(pSrcEnd - (n - nExtra), pSrcEnd, pDataEnd);
+ eastl::uninitialized_move_ptr(pDestBegin, pDataEnd, pDataEnd + n - nExtra);
+ eastl::copy(pSrcBegin, pSrcEnd - (n - nExtra), pDestBegin);
+ }
+ }
+
+ void DoInsertValue(size_type pos, size_type numElements, T&& arg)
+ {
+ T* pDest = mpData + pos;
+ T* pDataEnd = mpData + numElements;
+
+ eastl::uninitialized_move_ptr(pDataEnd - 1, pDataEnd, pDataEnd);
+ eastl::move_backward(pDest, pDataEnd - 1, pDataEnd); // We need move_backward because of potential overlap issues.
+ eastl::destruct(pDest);
+ ::new (pDest) T(eastl::forward<T>(arg));
+ }
+
+ T* mpData = nullptr;
+};
+
+// swallow allows for parameter pack expansion of arguments as means of expanding operations performed
+// if a void function is used for operation expansion, it should be wrapped in (..., 0) so that the compiler
+// thinks it has a parameter to pass into the function
+template <typename... Ts>
+void swallow(Ts&&...) { }
+
+inline bool variadicAnd(bool cond) { return cond; }
+
+inline bool variadicAnd(bool cond, bool conds...) { return cond && variadicAnd(conds); }
+
+// Helper struct to check for strict compatibility between two iterators, whilst still allowing for
+// conversion between TupleVecImpl<Ts...>::iterator and TupleVecImpl<Ts...>::const_iterator.
+template <bool IsSameSize, typename From, typename To>
+struct TupleVecIterCompatibleImpl : public false_type { };
+
+template<>
+struct TupleVecIterCompatibleImpl<true, TupleTypes<>, TupleTypes<>> : public true_type { };
+
+template <typename From, typename... FromRest, typename To, typename... ToRest>
+struct TupleVecIterCompatibleImpl<true, TupleTypes<From, FromRest...>, TupleTypes<To, ToRest...>> : public integral_constant<bool,
+ TupleVecIterCompatibleImpl<true, TupleTypes<FromRest...>, TupleTypes<ToRest...>>::value &&
+ is_same<typename remove_const<From>::type, typename remove_const<To>::type>::value >
+{ };
+
+template <typename From, typename To>
+struct TupleVecIterCompatible;
+
+template<typename... Us, typename... Ts>
+struct TupleVecIterCompatible<TupleTypes<Us...>, TupleTypes<Ts...>> :
+ public TupleVecIterCompatibleImpl<sizeof...(Us) == sizeof...(Ts), TupleTypes<Us...>, TupleTypes<Ts...>>
+{ };
+
+// The Iterator operates by storing a persistent index internally,
+// and resolving the tuple of pointers to the various parts of the original tupleVec when dereferenced.
+// While resolving the tuple is a non-zero operation, it consistently generated better code than the alternative of
+// storing - and harmoniously updating on each modification - a full tuple of pointers to the tupleVec's data
+template <eastl_size_t... Indices, typename... Ts>
+struct TupleVecIter<index_sequence<Indices...>, Ts...>
+ : public iterator<EASTL_ITC_NS::random_access_iterator_tag, tuple<Ts...>, eastl_size_t, tuple<Ts*...>, tuple<Ts&...>>
+{
+private:
+ typedef TupleVecIter<index_sequence<Indices...>, Ts...> this_type;
+ typedef eastl_size_t size_type;
+
+ typedef iterator<EASTL_ITC_NS::random_access_iterator_tag, tuple<Ts...>, eastl_size_t, tuple<Ts*...>, tuple<Ts&...>> iter_type;
+
+ template<typename U, typename... Us>
+ friend struct TupleVecIter;
+
+ template<typename U, typename V, typename... Us>
+ friend class TupleVecImpl;
+
+ template<typename U>
+ friend class move_iterator;
+public:
+ typedef typename iter_type::iterator_category iterator_category;
+ typedef typename iter_type::value_type value_type;
+ typedef typename iter_type::difference_type difference_type;
+ typedef typename iter_type::pointer pointer;
+ typedef typename iter_type::reference reference;
+
+ TupleVecIter() = default;
+
+ template<typename VecImplType>
+ TupleVecIter(VecImplType* tupleVec, size_type index)
+ : mIndex(index)
+ , mpData{(void*)tupleVec->TupleVecLeaf<Indices, Ts>::mpData...}
+ { }
+
+ template <typename OtherIndicesType, typename... Us,
+ typename = typename enable_if<TupleVecIterCompatible<TupleTypes<Us...>, TupleTypes<Ts...>>::value, bool>::type>
+ TupleVecIter(const TupleVecIter<OtherIndicesType, Us...>& other)
+ : mIndex(other.mIndex)
+ , mpData{other.mpData[Indices]...}
+ {
+ }
+
+ bool operator==(const TupleVecIter& other) const { return mIndex == other.mIndex && mpData[0] == other.mpData[0]; }
+ bool operator!=(const TupleVecIter& other) const { return mIndex != other.mIndex || mpData[0] != other.mpData[0]; }
+ reference operator*() const { return MakeReference(); }
+
+ this_type& operator++() { ++mIndex; return *this; }
+ this_type operator++(int)
+ {
+ this_type temp = *this;
+ ++mIndex;
+ return temp;
+ }
+
+ this_type& operator--() { --mIndex; return *this; }
+ this_type operator--(int)
+ {
+ this_type temp = *this;
+ --mIndex;
+ return temp;
+ }
+
+ this_type& operator+=(difference_type n) { mIndex += n; return *this; }
+ this_type operator+(difference_type n) const
+ {
+ this_type temp = *this;
+ return temp += n;
+ }
+ friend this_type operator+(difference_type n, const this_type& rhs)
+ {
+ this_type temp = rhs;
+ return temp += n;
+ }
+
+ this_type& operator-=(difference_type n) { mIndex -= n; return *this; }
+ this_type operator-(difference_type n) const
+ {
+ this_type temp = *this;
+ return temp -= n;
+ }
+ friend this_type operator-(difference_type n, const this_type& rhs)
+ {
+ this_type temp = rhs;
+ return temp -= n;
+ }
+
+ difference_type operator-(const this_type& rhs) const { return mIndex - rhs.mIndex; }
+ bool operator<(const this_type& rhs) const { return mIndex < rhs.mIndex; }
+ bool operator>(const this_type& rhs) const { return mIndex > rhs.mIndex; }
+ bool operator>=(const this_type& rhs) const { return mIndex >= rhs.mIndex; }
+ bool operator<=(const this_type& rhs) const { return mIndex <= rhs.mIndex; }
+
+ reference operator[](const size_type n) const
+ {
+ return *(*this + n);
+ }
+
+private:
+
+ value_type MakeValue() const
+ {
+ return value_type(((Ts*)mpData[Indices])[mIndex]...);
+ }
+
+ reference MakeReference() const
+ {
+ return reference(((Ts*)mpData[Indices])[mIndex]...);
+ }
+
+ pointer MakePointer() const
+ {
+ return pointer(&((Ts*)mpData[Indices])[mIndex]...);
+ }
+
+ size_type mIndex = 0;
+ const void* mpData[sizeof...(Ts)];
+};
+
+// TupleVecImpl
+template <typename Allocator, eastl_size_t... Indices, typename... Ts>
+class TupleVecImpl<Allocator, index_sequence<Indices...>, Ts...> : public TupleVecLeaf<Indices, Ts>...
+{
+ typedef Allocator allocator_type;
+ typedef index_sequence<Indices...> index_sequence_type;
+ typedef TupleVecImpl<Allocator, index_sequence_type, Ts...> this_type;
+ typedef TupleVecImpl<Allocator, index_sequence_type, const Ts...> const_this_type;
+
+public:
+ typedef TupleVecInternal::TupleVecIter<index_sequence_type, Ts...> iterator;
+ typedef TupleVecInternal::TupleVecIter<index_sequence_type, const Ts...> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef eastl_size_t size_type;
+ typedef eastl::tuple<Ts...> value_tuple;
+ typedef eastl::tuple<Ts&...> reference_tuple;
+ typedef eastl::tuple<const Ts&...> const_reference_tuple;
+ typedef eastl::tuple<Ts*...> ptr_tuple;
+ typedef eastl::tuple<const Ts*...> const_ptr_tuple;
+ typedef eastl::tuple<Ts&&...> rvalue_tuple;
+
+ TupleVecImpl()
+ : mDataSizeAndAllocator(0, EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ {}
+
+ TupleVecImpl(const allocator_type& allocator)
+ : mDataSizeAndAllocator(0, allocator)
+ {}
+
+ TupleVecImpl(this_type&& x)
+ : mDataSizeAndAllocator(0, eastl::move(x.get_allocator()))
+ {
+ swap(x);
+ }
+
+ TupleVecImpl(this_type&& x, const Allocator& allocator)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ if (get_allocator() == x.get_allocator()) // If allocators are equivalent, then we can safely swap member-by-member
+ {
+ swap(x);
+ }
+ else
+ {
+ this_type temp(eastl::move(*this));
+ temp.swap(x);
+ }
+ }
+
+ TupleVecImpl(const this_type& x)
+ : mDataSizeAndAllocator(0, x.get_allocator())
+ {
+ DoInitFromIterator(x.begin(), x.end());
+ }
+
+ template<typename OtherAllocator>
+ TupleVecImpl(const TupleVecImpl<OtherAllocator, index_sequence_type, Ts...>& x, const Allocator& allocator)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFromIterator(x.begin(), x.end());
+ }
+
+ template<typename MoveIterBase>
+ TupleVecImpl(move_iterator<MoveIterBase> begin, move_iterator<MoveIterBase> end, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFromIterator(begin, end);
+ }
+
+ TupleVecImpl(const_iterator begin, const_iterator end, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator )
+ {
+ DoInitFromIterator(begin, end);
+ }
+
+ TupleVecImpl(size_type n, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitDefaultFill(n);
+ }
+
+ TupleVecImpl(size_type n, const Ts&... args)
+ : mDataSizeAndAllocator(0, EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ {
+ DoInitFillArgs(n, args...);
+ }
+
+ TupleVecImpl(size_type n, const Ts&... args, const allocator_type& allocator)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFillArgs(n, args...);
+ }
+
+ TupleVecImpl(size_type n, const_reference_tuple tup, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFillTuple(n, tup);
+ }
+
+ TupleVecImpl(const value_tuple* first, const value_tuple* last, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFromTupleArray(first, last);
+ }
+
+ TupleVecImpl(std::initializer_list<value_tuple> iList, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFromTupleArray(iList.begin(), iList.end());
+ }
+
+protected:
+ // ctor to provide a pre-allocated field of data that the container will own, specifically for fixed_tuple_vector
+ TupleVecImpl(const allocator_type& allocator, void* pData, size_type capacity, size_type dataSize)
+ : mpData(pData), mNumCapacity(capacity), mDataSizeAndAllocator(dataSize, allocator)
+ {
+ TupleRecurser<Ts...>::template SetNewData<this_type, 0>(*this, mpData, mNumCapacity, 0);
+ }
+
+public:
+ ~TupleVecImpl()
+ {
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + mNumElements), 0)...);
+ if (mpData)
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ }
+
+ void assign(size_type n, const Ts&... args)
+ {
+ if (n > mNumCapacity)
+ {
+ this_type temp(n, args..., get_allocator()); // We have little choice but to reallocate with new memory.
+ swap(temp);
+ }
+ else if (n > mNumElements) // If n > mNumElements ...
+ {
+ size_type oldNumElements = mNumElements;
+ swallow((eastl::fill(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + oldNumElements, args), 0)...);
+ swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + n, args), 0)...);
+ mNumElements = n;
+ }
+ else // else 0 <= n <= mNumElements
+ {
+ swallow((eastl::fill(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + n, args), 0)...);
+ erase(begin() + n, end());
+ }
+ }
+
+ void assign(const_iterator first, const_iterator last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(!validate_iterator_pair(first, last)))
+ EASTL_FAIL_MSG("tuple_vector::assign -- invalid iterator pair");
+#endif
+ size_type newNumElements = last - first;
+ if (newNumElements > mNumCapacity)
+ {
+ this_type temp(first, last, get_allocator());
+ swap(temp);
+ }
+ else
+ {
+ const void* ppOtherData[sizeof...(Ts)] = {first.mpData[Indices]...};
+ size_type firstIdx = first.mIndex;
+ size_type lastIdx = last.mIndex;
+ if (newNumElements > mNumElements) // If n > mNumElements ...
+ {
+ size_type oldNumElements = mNumElements;
+ swallow((eastl::copy((Ts*)(ppOtherData[Indices]) + firstIdx,
+ (Ts*)(ppOtherData[Indices]) + firstIdx + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx + oldNumElements,
+ (Ts*)(ppOtherData[Indices]) + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ mNumElements = newNumElements;
+ }
+ else // else 0 <= n <= mNumElements
+ {
+ swallow((eastl::copy((Ts*)(ppOtherData[Indices]) + firstIdx, (Ts*)(ppOtherData[Indices]) + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ erase(begin() + newNumElements, end());
+ }
+ }
+ }
+
+ void assign(const value_tuple* first, const value_tuple* last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr))
+ EASTL_FAIL_MSG("tuple_vector::assign from tuple array -- invalid ptrs");
+#endif
+ size_type newNumElements = last - first;
+ if (newNumElements > mNumCapacity)
+ {
+ this_type temp(first, last, get_allocator());
+ swap(temp);
+ }
+ else
+ {
+ if (newNumElements > mNumElements) // If n > mNumElements ...
+ {
+ size_type oldNumElements = mNumElements;
+
+ DoCopyFromTupleArray(begin(), begin() + oldNumElements, first);
+ DoUninitializedCopyFromTupleArray(begin() + oldNumElements, begin() + newNumElements, first + oldNumElements);
+ mNumElements = newNumElements;
+ }
+ else // else 0 <= n <= mNumElements
+ {
+ DoCopyFromTupleArray(begin(), begin() + newNumElements, first);
+ erase(begin() + newNumElements, end());
+ }
+ }
+ }
+
+ reference_tuple push_back()
+ {
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ DoGrow(oldNumElements, oldNumCapacity, newNumElements);
+ swallow(::new(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements) Ts()...);
+ return back();
+ }
+
+ void push_back(const Ts&... args)
+ {
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ DoGrow(oldNumElements, oldNumCapacity, newNumElements);
+ swallow(::new(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements) Ts(args)...);
+ }
+
+ void push_back_uninitialized()
+ {
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ DoGrow(oldNumElements, oldNumCapacity, newNumElements);
+ }
+
+ reference_tuple emplace_back(Ts&&... args)
+ {
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ DoGrow(oldNumElements, oldNumCapacity, newNumElements);
+ swallow(::new(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements) Ts(eastl::forward<Ts>(args))...);
+ return back();
+ }
+
+ iterator emplace(const_iterator pos, Ts&&... args)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::emplace -- invalid iterator");
+#endif
+ size_type firstIdx = pos - cbegin();
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = mNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ if (newNumElements > oldNumCapacity || firstIdx != oldNumElements)
+ {
+ if (newNumElements > oldNumCapacity)
+ {
+ const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements);
+
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, newCapacity, 0);
+
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ 0, firstIdx, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ firstIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + firstIdx + 1), 0)...);
+ swallow(::new ((Ts*)ppNewLeaf[Indices] + firstIdx) Ts(eastl::forward<Ts>(args))...);
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = newCapacity;
+ internalDataSize() = allocation.second;
+ }
+ else
+ {
+ swallow((TupleVecLeaf<Indices, Ts>::DoInsertValue(firstIdx, oldNumElements, eastl::forward<Ts>(args)), 0)...);
+ }
+ }
+ else
+ {
+ swallow(::new (TupleVecLeaf<Indices, Ts>::mpData + oldNumElements) Ts(eastl::forward<Ts>(args))...);
+ }
+ return begin() + firstIdx;
+ }
+
+ iterator insert(const_iterator pos, size_type n, const Ts&... args)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator");
+#endif
+ size_type firstIdx = pos - cbegin();
+ size_type lastIdx = firstIdx + n;
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = mNumElements + n;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ if (newNumElements > oldNumCapacity || firstIdx != oldNumElements)
+ {
+ if (newNumElements > oldNumCapacity)
+ {
+ const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements);
+
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, newCapacity, 0);
+
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ 0, firstIdx, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ firstIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + lastIdx), 0)...);
+ swallow((eastl::uninitialized_fill_ptr((Ts*)ppNewLeaf[Indices] + firstIdx, (Ts*)ppNewLeaf[Indices] + lastIdx, args), 0)...);
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = newCapacity;
+ internalDataSize() = allocation.second;
+ }
+ else
+ {
+ swallow((TupleVecLeaf<Indices, Ts>::DoInsertAndFill(firstIdx, n, oldNumElements, args), 0)...);
+ }
+ }
+ else
+ {
+ swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + newNumElements, args), 0)...);
+ }
+ return begin() + firstIdx;
+ }
+
+ iterator insert(const_iterator pos, const_iterator first, const_iterator last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator");
+ if (EASTL_UNLIKELY(!validate_iterator_pair(first, last)))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator pair");
+#endif
+ size_type posIdx = pos - cbegin();
+ size_type firstIdx = first.mIndex;
+ size_type lastIdx = last.mIndex;
+ size_type numToInsert = last - first;
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + numToInsert;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ const void* ppOtherData[sizeof...(Ts)] = {first.mpData[Indices]...};
+ if (newNumElements > oldNumCapacity || posIdx != oldNumElements)
+ {
+ if (newNumElements > oldNumCapacity)
+ {
+ const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements);
+
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, newCapacity, 0);
+
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ 0, posIdx, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ posIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + posIdx + numToInsert), 0)...);
+ swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx,
+ (Ts*)(ppOtherData[Indices]) + lastIdx,
+ (Ts*)ppNewLeaf[Indices] + posIdx), 0)...);
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = newCapacity;
+ internalDataSize() = allocation.second;
+ }
+ else
+ {
+ swallow((TupleVecLeaf<Indices, Ts>::DoInsertRange(
+ (Ts*)(ppOtherData[Indices]) + firstIdx, (Ts*)(ppOtherData[Indices]) + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData + posIdx, oldNumElements), 0)...);
+ }
+ }
+ else
+ {
+ swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx,
+ (Ts*)(ppOtherData[Indices]) + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData + posIdx), 0)...);
+ }
+ return begin() + posIdx;
+ }
+
+ iterator insert(const_iterator pos, const value_tuple* first, const value_tuple* last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator");
+ if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid source pointers");
+#endif
+ size_type posIdx = pos - cbegin();
+ size_type numToInsert = last - first;
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + numToInsert;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ if (newNumElements > oldNumCapacity || posIdx != oldNumElements)
+ {
+ if (newNumElements > oldNumCapacity)
+ {
+ const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements);
+
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, newCapacity, 0);
+
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ 0, posIdx, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ posIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + posIdx + numToInsert), 0)...);
+
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ // Do this after mpData is updated so that we can use new iterators
+ DoUninitializedCopyFromTupleArray(begin() + posIdx, begin() + posIdx + numToInsert, first);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = newCapacity;
+ internalDataSize() = allocation.second;
+ }
+ else
+ {
+ const size_type nExtra = oldNumElements - posIdx;
+ void* ppDataEnd[sizeof...(Ts)] = { (void*)(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements)... };
+ void* ppDataBegin[sizeof...(Ts)] = { (void*)(TupleVecLeaf<Indices, Ts>::mpData + posIdx)... };
+ if (numToInsert < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)...
+ {
+ swallow((eastl::uninitialized_move_ptr((Ts*)ppDataEnd[Indices] - numToInsert,
+ (Ts*)ppDataEnd[Indices], (Ts*)ppDataEnd[Indices]), 0)...);
+ // We need move_backward because of potential overlap issues.
+ swallow((eastl::move_backward((Ts*)ppDataBegin[Indices],
+ (Ts*)ppDataEnd[Indices] - numToInsert, (Ts*)ppDataEnd[Indices]), 0)...);
+
+ DoCopyFromTupleArray(pos, pos + numToInsert, first);
+ }
+ else
+ {
+ size_type numToInitialize = numToInsert - nExtra;
+ swallow((eastl::uninitialized_move_ptr((Ts*)ppDataBegin[Indices],
+ (Ts*)ppDataEnd[Indices], (Ts*)ppDataEnd[Indices] + numToInitialize), 0)...);
+
+ DoCopyFromTupleArray(pos, begin() + oldNumElements, first);
+ DoUninitializedCopyFromTupleArray(begin() + oldNumElements, pos + numToInsert, first + nExtra);
+ }
+ }
+ }
+ else
+ {
+ DoUninitializedCopyFromTupleArray(pos, pos + numToInsert, first);
+ }
+ return begin() + posIdx;
+ }
+
+ iterator erase(const_iterator first, const_iterator last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(first) == isf_none || validate_iterator(last) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator");
+ if (EASTL_UNLIKELY(!validate_iterator_pair(first, last)))
+ EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator pair");
+#endif
+ if (first != last)
+ {
+ size_type firstIdx = first - cbegin();
+ size_type lastIdx = last - cbegin();
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements - (lastIdx - firstIdx);
+ mNumElements = newNumElements;
+ swallow((eastl::move(TupleVecLeaf<Indices, Ts>::mpData + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + firstIdx), 0)...);
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + newNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+ return begin() + first.mIndex;
+ }
+
+ iterator erase_unsorted(const_iterator pos)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::erase_unsorted -- invalid iterator");
+#endif
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements - 1;
+ mNumElements = newNumElements;
+ swallow((eastl::move(TupleVecLeaf<Indices, Ts>::mpData + newNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + (pos - begin())), 0)...);
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + newNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ return begin() + pos.mIndex;
+ }
+
+ void resize(size_type n)
+ {
+ size_type oldNumElements = mNumElements;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = n;
+ if (n > oldNumElements)
+ {
+ if (n > oldNumCapacity)
+ {
+ DoReallocate(oldNumElements, eastl::max<size_type>(GetNewCapacity(oldNumCapacity), n));
+ }
+ swallow((eastl::uninitialized_default_fill_n(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements, n - oldNumElements), 0)...);
+ }
+ else
+ {
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + n,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+ }
+
+ void resize(size_type n, const Ts&... args)
+ {
+ size_type oldNumElements = mNumElements;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = n;
+ if (n > oldNumElements)
+ {
+ if (n > oldNumCapacity)
+ {
+ DoReallocate(oldNumElements, eastl::max<size_type>(GetNewCapacity(oldNumCapacity), n));
+ }
+ swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + n, args), 0)...);
+ }
+ else
+ {
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + n,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+ }
+
+ void reserve(size_type n)
+ {
+ DoConditionalReallocate(mNumElements, mNumCapacity, n);
+ }
+
+ void shrink_to_fit()
+ {
+ this_type temp(move_iterator<iterator>(begin()), move_iterator<iterator>(end()), get_allocator());
+ swap(temp);
+ }
+
+ void clear() EA_NOEXCEPT
+ {
+ size_type oldNumElements = mNumElements;
+ mNumElements = 0;
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+
+ void pop_back()
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements <= 0))
+ EASTL_FAIL_MSG("tuple_vector::pop_back -- container is empty");
+#endif
+ size_type oldNumElements = mNumElements--;
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements - 1,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+
+ void swap(this_type& x)
+ {
+ swallow((eastl::swap(TupleVecLeaf<Indices, Ts>::mpData, x.TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ eastl::swap(mpData, x.mpData);
+ eastl::swap(mNumElements, x.mNumElements);
+ eastl::swap(mNumCapacity, x.mNumCapacity);
+ eastl::swap(get_allocator(), x.get_allocator());
+ eastl::swap(internalDataSize(), x.internalDataSize());
+ }
+
+ void assign(size_type n, const_reference_tuple tup) { assign(n, eastl::get<Indices>(tup)...); }
+ void assign(std::initializer_list<value_tuple> iList) { assign(iList.begin(), iList.end()); }
+
+ void push_back(Ts&&... args) { emplace_back(eastl::forward<Ts>(args)...); }
+ void push_back(const_reference_tuple tup) { push_back(eastl::get<Indices>(tup)...); }
+ void push_back(rvalue_tuple tup) { emplace_back(eastl::forward<Ts>(eastl::get<Indices>(tup))...); }
+
+ void emplace_back(rvalue_tuple tup) { emplace_back(eastl::forward<Ts>(eastl::get<Indices>(tup))...); }
+ void emplace(const_iterator pos, rvalue_tuple tup) { emplace(pos, eastl::forward<Ts>(eastl::get<Indices>(tup))...); }
+
+ iterator insert(const_iterator pos, const Ts&... args) { return insert(pos, 1, args...); }
+ iterator insert(const_iterator pos, Ts&&... args) { return emplace(pos, eastl::forward<Ts>(args)...); }
+ iterator insert(const_iterator pos, rvalue_tuple tup) { return emplace(pos, eastl::forward<Ts>(eastl::get<Indices>(tup))...); }
+ iterator insert(const_iterator pos, const_reference_tuple tup) { return insert(pos, eastl::get<Indices>(tup)...); }
+ iterator insert(const_iterator pos, size_type n, const_reference_tuple tup) { return insert(pos, n, eastl::get<Indices>(tup)...); }
+ iterator insert(const_iterator pos, std::initializer_list<value_tuple> iList) { return insert(pos, iList.begin(), iList.end()); }
+
+ iterator erase(const_iterator pos) { return erase(pos, pos + 1); }
+ reverse_iterator erase(const_reverse_iterator pos) { return reverse_iterator(erase((pos + 1).base(), (pos).base())); }
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last) { return reverse_iterator(erase((last).base(), (first).base())); }
+ reverse_iterator erase_unsorted(const_reverse_iterator pos) { return reverse_iterator(erase_unsorted((pos + 1).base())); }
+
+ void resize(size_type n, const_reference_tuple tup) { resize(n, eastl::get<Indices>(tup)...); }
+
+ bool empty() const EA_NOEXCEPT { return mNumElements == 0; }
+ size_type size() const EA_NOEXCEPT { return mNumElements; }
+ size_type capacity() const EA_NOEXCEPT { return mNumCapacity; }
+
+ iterator begin() EA_NOEXCEPT { return iterator(this, 0); }
+ const_iterator begin() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), 0); }
+ const_iterator cbegin() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), 0); }
+
+ iterator end() EA_NOEXCEPT { return iterator(this, size()); }
+ const_iterator end() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), size()); }
+ const_iterator cend() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), size()); }
+
+ reverse_iterator rbegin() EA_NOEXCEPT { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const EA_NOEXCEPT { return const_reverse_iterator(end()); }
+ const_reverse_iterator crbegin() const EA_NOEXCEPT { return const_reverse_iterator(end()); }
+
+ reverse_iterator rend() EA_NOEXCEPT { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const EA_NOEXCEPT { return const_reverse_iterator(begin()); }
+ const_reverse_iterator crend() const EA_NOEXCEPT { return const_reverse_iterator(begin()); }
+
+ ptr_tuple data() EA_NOEXCEPT { return ptr_tuple(TupleVecLeaf<Indices, Ts>::mpData...); }
+ const_ptr_tuple data() const EA_NOEXCEPT { return const_ptr_tuple(TupleVecLeaf<Indices, Ts>::mpData...); }
+
+ reference_tuple at(size_type n)
+ {
+#if EASTL_EXCEPTIONS_ENABLED
+ if (EASTL_UNLIKELY(n >= mNumElements))
+ throw std::out_of_range("tuple_vector::at -- out of range");
+#elif EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(n >= mNumElements))
+ EASTL_FAIL_MSG("tuple_vector::at -- out of range");
+#endif
+ return reference_tuple(*(TupleVecLeaf<Indices, Ts>::mpData + n)...);
+ }
+
+ const_reference_tuple at(size_type n) const
+ {
+#if EASTL_EXCEPTIONS_ENABLED
+ if (EASTL_UNLIKELY(n >= mNumElements))
+ throw std::out_of_range("tuple_vector::at -- out of range");
+#elif EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(n >= mNumElements))
+ EASTL_FAIL_MSG("tuple_vector::at -- out of range");
+#endif
+ return const_reference_tuple(*(TupleVecLeaf<Indices, Ts>::mpData + n)...);
+ }
+
+ reference_tuple operator[](size_type n) { return at(n); }
+ const_reference_tuple operator[](size_type n) const { return at(n); }
+
+ reference_tuple front()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("tuple_vector::front -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return at(0);
+ }
+
+ const_reference_tuple front() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("tuple_vector::front -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return at(0);
+ }
+
+ reference_tuple back()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("tuple_vector::back -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return at(size() - 1);
+ }
+
+ const_reference_tuple back() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("tuple_vector::back -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return at(size() - 1);
+ }
+
+ template <size_type I>
+ tuplevec_element_t<I, Ts...>* get()
+ {
+ typedef tuplevec_element_t<I, Ts...> Element;
+ return TupleVecLeaf<I, Element>::mpData;
+ }
+ template <size_type I>
+ const tuplevec_element_t<I, Ts...>* get() const
+ {
+ typedef tuplevec_element_t<I, Ts...> Element;
+ return TupleVecLeaf<I, Element>::mpData;
+ }
+
+ template <typename T>
+ T* get()
+ {
+ typedef tuplevec_index<T, TupleTypes<Ts...>> Index;
+ return TupleVecLeaf<Index::index, T>::mpData;
+ }
+ template <typename T>
+ const T* get() const
+ {
+ typedef tuplevec_index<T, TupleTypes<Ts...>> Index;
+ return TupleVecLeaf<Index::index, T>::mpData;
+ }
+
+ this_type& operator=(const this_type& other)
+ {
+ if (this != &other)
+ {
+ clear();
+ assign(other.begin(), other.end());
+ }
+ return *this;
+ }
+
+ this_type& operator=(this_type&& other)
+ {
+ if (this != &other)
+ {
+ swap(other);
+ }
+ return *this;
+ }
+
+ this_type& operator=(std::initializer_list<value_tuple> iList)
+ {
+ assign(iList.begin(), iList.end());
+ return *this;
+ }
+
+ bool validate() const EA_NOEXCEPT
+ {
+ if (mNumElements > mNumCapacity)
+ return false;
+ if (!(variadicAnd(mpData <= TupleVecLeaf<Indices, Ts>::mpData...)))
+ return false;
+ void* pDataEnd = (void*)((uintptr_t)mpData + internalDataSize());
+ if (!(variadicAnd(pDataEnd >= TupleVecLeaf<Indices, Ts>::mpData...)))
+ return false;
+ return true;
+ }
+
+ int validate_iterator(const_iterator iter) const EA_NOEXCEPT
+ {
+ if (!(variadicAnd(iter.mpData[Indices] == TupleVecLeaf<Indices, Ts>::mpData...)))
+ return isf_none;
+ if (iter.mIndex < mNumElements)
+ return (isf_valid | isf_current | isf_can_dereference);
+ if (iter.mIndex <= mNumElements)
+ return (isf_valid | isf_current);
+ return isf_none;
+ }
+
+ static bool validate_iterator_pair(const_iterator first, const_iterator last) EA_NOEXCEPT
+ {
+ return (first.mIndex <= last.mIndex) && variadicAnd(first.mpData[Indices] == last.mpData[Indices]...);
+ }
+
+ template <typename Iterator, typename = typename enable_if<is_iterator_wrapper<Iterator>::value, bool>::type>
+ int validate_iterator(Iterator iter) const EA_NOEXCEPT { return validate_iterator(unwrap_iterator(iter)); }
+
+ template <typename Iterator, typename = typename enable_if<is_iterator_wrapper<Iterator>::value, bool>::type>
+ static bool validate_iterator_pair(Iterator first, Iterator last) EA_NOEXCEPT { return validate_iterator_pair(unwrap_iterator(first), unwrap_iterator(last)); }
+
+ allocator_type& get_allocator() EA_NOEXCEPT { return mDataSizeAndAllocator.second(); }
+ const allocator_type& get_allocator() const EA_NOEXCEPT { return mDataSizeAndAllocator.second(); }
+
+ void set_allocator(const allocator_type& alloc) { mDataSizeAndAllocator.second() = alloc; }
+
+protected:
+
+ void* mpData = nullptr;
+ size_type mNumElements = 0;
+ size_type mNumCapacity = 0;
+
+ compressed_pair<size_type, allocator_type> mDataSizeAndAllocator;
+
+ size_type& internalDataSize() EA_NOEXCEPT { return mDataSizeAndAllocator.first(); }
+ size_type const& internalDataSize() const EA_NOEXCEPT { return mDataSizeAndAllocator.first(); }
+
+ friend struct TupleRecurser<>;
+ template<typename... Us>
+ friend struct TupleRecurser;
+
+ template <typename MoveIterBase>
+ void DoInitFromIterator(move_iterator<MoveIterBase> begin, move_iterator<MoveIterBase> end)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(!validate_iterator_pair(begin, end)))
+ EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator pair");
+#endif
+ size_type newNumElements = (size_type)(end - begin);
+ const void* ppOtherData[sizeof...(Ts)] = { begin.base().mpData[Indices]... };
+ size_type beginIdx = begin.base().mIndex;
+ size_type endIdx = end.base().mIndex;
+ DoConditionalReallocate(0, mNumCapacity, newNumElements);
+ mNumElements = newNumElements;
+ swallow((eastl::uninitialized_move_ptr(eastl::move_iterator<Ts*>((Ts*)(ppOtherData[Indices]) + beginIdx),
+ eastl::move_iterator<Ts*>((Ts*)(ppOtherData[Indices]) + endIdx),
+ TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ }
+
+ void DoInitFromIterator(const_iterator begin, const_iterator end)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(!validate_iterator_pair(begin, end)))
+ EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator pair");
+#endif
+ size_type newNumElements = (size_type)(end - begin);
+ const void* ppOtherData[sizeof...(Ts)] = { begin.mpData[Indices]... };
+ size_type beginIdx = begin.mIndex;
+ size_type endIdx = end.mIndex;
+ DoConditionalReallocate(0, mNumCapacity, newNumElements);
+ mNumElements = newNumElements;
+ swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + beginIdx,
+ (Ts*)(ppOtherData[Indices]) + endIdx,
+ TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ }
+
+ void DoInitFillTuple(size_type n, const_reference_tuple tup) { DoInitFillArgs(n, eastl::get<Indices>(tup)...); }
+
+ void DoInitFillArgs(size_type n, const Ts&... args)
+ {
+ DoConditionalReallocate(0, mNumCapacity, n);
+ mNumElements = n;
+ swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + n, args), 0)...);
+ }
+
+ void DoInitDefaultFill(size_type n)
+ {
+ DoConditionalReallocate(0, mNumCapacity, n);
+ mNumElements = n;
+ swallow((eastl::uninitialized_default_fill_n(TupleVecLeaf<Indices, Ts>::mpData, n), 0)...);
+ }
+
+ void DoInitFromTupleArray(const value_tuple* first, const value_tuple* last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr))
+ EASTL_FAIL_MSG("tuple_vector::ctor from tuple array -- invalid ptrs");
+#endif
+ size_type newNumElements = last - first;
+ DoConditionalReallocate(0, mNumCapacity, newNumElements);
+ mNumElements = newNumElements;
+ DoUninitializedCopyFromTupleArray(begin(), end(), first);
+ }
+
+ void DoCopyFromTupleArray(iterator destPos, iterator destEnd, const value_tuple* srcTuple)
+ {
+ // assign to constructed region
+ while (destPos < destEnd)
+ {
+ *destPos = *srcTuple;
+ ++destPos;
+ ++srcTuple;
+ }
+ }
+
+ void DoUninitializedCopyFromTupleArray(iterator destPos, iterator destEnd, const value_tuple* srcTuple)
+ {
+ // placement-new/copy-ctor to unconstructed regions
+ while (destPos < destEnd)
+ {
+ swallow(::new(eastl::get<Indices>(destPos.MakePointer())) Ts(eastl::get<Indices>(*srcTuple))...);
+ ++destPos;
+ ++srcTuple;
+ }
+ }
+
+ // Try to grow the size of the container "naturally" given the number of elements being used
+ void DoGrow(size_type oldNumElements, size_type oldNumCapacity, size_type requiredCapacity)
+ {
+ if (requiredCapacity > oldNumCapacity)
+ DoReallocate(oldNumElements, GetNewCapacity(requiredCapacity));
+ }
+
+ // Reallocate to the newCapacity (IFF it's actually larger, though)
+ void DoConditionalReallocate(size_type oldNumElements, size_type oldNumCapacity, size_type requiredCapacity)
+ {
+ if (requiredCapacity > oldNumCapacity)
+ DoReallocate(oldNumElements, requiredCapacity);
+ }
+
+ void DoReallocate(size_type oldNumElements, size_type requiredCapacity)
+ {
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, requiredCapacity, 0);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(0, oldNumElements, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = requiredCapacity;
+ internalDataSize() = allocation.second;
+ }
+
+ size_type GetNewCapacity(size_type oldNumCapacity)
+ {
+ return (oldNumCapacity > 0) ? (2 * oldNumCapacity) : 1;
+ }
+};
+
+} // namespace TupleVecInternal
+
+// Move_iterator specialization for TupleVecIter.
+// An rvalue reference of a move_iterator would normaly be "tuple<Ts...> &&" whereas
+// what we actually want is "tuple<Ts&&...>". This specialization gives us that.
+template <eastl_size_t... Indices, typename... Ts>
+class move_iterator<TupleVecInternal::TupleVecIter<index_sequence<Indices...>, Ts...>>
+{
+public:
+ typedef TupleVecInternal::TupleVecIter<index_sequence<Indices...>, Ts...> iterator_type;
+ // a wrapping iterator type.
+ typedef iterator_traits<iterator_type> traits_type;
+ typedef typename traits_type::iterator_category iterator_category;
+ typedef typename traits_type::value_type value_type;
+ typedef typename traits_type::difference_type difference_type;
+ typedef typename traits_type::pointer pointer;
+ typedef tuple<Ts&&...> reference;
+ typedef move_iterator<iterator_type> this_type;
+
+protected:
+ iterator_type mIterator;
+
+public:
+ move_iterator() : mIterator() {}
+ explicit move_iterator(iterator_type mi) : mIterator(mi) {}
+
+ template <typename U>
+ move_iterator(const move_iterator<U>& mi) : mIterator(mi.base()) {}
+
+ iterator_type base() const { return mIterator; }
+ reference operator*() const { return eastl::move(MakeReference()); }
+ pointer operator->() const { return mIterator; }
+
+ this_type& operator++() { ++mIterator; return *this; }
+ this_type operator++(int) {
+ this_type tempMoveIterator = *this;
+ ++mIterator;
+ return tempMoveIterator;
+ }
+
+ this_type& operator--() { --mIterator; return *this; }
+ this_type operator--(int)
+ {
+ this_type tempMoveIterator = *this;
+ --mIterator;
+ return tempMoveIterator;
+ }
+
+ this_type operator+(difference_type n) const { return move_iterator(mIterator + n); }
+ this_type& operator+=(difference_type n)
+ {
+ mIterator += n;
+ return *this;
+ }
+
+ this_type operator-(difference_type n) const { return move_iterator(mIterator - n); }
+ this_type& operator-=(difference_type n)
+ {
+ mIterator -= n;
+ return *this;
+ }
+
+ difference_type operator-(const this_type& rhs) const { return mIterator - rhs.mIterator; }
+ bool operator<(const this_type& rhs) const { return mIterator < rhs.mIterator; }
+ bool operator>(const this_type& rhs) const { return mIterator > rhs.mIterator; }
+ bool operator>=(const this_type& rhs) const { return mIterator >= rhs.mIterator; }
+ bool operator<=(const this_type& rhs) const { return mIterator <= rhs.mIterator; }
+
+ reference operator[](difference_type n) const { return *(*this + n); }
+
+private:
+ reference MakeReference() const
+ {
+ return reference(eastl::move(((Ts*)mIterator.mpData[Indices])[mIterator.mIndex])...);
+ }
+
+ // Unwrapping interface, not part of the public API.
+ iterator_type unwrap() const { return mIterator; }
+
+ // The unwrapper helpers need access to unwrap().
+ friend is_iterator_wrapper_helper<this_type, true>;
+ friend is_iterator_wrapper<this_type>;
+};
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator==(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin()));
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator!=(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return ((a.size() != b.size()) || !eastl::equal(a.begin(), a.end(), b.begin()));
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator<(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator>(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return b < a;
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator<=(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return !(b < a);
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator>=(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return !(a < b);
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline void swap(TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ a.swap(b);
+}
+
+// A customization of swap is made for r-values of tuples-of-references -
+// normally, swapping rvalues doesn't make sense, but in this case, we do want to
+// swap the contents of what the tuple-of-references are referring to
+//
+// This is required due to TupleVecIter returning a value-type for its dereferencing,
+// as opposed to an actual real reference of some sort
+template<typename... Ts>
+inline
+typename enable_if<conjunction<is_swappable<Ts>...>::value>::type
+swap(tuple<Ts&...>&& a, tuple<Ts&...>&& b)
+{
+ a.swap(b);
+}
+
+template<typename... Ts>
+inline
+typename enable_if<!conjunction<is_swappable<Ts>...>::value>::type
+swap(tuple<Ts&...>&& a, tuple<Ts&...>&& b) = delete;
+
+
+// External interface of tuple_vector
+template <typename... Ts>
+class tuple_vector : public TupleVecInternal::TupleVecImpl<EASTLAllocatorType, make_index_sequence<sizeof...(Ts)>, Ts...>
+{
+ typedef tuple_vector<Ts...> this_type;
+ typedef TupleVecInternal::TupleVecImpl<EASTLAllocatorType, make_index_sequence<sizeof...(Ts)>, Ts...> base_type;
+ using base_type::base_type;
+
+public:
+ this_type& operator=(std::initializer_list<typename base_type::value_tuple> iList)
+ {
+ base_type::operator=(iList);
+ return *this;
+ }
+};
+
+// Variant of tuple_vector that allows a user-defined allocator type (can't mix default template params with variadics)
+template <typename AllocatorType, typename... Ts>
+class tuple_vector_alloc
+ : public TupleVecInternal::TupleVecImpl<AllocatorType, make_index_sequence<sizeof...(Ts)>, Ts...>
+{
+ typedef tuple_vector_alloc<AllocatorType, Ts...> this_type;
+ typedef TupleVecInternal::TupleVecImpl<AllocatorType, make_index_sequence<sizeof...(Ts)>, Ts...> base_type;
+ using base_type::base_type;
+
+public:
+
+ this_type& operator=(std::initializer_list<typename base_type::value_tuple> iList)
+ {
+ base_type::operator=(iList);
+ return *this;
+ }
+};
+
+} // namespace eastl
+
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+
+#endif // EASTL_TUPLEVECTOR_H
diff --git a/EASTL/include/EASTL/chrono.h b/EASTL/include/EASTL/chrono.h
new file mode 100644
index 0000000..4b94fe4
--- /dev/null
+++ b/EASTL/include/EASTL/chrono.h
@@ -0,0 +1,759 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the eastl::chrono specification which is part of the
+// standard STL date and time library. eastl::chrono implements all the
+// mechanisms required to capture and manipulate times retrieved from the
+// provided clocks. It implements the all of the features to allow type safe
+// durations to be used in code.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_CHRONO_H
+#define EASTL_CHRONO_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/numeric_limits.h>
+#include <EASTL/ratio.h>
+
+
+// TODO: move to platform specific cpp or header file
+#if defined EA_PLATFORM_MICROSOFT
+ EA_DISABLE_ALL_VC_WARNINGS()
+
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+
+ #undef NOMINMAX
+ #define NOMINMAX
+
+ #include <Windows.h>
+
+ #ifdef min
+ #undef min
+ #endif
+ #ifdef max
+ #undef max
+ #endif
+
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+#if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW)
+ // Nothing to do
+#elif defined(EA_PLATFORM_APPLE)
+ #include <mach/mach_time.h>
+#elif defined(EA_PLATFORM_POSIX) || defined(EA_PLATFORM_MINGW) || defined(EA_PLATFORM_ANDROID)
+ // Posix means Linux, Unix, and Macintosh OSX, among others (including Linux-based mobile platforms).
+ #if defined(EA_PLATFORM_MINGW)
+ #include <pthread_time.h>
+ #endif
+ #include <time.h>
+ #if (defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC))
+ #include <errno.h>
+ #else
+ #include <sys/time.h>
+ #include <unistd.h>
+ #endif
+#endif
+
+
+namespace eastl
+{
+namespace chrono
+{
+ ///////////////////////////////////////////////////////////////////////////////
+ // treat_as_floating_point
+ ///////////////////////////////////////////////////////////////////////////////
+ template <class Rep>
+ struct treat_as_floating_point : is_floating_point<Rep> {};
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.12.4, duration_values
+ ///////////////////////////////////////////////////////////////////////////////
+ template <class Rep>
+ struct duration_values
+ {
+ public:
+ EASTL_FORCE_INLINE static EA_CONSTEXPR Rep zero() { return Rep(0); }
+ EASTL_FORCE_INLINE static EA_CONSTEXPR Rep max() { return eastl::numeric_limits<Rep>::max(); }
+ EASTL_FORCE_INLINE static EA_CONSTEXPR Rep min() { return eastl::numeric_limits<Rep>::lowest(); }
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // duration fwd_decl
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename Rep, typename Period = ratio<1>>
+ class duration;
+
+
+ namespace Internal
+ {
+ ///////////////////////////////////////////////////////////////////////////////
+ // IsRatio
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename> struct IsRatio : eastl::false_type {};
+ template <intmax_t N, intmax_t D> struct IsRatio<ratio<N, D>> : eastl::true_type {};
+ template <intmax_t N, intmax_t D> struct IsRatio<const ratio<N, D>> : eastl::true_type {};
+ template <intmax_t N, intmax_t D> struct IsRatio<volatile ratio<N, D>> : eastl::true_type {};
+ template <intmax_t N, intmax_t D> struct IsRatio<const volatile ratio<N, D>> : eastl::true_type {};
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // IsDuration
+ ///////////////////////////////////////////////////////////////////////////////
+ template<typename> struct IsDuration : eastl::false_type{};
+ template<typename Rep, typename Period> struct IsDuration<duration<Rep, Period>> : eastl::true_type{};
+ template<typename Rep, typename Period> struct IsDuration<const duration<Rep, Period>> : eastl::true_type{};
+ template<typename Rep, typename Period> struct IsDuration<volatile duration<Rep, Period>> : eastl::true_type{};
+ template<typename Rep, typename Period> struct IsDuration<const volatile duration<Rep, Period>> : eastl::true_type{};
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // RatioGCD
+ ///////////////////////////////////////////////////////////////////////////////
+ template <class Period1, class Period2>
+ struct RatioGCD
+ {
+ static_assert(IsRatio<Period1>::value, "Period1 is not a eastl::ratio type");
+ static_assert(IsRatio<Period2>::value, "Period2 is not a eastl::ratio type");
+
+ typedef ratio<eastl::Internal::gcd<Period1::num, Period2::num>::value,
+ eastl::Internal::lcm<Period1::den, Period2::den>::value> type;
+ };
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.12.5.7, duration_cast
+ ///////////////////////////////////////////////////////////////////////////////
+ namespace Internal
+ {
+ template <typename FromDuration,
+ typename ToDuration,
+ typename CommonPeriod =
+ typename ratio_divide<typename FromDuration::period, typename ToDuration::period>::type,
+ typename CommonRep = typename eastl::decay<typename eastl::common_type<typename ToDuration::rep,
+ typename FromDuration::rep,
+ intmax_t>::type>::type,
+ bool = CommonPeriod::num == 1,
+ bool = CommonPeriod::den == 1>
+ struct DurationCastImpl;
+
+ template <typename FromDuration, typename ToDuration, typename CommonPeriod, typename CommonRep>
+ struct DurationCastImpl<FromDuration, ToDuration, CommonPeriod, CommonRep, true, true>
+ {
+ inline static ToDuration DoCast(const FromDuration& fd)
+ {
+ return ToDuration(static_cast<typename ToDuration::rep>(fd.count()));
+ }
+ };
+
+ template <typename FromDuration, typename ToDuration, typename CommonPeriod, typename CommonRep>
+ struct DurationCastImpl<FromDuration, ToDuration, CommonPeriod, CommonRep, false, true>
+ {
+ inline static ToDuration DoCast(const FromDuration& d)
+ {
+ return ToDuration(static_cast<typename ToDuration::rep>(static_cast<CommonRep>(d.count()) *
+ static_cast<CommonRep>(CommonPeriod::num)));
+ }
+ };
+
+ template <typename FromDuration, typename ToDuration, typename CommonPeriod, typename CommonRep>
+ struct DurationCastImpl<FromDuration, ToDuration, CommonPeriod, CommonRep, true, false>
+ {
+ inline static ToDuration DoCast(const FromDuration& d)
+ {
+ return ToDuration(static_cast<typename ToDuration::rep>(static_cast<CommonRep>(d.count()) /
+ static_cast<CommonRep>(CommonPeriod::den)));
+ }
+ };
+
+ template <typename FromDuration, typename ToDuration, typename CommonPeriod, typename CommonRep>
+ struct DurationCastImpl<FromDuration, ToDuration, CommonPeriod, CommonRep, false, false>
+ {
+ inline static ToDuration DoCast(const FromDuration& d)
+ {
+ return ToDuration(static_cast<typename ToDuration::rep>(static_cast<CommonRep>(d.count()) *
+ static_cast<CommonRep>(CommonPeriod::num) /
+ static_cast<CommonRep>(CommonPeriod::den)));
+ }
+ };
+ }; // namespace Internal
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // duration_cast
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename ToDuration, typename Rep, typename Period>
+ inline typename eastl::enable_if<Internal::IsDuration<ToDuration>::value, ToDuration>::type
+ duration_cast(const duration<Rep, Period>& d)
+ {
+ typedef typename duration<Rep, Period>::this_type FromDuration;
+ return Internal::DurationCastImpl<FromDuration, ToDuration>::DoCast(d);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // duration
+ ///////////////////////////////////////////////////////////////////////////////
+ template <class Rep, class Period>
+ class duration
+ {
+ Rep mRep;
+
+ public:
+ typedef Rep rep;
+ typedef Period period;
+ typedef duration<Rep, Period> this_type;
+
+ #if defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS)
+ EA_CONSTEXPR duration()
+ : mRep() {}
+
+ duration(const duration& other)
+ : mRep(Rep(other.mRep)) {}
+
+ duration& operator=(const duration& other)
+ { mRep = other.mRep; return *this; }
+ #else
+ EA_CONSTEXPR duration() = default;
+ duration(const duration&) = default;
+ duration& operator=(const duration&) = default;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // conversion constructors
+ ///////////////////////////////////////////////////////////////////////////////
+ template <class Rep2>
+ inline EA_CONSTEXPR explicit duration(
+ const Rep2& rep2,
+ typename eastl::enable_if<eastl::is_convertible<Rep2, Rep>::value &&
+ (treat_as_floating_point<Rep>::value ||
+ !treat_as_floating_point<Rep2>::value)>::type** = 0)
+ : mRep(static_cast<Rep>(rep2)) {}
+
+
+ template <class Rep2, class Period2>
+ EA_CONSTEXPR duration(const duration<Rep2, Period2>& d2,
+ typename eastl::enable_if<treat_as_floating_point<Rep>::value ||
+ (eastl::ratio_divide<Period2, Period>::type::den == 1 &&
+ !treat_as_floating_point<Rep2>::value),
+ void>::type** = 0)
+ : mRep(duration_cast<duration>(d2).count()) {}
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // returns the count of ticks
+ ///////////////////////////////////////////////////////////////////////////////
+ EA_CONSTEXPR Rep count() const { return mRep; }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // static accessors of special duration values
+ ///////////////////////////////////////////////////////////////////////////////
+ EA_CONSTEXPR inline static duration zero() { return duration(duration_values<Rep>::zero()); }
+ EA_CONSTEXPR inline static duration min() { return duration(duration_values<Rep>::min()); }
+ EA_CONSTEXPR inline static duration max() { return duration(duration_values<Rep>::max()); }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // const arithmetic operations
+ ///////////////////////////////////////////////////////////////////////////////
+ EA_CONSTEXPR inline duration operator+() const { return *this; }
+ EA_CONSTEXPR inline duration operator-() const { return duration(0-mRep); }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // arithmetic operations
+ ///////////////////////////////////////////////////////////////////////////////
+ inline duration operator++(int) { return duration(mRep++); }
+ inline duration operator--(int) { return duration(mRep--); }
+ inline duration& operator++() { ++mRep; return *this; }
+ inline duration& operator--() { --mRep; return *this; }
+ inline duration& operator+=(const duration& d) { mRep += d.count(); return *this; }
+ inline duration& operator-=(const duration& d) { mRep -= d.count(); return *this; }
+ inline duration& operator*=(const Rep& rhs) { mRep *= rhs; return *this; }
+ inline duration& operator/=(const Rep& rhs) { mRep /= rhs; return *this; }
+ inline duration& operator%=(const Rep& rhs) { mRep %= rhs; return *this; }
+ inline duration& operator%=(const duration& d) { mRep %= d.count(); return *this; }
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.12.5.5, arithmetic operations with durations as arguments
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type EASTL_FORCE_INLINE
+ operator+(const duration<Rep1, Period1>& lhs, const duration<Rep2, Period2>& rhs)
+ {
+ typedef typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type common_duration_t;
+ return common_duration_t(common_duration_t(lhs).count() + common_duration_t(rhs).count());
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type EASTL_FORCE_INLINE
+ operator-(const duration<Rep1, Period1>& lhs, const duration<Rep2, Period2>& rhs)
+ {
+ typedef typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type common_duration_t;
+ return common_duration_t(common_duration_t(lhs).count() - common_duration_t(rhs).count());
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2>
+ duration<typename eastl::common_type<Rep1, Rep2>::type, Period1> EASTL_FORCE_INLINE
+ operator*(const duration<Rep1, Period1>& lhs, const Rep2& rhs)
+ {
+ typedef duration<typename eastl::common_type<Rep1, Rep2>::type, Period1> common_duration_t;
+ return common_duration_t(common_duration_t(lhs).count() * rhs);
+ }
+
+ template <typename Rep1, typename Rep2, typename Period2>
+ duration<typename eastl::common_type<Rep1, Rep2>::type, Period2> EASTL_FORCE_INLINE
+ operator*(const Rep1& lhs, const duration<Rep2, Period2>& rhs)
+ {
+ typedef duration<typename eastl::common_type<Rep1, Rep2>::type, Period2> common_duration_t;
+ return common_duration_t(lhs * common_duration_t(rhs).count());
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2>
+ duration<typename eastl::common_type<Rep1, Rep2>::type, Period1> EASTL_FORCE_INLINE
+ operator/(const duration<Rep1, Period1>& lhs, const Rep2& rhs)
+ {
+ typedef duration<typename eastl::common_type<Rep1, Rep2>::type, Period1> common_duration_t;
+ return common_duration_t(common_duration_t(lhs).count() / rhs);
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type EASTL_FORCE_INLINE
+ operator/(const duration<Rep1, Period1>& lhs, const duration<Rep2, Period2>& rhs)
+ {
+ typedef typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type common_duration_t;
+ return common_duration_t(common_duration_t(lhs).count() / common_duration_t(rhs).count());
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2>
+ duration<typename eastl::common_type<Rep1, Rep2>::type, Period1> EASTL_FORCE_INLINE
+ operator%(const duration<Rep1, Period1>& lhs, const Rep2& rhs)
+ {
+ typedef duration<typename eastl::common_type<Rep1, Rep2>::type, Period1> common_duration_t;
+ return common_duration_t(common_duration_t(lhs).count() % rhs);
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type EASTL_FORCE_INLINE
+ operator%(const duration<Rep1, Period1>& lhs, const duration<Rep2, Period2>& rhs)
+ {
+ typedef typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type common_duration_t;
+ return common_duration_t(common_duration_t(lhs).count() % common_duration_t(rhs).count());
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.12.5.6, compares two durations
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ EASTL_FORCE_INLINE bool operator==(const duration<Rep1, Period1>& lhs,
+ const duration<Rep2, Period2>& rhs)
+ {
+ typedef typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type common_duration_t;
+ return common_duration_t(lhs).count() == common_duration_t(rhs).count();
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ EASTL_FORCE_INLINE bool operator<(const duration<Rep1, Period1>& lhs,
+ const duration<Rep2, Period2>& rhs)
+ {
+ typedef typename eastl::common_type<duration<Rep1, Period1>, duration<Rep2, Period2>>::type common_duration_t;
+ return common_duration_t(lhs).count() < common_duration_t(rhs).count();
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ EASTL_FORCE_INLINE bool operator!=(const duration<Rep1, Period1>& lhs,
+ const duration<Rep2, Period2>& rhs)
+ {
+ return !(lhs == rhs);
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ EASTL_FORCE_INLINE bool operator<=(const duration<Rep1, Period1>& lhs,
+ const duration<Rep2, Period2>& rhs)
+ {
+ return !(rhs < lhs);
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ EASTL_FORCE_INLINE bool operator>(const duration<Rep1, Period1>& lhs,
+ const duration<Rep2, Period2>& rhs)
+ {
+ return rhs < lhs;
+ }
+
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ EASTL_FORCE_INLINE bool operator>=(const duration<Rep1, Period1>& lhs,
+ const duration<Rep2, Period2>& rhs)
+ {
+ return !(lhs < rhs);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // standard duration units
+ ///////////////////////////////////////////////////////////////////////////////
+ typedef duration<long long, nano> nanoseconds;
+ typedef duration<long long, micro> microseconds;
+ typedef duration<long long, milli> milliseconds;
+ typedef duration<long long> seconds;
+ typedef duration<int, ratio<60>> minutes;
+ typedef duration<int, ratio<3600>> hours;
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.12.6, time_point
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename Clock, typename Duration = typename Clock::duration>
+ class time_point
+ {
+ Duration mDuration;
+
+ public:
+ typedef Clock clock;
+ typedef Duration duration;
+ typedef typename Duration::rep rep;
+ typedef typename Duration::period period;
+
+ inline EA_CONSTEXPR time_point() : mDuration(Duration::zero()) {}
+ EA_CONSTEXPR explicit time_point(const Duration& other) : mDuration(other) {}
+
+ template <typename Duration2>
+ inline EA_CONSTEXPR time_point(
+ const time_point<Clock, Duration2>& t,
+ typename eastl::enable_if<eastl::is_convertible<Duration2, Duration>::value>::type** = 0)
+ : mDuration(t.time_since_epoch()) {}
+
+ EA_CONSTEXPR Duration time_since_epoch() const { return mDuration; }
+
+ time_point& operator+=(const Duration& d) { mDuration += d; return *this; }
+ time_point& operator-=(const Duration& d) { mDuration -= d; return *this; }
+
+ static EA_CONSTEXPR time_point min() { return time_point(Duration::min()); }
+ static EA_CONSTEXPR time_point max() { return time_point(Duration::max()); }
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.12.6.5, time_point arithmetic
+ ///////////////////////////////////////////////////////////////////////////////
+ template <class Clock, class Duration1, class Rep2, class Period2>
+ inline EA_CONSTEXPR time_point<Clock, typename eastl::common_type<Duration1, duration<Rep2, Period2>>::type>
+ operator+(const time_point<Clock, Duration1>& lhs, const duration<Rep2, Period2>& rhs)
+ {
+ typedef time_point<Clock, typename eastl::common_type<Duration1, duration<Rep2, Period2>>::type> common_timepoint_t;
+ return common_timepoint_t(lhs.time_since_epoch() + rhs);
+ }
+
+ template <class Rep1, class Period1, class Clock, class Duration2>
+ inline EA_CONSTEXPR time_point<Clock, typename eastl::common_type<Duration2, duration<Rep1, Period1>>::type>
+ operator+(const duration<Rep1, Period1>& lhs, const time_point<Clock, Duration2>& rhs)
+ {
+ typedef time_point<Clock, typename eastl::common_type<Duration2, duration<Rep1, Period1>>::type> common_timepoint_t;
+ return common_timepoint_t(lhs + rhs.time_since_epoch());
+ }
+
+ template <class Clock, class Duration1, class Rep2, class Period2>
+ inline EA_CONSTEXPR time_point<Clock, typename eastl::common_type<Duration1, duration<Rep2, Period2>>::type>
+ operator-(const time_point<Clock, Duration1>& lhs, const duration<Rep2, Period2>& rhs)
+ {
+ typedef time_point<Clock, typename eastl::common_type<Duration1, duration<Rep2, Period2>>::type> common_timepoint_t;
+ return common_timepoint_t(lhs.time_since_epoch() - rhs);
+ }
+
+ template <class Clock, class Duration1, class Duration2>
+ inline EA_CONSTEXPR typename eastl::common_type<Duration1, Duration2>::type operator-(
+ const time_point<Clock, Duration1>& lhs,
+ const time_point<Clock, Duration2>& rhs)
+ {
+ return lhs.time_since_epoch() - rhs.time_since_epoch();
+ }
+
+ template <class Clock, class Duration1, class Duration2>
+ inline EA_CONSTEXPR bool operator==(const time_point<Clock, Duration1>& lhs,
+ const time_point<Clock, Duration2>& rhs)
+ {
+ return lhs.time_since_epoch() == rhs.time_since_epoch();
+ }
+
+ template <class Clock, class Duration1, class Duration2>
+ inline EA_CONSTEXPR bool operator!=(const time_point<Clock, Duration1>& lhs,
+ const time_point<Clock, Duration2>& rhs)
+ {
+ return !(lhs == rhs);
+ }
+
+ template <class Clock, class Duration1, class Duration2>
+ inline EA_CONSTEXPR bool operator<(const time_point<Clock, Duration1>& lhs, const time_point<Clock, Duration2>& rhs)
+ {
+ return lhs.time_since_epoch() < rhs.time_since_epoch();
+ }
+
+ template <class Clock, class Duration1, class Duration2>
+ inline EA_CONSTEXPR bool operator<=(const time_point<Clock, Duration1>& lhs,
+ const time_point<Clock, Duration2>& rhs)
+ {
+ return !(rhs < lhs);
+ }
+
+ template <class Clock, class Duration1, class Duration2>
+ inline EA_CONSTEXPR bool operator>(const time_point<Clock, Duration1>& lhs, const time_point<Clock, Duration2>& rhs)
+ {
+ return rhs < lhs;
+ }
+
+ template <class Clock, class Duration1, class Duration2>
+ inline EA_CONSTEXPR bool operator>=(const time_point<Clock, Duration1>& lhs,
+ const time_point<Clock, Duration2>& rhs)
+ {
+ return !(lhs < rhs);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.12.6.7, time_point_cast
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename ToDuration, typename Clock, typename Duration>
+ EA_CONSTEXPR time_point<Clock, ToDuration> time_point_cast(
+ const time_point<Clock, Duration>& t,
+ typename eastl::enable_if<Internal::IsDuration<ToDuration>::value>::type** = 0)
+ {
+ return time_point<Clock, ToDuration>(duration_cast<ToDuration>(t.time_since_epoch()));
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 20.12.7, clocks
+ ///////////////////////////////////////////////////////////////////////////////
+
+ namespace Internal
+ {
+ #if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW)
+ #define EASTL_NS_PER_TICK 1
+ #elif defined EA_PLATFORM_SONY
+ #define EASTL_NS_PER_TICK 1
+ #elif defined EA_PLATFORM_POSIX
+ #define EASTL_NS_PER_TICK _XTIME_NSECS_PER_TICK
+ #else
+ #define EASTL_NS_PER_TICK 100
+ #endif
+
+ #if defined(EA_PLATFORM_POSIX)
+ typedef chrono::nanoseconds::period SystemClock_Period;
+ typedef chrono::nanoseconds::period SteadyClock_Period;
+ #else
+ typedef eastl::ratio_multiply<eastl::ratio<EASTL_NS_PER_TICK, 1>, nano>::type SystemClock_Period;
+ typedef eastl::ratio_multiply<eastl::ratio<EASTL_NS_PER_TICK, 1>, nano>::type SteadyClock_Period;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // Internal::GetTicks
+ ///////////////////////////////////////////////////////////////////////////////
+ inline uint64_t GetTicks()
+ {
+ #if defined EA_PLATFORM_MICROSOFT
+ auto queryFrequency = []
+ {
+ LARGE_INTEGER frequency;
+ QueryPerformanceFrequency(&frequency);
+ return double(1000000000.0L / (long double)frequency.QuadPart); // nanoseconds per tick
+ };
+
+ auto queryCounter = []
+ {
+ LARGE_INTEGER counter;
+ QueryPerformanceCounter(&counter);
+ return counter.QuadPart;
+ };
+
+ EA_DISABLE_VC_WARNING(4640) // warning C4640: construction of local static object is not thread-safe (VS2013)
+ static auto frequency = queryFrequency(); // cache cpu frequency on first call
+ EA_RESTORE_VC_WARNING()
+ return uint64_t(frequency * (double)queryCounter());
+ #elif defined EA_PLATFORM_SONY
+ static_assert(false, "Implementing GetTicks() requires first party support");
+ return 0;
+ #elif defined(EA_PLATFORM_APPLE)
+ auto queryTimeInfo = []
+ {
+ mach_timebase_info_data_t info;
+ mach_timebase_info(&info);
+ return info;
+ };
+
+ static auto timeInfo = queryTimeInfo();
+ uint64_t t = mach_absolute_time();
+ t *= timeInfo.numer;
+ t /= timeInfo.denom;
+ return t;
+ #elif defined(EA_PLATFORM_POSIX) // Posix means Linux, Unix, and Macintosh OSX, among others (including Linux-based mobile platforms).
+ #if (defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC))
+ timespec ts;
+ int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+
+ if (result == -1 && errno == EINVAL)
+ result = clock_gettime(CLOCK_REALTIME, &ts);
+
+ const uint64_t nNanoseconds = (uint64_t)ts.tv_nsec + ((uint64_t)ts.tv_sec * UINT64_C(1000000000));
+ return nNanoseconds;
+ #else
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ const uint64_t nMicroseconds = (uint64_t)tv.tv_usec + ((uint64_t)tv.tv_sec * 1000000);
+ return nMicroseconds;
+ #endif
+ #else
+ #error "chrono not implemented for platform"
+ #endif
+ }
+ } // namespace Internal
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // system_clock
+ ///////////////////////////////////////////////////////////////////////////////
+ class system_clock
+ {
+ public:
+ typedef long long rep; // signed arithmetic type representing the number of ticks in the clock's duration
+ typedef Internal::SystemClock_Period period;
+ typedef chrono::duration<rep, period> duration; // duration<rep, period>, capable of representing negative durations
+ typedef chrono::time_point<system_clock> time_point;
+
+ // true if the time between ticks is always increases monotonically
+ EA_CONSTEXPR_OR_CONST static bool is_steady = false;
+
+ // returns a time point representing the current point in time.
+ static time_point now() EA_NOEXCEPT
+ {
+ return time_point(duration(Internal::GetTicks()));
+ }
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // steady_clock
+ ///////////////////////////////////////////////////////////////////////////////
+ class steady_clock
+ {
+ public:
+ typedef long long rep; // signed arithmetic type representing the number of ticks in the clock's duration
+ typedef Internal::SteadyClock_Period period;
+ typedef chrono::duration<rep, period> duration; // duration<rep, period>, capable of representing negative durations
+ typedef chrono::time_point<steady_clock> time_point;
+
+ // true if the time between ticks is always increases monotonically
+ EA_CONSTEXPR_OR_CONST static bool is_steady = true;
+
+ // returns a time point representing the current point in time.
+ static time_point now() EA_NOEXCEPT
+ {
+ return time_point(duration(Internal::GetTicks()));
+ }
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // high_resolution_clock
+ ///////////////////////////////////////////////////////////////////////////////
+ typedef system_clock high_resolution_clock;
+
+
+} // namespace chrono
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // duration common_type specialization
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename Rep1, typename Period1, typename Rep2, typename Period2>
+ struct common_type<chrono::duration<Rep1, Period1>, chrono::duration<Rep2, Period2>>
+ {
+ typedef chrono::duration<typename eastl::decay<typename eastl::common_type<Rep1, Rep2>::type>::type,
+ typename chrono::Internal::RatioGCD<Period1, Period2>::type> type;
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // time_point common_type specialization
+ ///////////////////////////////////////////////////////////////////////////////
+ template <typename Clock, typename Duration1, typename Duration2>
+ struct common_type<chrono::time_point<Clock, Duration1>, chrono::time_point<Clock, Duration2>>
+ {
+ typedef chrono::time_point<Clock, typename eastl::common_type<Duration1, Duration2>::type> type;
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // chrono_literals
+ ///////////////////////////////////////////////////////////////////////////////
+ #if EASTL_USER_LITERALS_ENABLED && EASTL_INLINE_NAMESPACES_ENABLED
+ // Disabling the Clang/GCC/MSVC warning about using user
+ // defined literals without a leading '_' as they are reserved
+ // for standard libary usage.
+ EA_DISABLE_VC_WARNING(4455)
+ EA_DISABLE_CLANG_WARNING(-Wuser-defined-literals)
+ EA_DISABLE_GCC_WARNING(-Wliteral-suffix)
+ inline namespace literals
+ {
+ inline namespace chrono_literals
+ {
+ ///////////////////////////////////////////////////////////////////////////////
+ // integer chrono literals
+ ///////////////////////////////////////////////////////////////////////////////
+ EA_CONSTEXPR chrono::hours operator"" h(unsigned long long h) { return chrono::hours(h); }
+ EA_CONSTEXPR chrono::minutes operator"" min(unsigned long long m) { return chrono::minutes(m); }
+ EA_CONSTEXPR chrono::seconds operator"" s(unsigned long long s) { return chrono::seconds(s); }
+ EA_CONSTEXPR chrono::milliseconds operator"" ms(unsigned long long ms) { return chrono::milliseconds(ms); }
+ EA_CONSTEXPR chrono::microseconds operator"" us(unsigned long long us) { return chrono::microseconds(us); }
+ EA_CONSTEXPR chrono::nanoseconds operator"" ns(unsigned long long ns) { return chrono::nanoseconds(ns); }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // float chrono literals
+ ///////////////////////////////////////////////////////////////////////////////
+ EA_CONSTEXPR chrono::duration<long double, ratio<3600, 1>> operator"" h(long double h)
+ { return chrono::duration<long double, ratio<3600, 1>>(h); }
+ EA_CONSTEXPR chrono::duration<long double, ratio<60, 1>> operator"" min(long double m)
+ { return chrono::duration<long double, ratio<60, 1>>(m); }
+ EA_CONSTEXPR chrono::duration<long double> operator"" s(long double s)
+ { return chrono::duration<long double>(s); }
+ EA_CONSTEXPR chrono::duration<float, milli> operator"" ms(long double ms)
+ { return chrono::duration<long double, milli>(ms); }
+ EA_CONSTEXPR chrono::duration<float, micro> operator"" us(long double us)
+ { return chrono::duration<long double, micro>(us); }
+ EA_CONSTEXPR chrono::duration<float, nano> operator"" ns(long double ns)
+ { return chrono::duration<long double, nano>(ns); }
+
+ } // namespace chrono_literals
+ }// namespace literals
+ EA_RESTORE_GCC_WARNING() // -Wliteral-suffix
+ EA_RESTORE_CLANG_WARNING() // -Wuser-defined-literals
+ EA_RESTORE_VC_WARNING() // warning: 4455
+ #endif
+
+} // namespace eastl
+
+
+#if EASTL_USER_LITERALS_ENABLED && EASTL_INLINE_NAMESPACES_ENABLED
+namespace chrono
+{
+ using namespace eastl::literals::chrono_literals;
+} // namespace chrono
+#endif
+
+
+#endif
diff --git a/EASTL/include/EASTL/compare.h b/EASTL/include/EASTL/compare.h
new file mode 100644
index 0000000..9bc3bd6
--- /dev/null
+++ b/EASTL/include/EASTL/compare.h
@@ -0,0 +1,45 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_COMPARE_H
+#define EASTL_COMPARE_H
+
+
+#include <EABase/eabase.h>
+
+namespace eastl
+{
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ struct synth_three_way
+ {
+ template <typename T, typename U>
+ constexpr auto operator()(const T& t, const U& u) const requires requires
+ {
+ {t < u}->std::convertible_to<bool>;
+ {u < t}->std::convertible_to<bool>;
+ }
+ {
+ if constexpr (std::three_way_comparable_with<T, U>)
+ {
+ return t <=> u;
+ }
+ else
+ {
+ return (t < u) ? std::weak_ordering::less :
+ (u < t) ? std::weak_ordering::greater :
+ std::weak_ordering::equivalent;
+ }
+ }
+ };
+
+ template <typename T, typename U=T>
+ using synth_three_way_result = decltype(synth_three_way{}(declval<T&>(), declval<U&>()));
+#endif
+
+} // namespace eastl
+
+
+#endif // Header include guard \ No newline at end of file
diff --git a/EASTL/include/EASTL/core_allocator.h b/EASTL/include/EASTL/core_allocator.h
new file mode 100644
index 0000000..e437491
--- /dev/null
+++ b/EASTL/include/EASTL/core_allocator.h
@@ -0,0 +1,70 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_CORE_ALLOCATOR_H
+#define EASTL_CORE_ALLOCATOR_H
+
+#if EASTL_CORE_ALLOCATOR_ENABLED
+
+#include <coreallocator/icoreallocator.h>
+
+namespace EA
+{
+ namespace Allocator
+ {
+ /// EASTLCoreAllocatorImpl
+ ///
+ /// EASTL provides an out of the box implementation of the
+ /// ICoreAllocator interface. This is provided as a convenience for
+ /// users who wish to provide ICoreAllocator implementations for EASTL to use.
+ ///
+ /// EASTL has a dependency on coreallocator so to provide an out of
+ /// the box implementation for EASTLCoreAlloctor and EASTLCoreDeleter
+ /// that can be used and tested. Historically we could not test
+ /// ICoreAllocator interface because we relied on the code being linked
+ /// in user code.
+ ///
+
+ class EASTLCoreAllocatorImpl : public ICoreAllocator
+ {
+ public:
+ virtual void* Alloc(size_t size, const char* name, unsigned int flags)
+ {
+ return ::operator new[](size, name, flags, 0, __FILE__, __LINE__);
+ }
+
+ virtual void* Alloc(size_t size, const char* name, unsigned int flags, unsigned int alignment, unsigned int alignOffset = 0)
+ {
+ return ::operator new[](size, alignment, alignOffset, name, flags, 0, __FILE__, __LINE__);
+ }
+
+ virtual void Free(void* ptr, size_t size = 0)
+ {
+ ::operator delete(static_cast<char*>(ptr));
+ }
+
+ virtual void* AllocDebug(size_t size, const DebugParams debugParams, unsigned int flags)
+ {
+ return Alloc(size, debugParams.mName, flags);
+ }
+
+ virtual void* AllocDebug(size_t size, const DebugParams debugParams, unsigned int flags, unsigned int align, unsigned int alignOffset = 0)
+ {
+ return Alloc(size, debugParams.mName, flags, align, alignOffset);
+ }
+
+ static EASTLCoreAllocatorImpl* GetDefaultAllocator();
+ };
+
+ inline EASTLCoreAllocatorImpl* EASTLCoreAllocatorImpl::GetDefaultAllocator()
+ {
+ static EASTLCoreAllocatorImpl allocator;
+ return &allocator;
+ }
+ }
+}
+
+#endif // EASTL_CORE_ALLOCATOR_ENABLED
+#endif // EASTL_CORE_ALLOCATOR_H
+
diff --git a/EASTL/include/EASTL/core_allocator_adapter.h b/EASTL/include/EASTL/core_allocator_adapter.h
new file mode 100644
index 0000000..d6f1827
--- /dev/null
+++ b/EASTL/include/EASTL/core_allocator_adapter.h
@@ -0,0 +1,368 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements an EASTL allocator that uses an ICoreAllocator.
+// However, this header file is not dependent on ICoreAllocator or its package.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_CORE_ALLOCATOR_ADAPTER_H
+#define EASTL_CORE_ALLOCATOR_ADAPTER_H
+
+#if EASTL_CORE_ALLOCATOR_ENABLED
+
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+/// EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR
+///
+/// This allows the application to override the default name for the default global core allocator.
+/// However, you must be careful in your usage of this, as if this file is shared between uses then
+/// you will need to be careful that your override of this doesn't conflict with others.
+///
+#ifndef EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR
+ #define EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR AllocatorType::GetDefaultAllocator
+#endif
+
+
+
+namespace EA
+{
+ namespace Allocator
+ {
+ /// CoreAllocatorAdapter
+ ///
+ /// Implements the EASTL allocator interface.
+ /// Allocates memory from an instance of ICoreAllocator or another class with an equivalent interface.
+ /// ICoreAllocator is a pure-virtual memory allocation interface used by a number of EA games and
+ /// shared libraries. It's completely unrelated to EASTL, but it's prevalent enough that it's useful
+ /// for EASTL to have a built-in adapter for this interface. ICoreAllocator is declared in the
+ /// CoreAllocator package icoreallocator_interface.h header, but CoreAllocatorAdapter can work with
+ /// any equivalent interface, as defined below.
+ ///
+ /// Expected interface:
+ /// enum AllocFlags {
+ /// kFlagTempMemory = 0,
+ /// kFlagPermMemory = 1
+ /// };
+ ///
+ /// struct CoreAllocator {
+ /// void* Alloc(size_t size, const char* name, unsigned int allocFlags);
+ /// void* Alloc(size_t size, const char* name, unsigned int allocFlags, // Not required unless you are working with types that require custom alignment.
+ /// unsigned int align, unsigned int alignOffset = 0);
+ /// void Free(void* block, size_t size = 0);
+ /// static CoreAllocator* GetDefaultAllocator();
+ /// };
+ ///
+ /// Example usage:
+ /// #include <coreallocator/icoreallocator_interface.h>
+ /// typedef EA::Allocator::CoreAllocatorAdapter<EASTLTestCoreAllocator> Adapter;
+ /// eastl::list<Widget, Adapter> widgetList(Adapter("UI/WidgetList", pSomeCoreAllocator));
+ /// widgetList.push_back(Widget());
+ ///
+ /// Example usage:
+ /// #include <MyEquivalentCoreAllocatorInterface.h>
+ /// eastl::list<Widget, CoreAllocatorAdapter<MyCoreAllocatorInterface> > widgetList;
+ /// widgetList.push_back(Widget());
+ ///
+ /// Example usage:
+ /// #include <coreallocator/icoreallocator_interface.h>
+ /// typedef EA::Allocator::CoreAllocatorAdapter<EASTLTestCoreAllocator> Adapter;
+ /// typedef eastl::list<Widget, Adapter> WidgetList;
+ /// CoreAllocatorFixed<WidgetList::node_type> widgetCoreAllocator(pFixedAllocatorForWidgetListValueType); // CoreAllocatorFixed is a hypothetical implementation of the ICoreAllocator interface.
+ /// WidgetList widgetList(Adapter("UI/WidgetList", &widgetCoreAllocator)); // Note that the widgetCoreAllocator is declared before and thus destroyed after the widget list.
+ ///
+ template<class AllocatorType>
+ class CoreAllocatorAdapter
+ {
+ public:
+ typedef CoreAllocatorAdapter<AllocatorType> this_type;
+
+ public:
+ // To do: Make this constructor explicit, when there is no known code dependent on it being otherwise.
+ CoreAllocatorAdapter(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME), AllocatorType* pAllocator = EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR());
+ CoreAllocatorAdapter(const char* pName, AllocatorType* pAllocator, int flags);
+ CoreAllocatorAdapter(const CoreAllocatorAdapter& x);
+ CoreAllocatorAdapter(const CoreAllocatorAdapter& x, const char* pName);
+
+ CoreAllocatorAdapter& operator=(const CoreAllocatorAdapter& x);
+
+ void* allocate(size_t n, int flags = 0);
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+ void deallocate(void* p, size_t n);
+
+ AllocatorType* get_allocator() const;
+ void set_allocator(AllocatorType* pAllocator);
+
+ int get_flags() const;
+ void set_flags(int flags);
+
+ const char* get_name() const;
+ void set_name(const char* pName);
+
+ public: // Public because otherwise VC++ generates (possibly invalid) warnings about inline friend template specializations.
+ AllocatorType* mpCoreAllocator;
+ int mnFlags; // Allocation flags. See ICoreAllocator/AllocFlags.
+
+ #if EASTL_NAME_ENABLED
+ const char* mpName; // Debug name, used to track memory.
+ #endif
+ };
+
+ template<class AllocatorType>
+ bool operator==(const CoreAllocatorAdapter<AllocatorType>& a, const CoreAllocatorAdapter<AllocatorType>& b);
+
+ template<class AllocatorType>
+ bool operator!=(const CoreAllocatorAdapter<AllocatorType>& a, const CoreAllocatorAdapter<AllocatorType>& b);
+
+
+
+ /// EASTLICoreAllocator
+ ///
+ /// Provides a standardized typedef for ICoreAllocator;
+ ///
+ /// Example usage:
+ /// eastl::list<Widget, EASTLICoreAllocator> widgetList("UI/WidgetList", pSomeCoreAllocator);
+ /// widgetList.push_back(Widget());
+ ///
+ class ICoreAllocator;
+ class EASTLCoreAllocatorImpl;
+
+ typedef CoreAllocatorAdapter<ICoreAllocator> EASTLICoreAllocatorAdapter;
+ typedef CoreAllocatorAdapter<EASTLCoreAllocatorImpl> EASTLCoreAllocatorAdapter;
+ typedef EASTLICoreAllocatorAdapter EASTLICoreAllocator; // for backwards compatibility
+
+
+
+ /// EASTLICoreDeleter
+ ///
+ /// Implements a functor which can free memory from the specified
+ /// ICoreAllocator interface. This is a convenience object provided for
+ /// users who wish to have EASTL containers deallocate memory obtained from
+ /// ICoreAllocator interfaces.
+ ///
+ template <class AllocatorType>
+ class CoreDeleterAdapter
+ {
+ public:
+ typedef CoreDeleterAdapter<AllocatorType> this_type;
+ AllocatorType* mpCoreAllocator;
+
+ public:
+ CoreDeleterAdapter(AllocatorType* pAllocator = EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR()) EA_NOEXCEPT
+ : mpCoreAllocator(pAllocator) {}
+
+ ~CoreDeleterAdapter() EA_NOEXCEPT {}
+
+ template <typename T>
+ void operator()(T* p)
+ {
+ p->~T();
+ mpCoreAllocator->Free(p);
+ }
+
+ CoreDeleterAdapter(const CoreDeleterAdapter& in) { mpCoreAllocator = in.mpCoreAllocator; }
+
+ CoreDeleterAdapter(CoreDeleterAdapter&& in)
+ {
+ mpCoreAllocator = in.mpCoreAllocator;
+ in.mpCoreAllocator = nullptr;
+ }
+
+ CoreDeleterAdapter& operator=(const CoreDeleterAdapter& in)
+ {
+ mpCoreAllocator = in.mpCoreAllocator;
+ return *this;
+ }
+
+ CoreDeleterAdapter& operator=(CoreDeleterAdapter&& in)
+ {
+ mpCoreAllocator = in.mpCoreAllocator;
+ in.mpCoreAllocator = nullptr;
+ return *this;
+ }
+ };
+
+
+
+ /// EASTLICoreDeleter
+ ///
+ /// Provides a standardized typedef for ICoreAllocator implementations.
+ ///
+ /// Example usage:
+ /// eastl::shared_ptr<A> foo(pA, EASTLCoreDeleter());
+ ///
+ typedef CoreDeleterAdapter<ICoreAllocator> EASTLICoreDeleterAdapter;
+ typedef CoreDeleterAdapter<EASTLCoreAllocatorImpl> EASTLCoreDeleterAdapter;
+
+ } // namespace Allocator
+
+} // namespace EA
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Inlines
+///////////////////////////////////////////////////////////////////////////////
+
+namespace EA
+{
+ namespace Allocator
+ {
+ template<class AllocatorType>
+ inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const char* EASTL_NAME(pName), AllocatorType* pCoreAllocator)
+ : mpCoreAllocator(pCoreAllocator), mnFlags(0)
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+ }
+
+ template<class AllocatorType>
+ inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const char* EASTL_NAME(pName), AllocatorType* pCoreAllocator, int flags)
+ : mpCoreAllocator(pCoreAllocator), mnFlags(flags)
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+ }
+
+ template<class AllocatorType>
+ inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const CoreAllocatorAdapter& x)
+ : mpCoreAllocator(x.mpCoreAllocator), mnFlags(x.mnFlags)
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = x.mpName;
+ #endif
+ }
+
+ template<class AllocatorType>
+ inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const CoreAllocatorAdapter& x, const char* EASTL_NAME(pName))
+ : mpCoreAllocator(x.mpCoreAllocator), mnFlags(x.mnFlags)
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+ }
+
+ template<class AllocatorType>
+ inline CoreAllocatorAdapter<AllocatorType>& CoreAllocatorAdapter<AllocatorType>::operator=(const CoreAllocatorAdapter& x)
+ {
+ mpCoreAllocator = x.mpCoreAllocator;
+ mnFlags = x.mnFlags;
+
+ #if EASTL_NAME_ENABLED
+ mpName = x.mpName;
+ #endif
+
+ return *this;
+ }
+
+ template<class AllocatorType>
+ inline void* CoreAllocatorAdapter<AllocatorType>::allocate(size_t n, int /*flags*/)
+ {
+ // It turns out that EASTL itself doesn't use the flags parameter,
+ // whereas the user here might well want to specify a flags
+ // parameter. So we use ours instead of the one passed in.
+ return mpCoreAllocator->Alloc(n, EASTL_NAME_VAL(mpName), (unsigned)mnFlags);
+ }
+
+ template<class AllocatorType>
+ inline void* CoreAllocatorAdapter<AllocatorType>::allocate(size_t n, size_t alignment, size_t offset, int /*flags*/)
+ {
+ // It turns out that EASTL itself doesn't use the flags parameter,
+ // whereas the user here might well want to specify a flags
+ // parameter. So we use ours instead of the one passed in.
+ return mpCoreAllocator->Alloc(n, EASTL_NAME_VAL(mpName), (unsigned)mnFlags, (unsigned)alignment, (unsigned)offset);
+ }
+
+ template<class AllocatorType>
+ inline void CoreAllocatorAdapter<AllocatorType>::deallocate(void* p, size_t n)
+ {
+ return mpCoreAllocator->Free(p, n);
+ }
+
+ template<class AllocatorType>
+ inline AllocatorType* CoreAllocatorAdapter<AllocatorType>::get_allocator() const
+ {
+ return mpCoreAllocator;
+ }
+
+ template<class AllocatorType>
+ inline void CoreAllocatorAdapter<AllocatorType>::set_allocator(AllocatorType* pAllocator)
+ {
+ mpCoreAllocator = pAllocator;
+ }
+
+ template<class AllocatorType>
+ inline int CoreAllocatorAdapter<AllocatorType>::get_flags() const
+ {
+ return mnFlags;
+ }
+
+ template<class AllocatorType>
+ inline void CoreAllocatorAdapter<AllocatorType>::set_flags(int flags)
+ {
+ mnFlags = flags;
+ }
+
+ template<class AllocatorType>
+ inline const char* CoreAllocatorAdapter<AllocatorType>::get_name() const
+ {
+ #if EASTL_NAME_ENABLED
+ return mpName;
+ #else
+ return EASTL_ALLOCATOR_DEFAULT_NAME;
+ #endif
+ }
+
+ template<class AllocatorType>
+ inline void CoreAllocatorAdapter<AllocatorType>::set_name(const char* pName)
+ {
+ #if EASTL_NAME_ENABLED
+ mpName = pName;
+ #else
+ (void)pName;
+ #endif
+ }
+
+
+
+ template<class AllocatorType>
+ inline bool operator==(const CoreAllocatorAdapter<AllocatorType>& a, const CoreAllocatorAdapter<AllocatorType>& b)
+ {
+ return (a.mpCoreAllocator == b.mpCoreAllocator) &&
+ (a.mnFlags == b.mnFlags);
+ }
+
+ template<class AllocatorType>
+ inline bool operator!=(const CoreAllocatorAdapter<AllocatorType>& a, const CoreAllocatorAdapter<AllocatorType>& b)
+ {
+ return (a.mpCoreAllocator != b.mpCoreAllocator) ||
+ (a.mnFlags != b.mnFlags);
+ }
+
+
+ } // namespace Allocator
+
+} // namespace EA
+
+
+#endif // EASTL_CORE_ALLOCATOR_ENABLED
+#endif // Header include guard
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/deque.h b/EASTL/include/EASTL/deque.h
new file mode 100644
index 0000000..9a812c9
--- /dev/null
+++ b/EASTL/include/EASTL/deque.h
@@ -0,0 +1,2718 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// deque design
+//
+// A deque (pronounced "deck") is a double-ended queue, though this is partially
+// of a misnomer. A deque does indeed let you add and remove values from both ends
+// of the container, but it's not usually used for such a thing and instead is used
+// as a more flexible version of a vector. It provides operator[] (random access)
+// and can insert items anywhere and not just at the front and back.
+//
+// While you can implement a double-ended queue via a doubly-linked list, deque is
+// instead implemented as a list of arrays. The benefit of this is that memory usage
+// is lower and that random access can be had with decent efficiency.
+//
+// Our implementation of deque is just like every other implementation of deque,
+// as the C++ standard all but dictates that you make it work this way. Below
+// we have a depiction of an array (or vector) of 48 items, with each node being
+// a '+' character and extra capacity being a '-' character. What we have is one
+// contiguous block of memory:
+//
+// ++++++++++++++++++++++++++++++++++++++++++++++++-----------------
+// 0 47
+//
+// With a deque, the same array of 48 items would be implemented as multiple smaller
+// arrays of contiguous memory, each of fixed size. We will call these "sub-arrays."
+// In the case here, we have six arrays of 8 nodes:
+//
+// ++++++++ ++++++++ ++++++++ ++++++++ ++++++++ ++++++++
+//
+// With an vector, item [0] is the first item and item [47] is the last item. With a
+// deque, item [0] is usually not the first item and neither is item [47]. There is
+// extra capacity on both the front side and the back side of the deque. So a deque
+// (of 24 items) actually looks like this:
+//
+// -------- -----+++ ++++++++ ++++++++ +++++--- --------
+// 0 23
+//
+// To insert items at the front, you move into the capacity on the left, and to insert
+// items at the back, you append items on the right. As you can see, inserting an item
+// at the front doesn't require allocating new memory nor does it require moving any
+// items in the container. It merely involves moving the pointer to the [0] item to
+// the left by one node.
+//
+// We keep track of these sub-arrays by having an array of pointers, with each array
+// entry pointing to each of the sub-arrays. We could alternatively use a linked
+// list of pointers, but it turns out we can implement our deque::operator[] more
+// efficiently if we use an array of pointers instead of a list of pointers.
+//
+// To implement deque::iterator, we could keep a struct which is essentially this:
+// struct iterator {
+// int subArrayIndex;
+// int subArrayOffset;
+// }
+//
+// In practice, we implement iterators a little differently, but in reality our
+// implementation isn't much different from the above. It turns out that it's most
+// simple if we also manage the location of item [0] and item [end] by using these
+// same iterators.
+//
+// To consider: Implement the deque as a circular deque instead of a linear one.
+// This would use a similar subarray layout but iterators would
+// wrap around when they reached the end of the subarray pointer list.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_DEQUE_H
+#define EASTL_DEQUE_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/memory.h>
+#include <EASTL/initializer_list.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <new>
+#include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+#if EASTL_EXCEPTIONS_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <stdexcept> // std::out_of_range, std::length_error.
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+
+// 4267 - 'argument' : conversion from 'size_t' to 'const uint32_t', possible loss of data. This is a bogus warning resulting from a bug in VC++.
+// 4345 - Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized
+// 4480 - nonstandard extension used: specifying underlying type for enum
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4267 4345 4480 4530 4571);
+
+#if EASTL_EXCEPTIONS_ENABLED
+ // 4703 - potentially uninitialized local pointer variable used. VC++ is mistakenly analyzing the possibility of uninitialized variables, though it's not easy for it to do so.
+ // 4701 - potentially uninitialized local variable used.
+ EA_DISABLE_VC_WARNING(4703 4701)
+#endif
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+namespace eastl
+{
+
+ /// EASTL_DEQUE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_DEQUE_DEFAULT_NAME
+ #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque" // Unless the user overrides something, this is "EASTL deque".
+ #endif
+
+
+ /// EASTL_DEQUE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_DEQUE_DEFAULT_ALLOCATOR
+ #define EASTL_DEQUE_DEFAULT_ALLOCATOR allocator_type(EASTL_DEQUE_DEFAULT_NAME)
+ #endif
+
+
+ /// DEQUE_DEFAULT_SUBARRAY_SIZE
+ ///
+ /// Defines the default number of items in a subarray.
+ /// Note that the user has the option of specifying the subarray size
+ /// in the deque template declaration.
+ ///
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x can't handle the declaration below.
+ #define DEQUE_DEFAULT_SUBARRAY_SIZE(T) ((sizeof(T) <= 4) ? 64 : ((sizeof(T) <= 8) ? 32 : ((sizeof(T) <= 16) ? 16 : ((sizeof(T) <= 32) ? 8 : 4))))
+ #else
+ #define DEQUE_DEFAULT_SUBARRAY_SIZE(T) 16
+ #endif
+
+
+
+ /// DequeIterator
+ ///
+ /// The DequeIterator provides both const and non-const iterators for deque.
+ /// It also is used for the tracking of the begin and end for the deque.
+ ///
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ struct DequeIterator
+ {
+ typedef DequeIterator<T, Pointer, Reference, kDequeSubarraySize> this_type;
+ typedef DequeIterator<T, T*, T&, kDequeSubarraySize> iterator;
+ typedef DequeIterator<T, const T*, const T&, kDequeSubarraySize> const_iterator;
+ typedef ptrdiff_t difference_type;
+ typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+ typedef T value_type;
+ typedef T* pointer;
+ typedef T& reference;
+
+ public:
+ DequeIterator();
+ DequeIterator(const iterator& x);
+
+ pointer operator->() const;
+ reference operator*() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ this_type& operator--();
+ this_type operator--(int);
+
+ this_type& operator+=(difference_type n);
+ this_type& operator-=(difference_type n);
+
+ this_type operator+(difference_type n) const;
+ this_type operator-(difference_type n) const;
+
+ protected:
+ template <typename, typename, typename, unsigned>
+ friend struct DequeIterator;
+
+ template <typename, typename, unsigned>
+ friend struct DequeBase;
+
+ template <typename, typename, unsigned>
+ friend class deque;
+
+ template <typename U, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySizeU>
+ friend bool operator==(const DequeIterator<U, PointerA, ReferenceA, kDequeSubarraySizeU>&,
+ const DequeIterator<U, PointerB, ReferenceB, kDequeSubarraySizeU>&);
+
+ template <typename U, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySizeU>
+ friend bool operator!=(const DequeIterator<U, PointerA, ReferenceA, kDequeSubarraySizeU>&,
+ const DequeIterator<U, PointerB, ReferenceB, kDequeSubarraySizeU>&);
+
+ template <typename U, typename PointerU, typename ReferenceU, unsigned kDequeSubarraySizeU>
+ friend bool operator!=(const DequeIterator<U, PointerU, ReferenceU, kDequeSubarraySizeU>& a,
+ const DequeIterator<U, PointerU, ReferenceU, kDequeSubarraySizeU>& b);
+
+ template <typename U, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySizeU>
+ friend bool operator< (const DequeIterator<U, PointerA, ReferenceA, kDequeSubarraySizeU>&,
+ const DequeIterator<U, PointerB, ReferenceB, kDequeSubarraySizeU>&);
+
+ template <typename U, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySizeU>
+ friend bool operator> (const DequeIterator<U, PointerA, ReferenceA, kDequeSubarraySizeU>&,
+ const DequeIterator<U, PointerB, ReferenceB, kDequeSubarraySizeU>&);
+
+ template <typename U, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySizeU>
+ friend bool operator<=(const DequeIterator<U, PointerA, ReferenceA, kDequeSubarraySizeU>&,
+ const DequeIterator<U, PointerB, ReferenceB, kDequeSubarraySizeU>&);
+
+ template <typename U, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySizeU>
+ friend bool operator>=(const DequeIterator<U, PointerA, ReferenceA, kDequeSubarraySizeU>&,
+ const DequeIterator<U, PointerB, ReferenceB, kDequeSubarraySizeU>&);
+
+ template <typename U, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySizeU>
+ friend typename DequeIterator<U, PointerA, ReferenceA, kDequeSubarraySizeU>::difference_type
+ operator-(const DequeIterator<U, PointerA, ReferenceA, kDequeSubarraySizeU>& a,
+ const DequeIterator<U, PointerB, ReferenceB, kDequeSubarraySizeU>& b);
+
+ protected:
+ T* mpCurrent; // Where we currently point. Declared first because it's used most often.
+ T* mpBegin; // The beginning of the current subarray.
+ T* mpEnd; // The end of the current subarray. To consider: remove this member, as it is always equal to 'mpBegin + kDequeSubarraySize'. Given that deque subarrays usually consist of hundreds of bytes, this isn't a massive win. Also, now that we are implementing a zero-allocation new deque policy, mpEnd may in fact not be equal to 'mpBegin + kDequeSubarraySize'.
+ T** mpCurrentArrayPtr; // Pointer to current subarray. We could alternatively implement this as a list node iterator if the deque used a linked list.
+
+ struct Increment {};
+ struct Decrement {};
+ struct FromConst {};
+
+ DequeIterator(T** pCurrentArrayPtr, T* pCurrent);
+ DequeIterator(const const_iterator& x, FromConst) : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr){}
+ DequeIterator(const iterator& x, Increment);
+ DequeIterator(const iterator& x, Decrement);
+
+ this_type copy(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait has_trivial_relocate,
+ this_type copy(const iterator& first, const iterator& last, false_type); // false means it does not.
+
+ void copy_backward(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait has_trivial_relocate,
+ void copy_backward(const iterator& first, const iterator& last, false_type); // false means it does not.
+
+ void SetSubarray(T** pCurrentArrayPtr);
+ };
+
+
+
+
+ /// DequeBase
+ ///
+ /// The DequeBase implements memory allocation for deque.
+ /// See VectorBase (class vector) for an explanation of why we
+ /// create this separate base class.
+ ///
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ struct DequeBase
+ {
+ typedef T value_type;
+ typedef Allocator allocator_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef DequeIterator<T, T*, T&, kDequeSubarraySize> iterator;
+ typedef DequeIterator<T, const T*, const T&, kDequeSubarraySize> const_iterator;
+
+ static const size_type npos = (size_type)-1; /// 'npos' means non-valid position or simply non-position.
+ static const size_type kMaxSize = (size_type)-2; /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues.
+
+ enum
+ {
+ kMinPtrArraySize = 8, /// A new empty deque has a ptrArraySize of 0, but any allocated ptrArrays use this min size.
+ kSubarraySize = kDequeSubarraySize ///
+ //kNodeSize = kDequeSubarraySize * sizeof(T) /// Disabled because it prevents the ability to do this: struct X{ eastl::deque<X, EASTLAllocatorType, 16> mDequeOfSelf; };
+ };
+
+ enum Side /// Defines the side of the deque: front or back.
+ {
+ kSideFront, /// Identifies the front side of the deque.
+ kSideBack /// Identifies the back side of the deque.
+ };
+
+ protected:
+ T** mpPtrArray; // Array of pointers to subarrays.
+ size_type mnPtrArraySize; // Possibly we should store this as T** mpArrayEnd.
+ iterator mItBegin; // Where within the subarrays is our beginning.
+ iterator mItEnd; // Where within the subarrays is our end.
+ allocator_type mAllocator; // To do: Use base class optimization to make this go away.
+
+ public:
+ DequeBase(const allocator_type& allocator);
+ DequeBase(size_type n);
+ DequeBase(size_type n, const allocator_type& allocator);
+ ~DequeBase();
+
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ protected:
+ T* DoAllocateSubarray();
+ void DoFreeSubarray(T* p);
+ void DoFreeSubarrays(T** pBegin, T** pEnd);
+
+ T** DoAllocatePtrArray(size_type n);
+ void DoFreePtrArray(T** p, size_t n);
+
+ iterator DoReallocSubarray(size_type nAdditionalCapacity, Side allocationSide);
+ void DoReallocPtrArray(size_type nAdditionalCapacity, Side allocationSide);
+
+ void DoInit(size_type n);
+
+ }; // DequeBase
+
+
+
+
+ /// deque
+ ///
+ /// Implements a conventional C++ double-ended queue. The implementation used here
+ /// is very much like any other deque implementations you may have seen, as it
+ /// follows the standard algorithm for deque design.
+ ///
+ /// Note:
+ /// As of this writing, deque does not support zero-allocation initial emptiness.
+ /// A newly created deque with zero elements will still allocate a subarray
+ /// pointer set. We are looking for efficient and clean ways to get around this,
+ /// but current efforts have resulted in less efficient and more fragile code.
+ /// The logic of this class doesn't lend itself to a clean implementation.
+ /// It turns out that deques are one of the least likely classes you'd want this
+ /// behaviour in, so until this functionality becomes very important to somebody,
+ /// we will leave it as-is. It can probably be solved by adding some extra code to
+ /// the Do* functions and adding good comments explaining the situation.
+ ///
+ template <typename T, typename Allocator = EASTLAllocatorType, unsigned kDequeSubarraySize = DEQUE_DEFAULT_SUBARRAY_SIZE(T)>
+ class deque : public DequeBase<T, Allocator, kDequeSubarraySize>
+ {
+ public:
+ typedef DequeBase<T, Allocator, kDequeSubarraySize> base_type;
+ typedef deque<T, Allocator, kDequeSubarraySize> this_type;
+ typedef T value_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef DequeIterator<T, T*, T&, kDequeSubarraySize> iterator;
+ typedef DequeIterator<T, const T*, const T&, kDequeSubarraySize> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef typename base_type::allocator_type allocator_type;
+
+ using base_type::kSideFront;
+ using base_type::kSideBack;
+ using base_type::mpPtrArray;
+ using base_type::mnPtrArraySize;
+ using base_type::mItBegin;
+ using base_type::mItEnd;
+ using base_type::mAllocator;
+ using base_type::npos;
+ using base_type::DoAllocateSubarray;
+ using base_type::DoFreeSubarray;
+ using base_type::DoFreeSubarrays;
+ using base_type::DoAllocatePtrArray;
+ using base_type::DoFreePtrArray;
+ using base_type::DoReallocSubarray;
+ using base_type::DoReallocPtrArray;
+
+ public:
+ deque();
+ explicit deque(const allocator_type& allocator);
+ explicit deque(size_type n, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR);
+ deque(size_type n, const value_type& value, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR);
+ deque(const this_type& x);
+ deque(this_type&& x);
+ deque(this_type&& x, const allocator_type& allocator);
+ deque(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ deque(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ ~deque();
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void assign(size_type n, const value_type& value);
+ void assign(std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator> // It turns out that the C++ std::deque<int, int> specifies a two argument
+ void assign(InputIterator first, InputIterator last); // version of assign that takes (int size, int value). These are not
+ // iterators, so we need to do a template compiler trick to do the right thing.
+
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ bool empty() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+
+ void resize(size_type n, const value_type& value);
+ void resize(size_type n);
+
+ void shrink_to_fit();
+ void set_capacity(size_type n = base_type::npos);
+
+ reference operator[](size_type n);
+ const_reference operator[](size_type n) const;
+
+ reference at(size_type n);
+ const_reference at(size_type n) const;
+
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ void push_front(const value_type& value);
+ reference push_front();
+ void push_front(value_type&& value);
+
+ void push_back(const value_type& value);
+ reference push_back();
+ void push_back(value_type&& value);
+
+ void pop_front();
+ void pop_back();
+
+ template<class... Args>
+ iterator emplace(const_iterator position, Args&&... args);
+
+ template<class... Args>
+ void emplace_front(Args&&... args);
+
+ template<class... Args>
+ void emplace_back(Args&&... args);
+
+ iterator insert(const_iterator position, const value_type& value);
+ iterator insert(const_iterator position, value_type&& value);
+ void insert(const_iterator position, size_type n, const value_type& value);
+ iterator insert(const_iterator position, std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void insert(const_iterator position, InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ reverse_iterator erase(reverse_iterator position);
+ reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+
+ void clear();
+ //void reset_lose_memory(); // Disabled until it can be implemented efficiently and cleanly. // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ template <typename Integer>
+ void DoInit(Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ void DoInit(InputIterator first, InputIterator last, false_type);
+
+ template <typename InputIterator>
+ void DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag);
+
+ template <typename ForwardIterator>
+ void DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag);
+
+ void DoFillInit(const value_type& value);
+
+ template <typename Integer>
+ void DoAssign(Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ void DoAssign(InputIterator first, InputIterator last, false_type);
+
+ void DoAssignValues(size_type n, const value_type& value);
+
+ template <typename Integer>
+ void DoInsert(const const_iterator& position, Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ void DoInsert(const const_iterator& position, const InputIterator& first, const InputIterator& last, false_type);
+
+ template <typename InputIterator>
+ void DoInsertFromIterator(const_iterator position, const InputIterator& first, const InputIterator& last, EASTL_ITC_NS::forward_iterator_tag);
+
+ void DoInsertValues(const_iterator position, size_type n, const value_type& value);
+
+ void DoSwap(this_type& x);
+ }; // class deque
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // DequeBase
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ DequeBase<T, Allocator, kDequeSubarraySize>::DequeBase(const allocator_type& allocator)
+ : mpPtrArray(NULL),
+ mnPtrArraySize(0),
+ mItBegin(),
+ mItEnd(),
+ mAllocator(allocator)
+ {
+ // It is assumed here that the deque subclass will init us when/as needed.
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ DequeBase<T, Allocator, kDequeSubarraySize>::DequeBase(size_type n)
+ : mpPtrArray(NULL),
+ mnPtrArraySize(0),
+ mItBegin(),
+ mItEnd(),
+ mAllocator(EASTL_DEQUE_DEFAULT_NAME)
+ {
+ // It's important to note that DoInit creates space for elements and assigns
+ // mItBegin/mItEnd to point to them, but these elements are not constructed.
+ // You need to immediately follow this constructor with code that constructs the values.
+ DoInit(n);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ DequeBase<T, Allocator, kDequeSubarraySize>::DequeBase(size_type n, const allocator_type& allocator)
+ : mpPtrArray(NULL),
+ mnPtrArraySize(0),
+ mItBegin(),
+ mItEnd(),
+ mAllocator(allocator)
+ {
+ // It's important to note that DoInit creates space for elements and assigns
+ // mItBegin/mItEnd to point to them, but these elements are not constructed.
+ // You need to immediately follow this constructor with code that constructs the values.
+ DoInit(n);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ DequeBase<T, Allocator, kDequeSubarraySize>::~DequeBase()
+ {
+ if(mpPtrArray)
+ {
+ DoFreeSubarrays(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1);
+ DoFreePtrArray(mpPtrArray, mnPtrArraySize);
+ mpPtrArray = nullptr;
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ const typename DequeBase<T, Allocator, kDequeSubarraySize>::allocator_type&
+ DequeBase<T, Allocator, kDequeSubarraySize>::get_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename DequeBase<T, Allocator, kDequeSubarraySize>::allocator_type&
+ DequeBase<T, Allocator, kDequeSubarraySize>::get_allocator() EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void DequeBase<T, Allocator, kDequeSubarraySize>::set_allocator(const allocator_type& allocator)
+ {
+ // The only time you can set an allocator is with an empty unused container, such as right after construction.
+ if(EASTL_LIKELY(mAllocator != allocator))
+ {
+ if(EASTL_LIKELY(mpPtrArray && (mItBegin.mpCurrentArrayPtr == mItEnd.mpCurrentArrayPtr))) // If we are empty and so can safely deallocate the existing memory... We could also test for empty(), but that's a more expensive calculation and more involved clearing, though it would be more flexible.
+ {
+ DoFreeSubarrays(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1);
+ DoFreePtrArray(mpPtrArray, mnPtrArraySize);
+
+ mAllocator = allocator;
+ DoInit(0);
+ }
+ else
+ {
+ EASTL_FAIL_MSG("DequeBase::set_allocator -- atempt to change allocator after allocating elements.");
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ T* DequeBase<T, Allocator, kDequeSubarraySize>::DoAllocateSubarray()
+ {
+ T* p = (T*)allocate_memory(mAllocator, kDequeSubarraySize * sizeof(T), EASTL_ALIGN_OF(T), 0);
+ EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_DEBUG
+ memset((void*)p, 0, kDequeSubarraySize * sizeof(T));
+ #endif
+
+ return (T*)p;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void DequeBase<T, Allocator, kDequeSubarraySize>::DoFreeSubarray(T* p)
+ {
+ if(p)
+ EASTLFree(mAllocator, p, kDequeSubarraySize * sizeof(T));
+ }
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void DequeBase<T, Allocator, kDequeSubarraySize>::DoFreeSubarrays(T** pBegin, T** pEnd)
+ {
+ while(pBegin < pEnd)
+ DoFreeSubarray(*pBegin++);
+ }
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ T** DequeBase<T, Allocator, kDequeSubarraySize>::DoAllocatePtrArray(size_type n)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n >= 0x80000000))
+ EASTL_FAIL_MSG("deque::DoAllocatePtrArray -- improbably large request.");
+ #endif
+
+ T** pp = (T**)allocate_memory(mAllocator, n * sizeof(T*), EASTL_ALIGN_OF(T), 0);
+ EASTL_ASSERT_MSG(pp != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_DEBUG
+ memset((void*)pp, 0, n * sizeof(T*));
+ #endif
+
+ return pp;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void DequeBase<T, Allocator, kDequeSubarraySize>::DoFreePtrArray(T** pp, size_t n)
+ {
+ if(pp)
+ EASTLFree(mAllocator, pp, n * sizeof(T*));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename DequeBase<T, Allocator, kDequeSubarraySize>::iterator
+ DequeBase<T, Allocator, kDequeSubarraySize>::DoReallocSubarray(size_type nAdditionalCapacity, Side allocationSide)
+ {
+ // nAdditionalCapacity refers to the amount of additional space we need to be
+ // able to store in this deque. Typically this function is called as part of
+ // an insert or append operation. This is the function that makes sure there
+ // is enough capacity for the new elements to be copied into the deque.
+ // The new capacity here is always at the front or back of the deque.
+ // This function returns an iterator to that points to the new begin or
+ // the new end of the deque space, depending on allocationSide.
+
+ if(allocationSide == kSideFront)
+ {
+ // There might be some free space (nCurrentAdditionalCapacity) at the front of the existing subarray.
+ const size_type nCurrentAdditionalCapacity = (size_type)(mItBegin.mpCurrent - mItBegin.mpBegin);
+
+ if(EASTL_UNLIKELY(nCurrentAdditionalCapacity < nAdditionalCapacity)) // If we need to grow downward into a new subarray...
+ {
+ const difference_type nSubarrayIncrease = (difference_type)(((nAdditionalCapacity - nCurrentAdditionalCapacity) + kDequeSubarraySize - 1) / kDequeSubarraySize);
+ difference_type i;
+
+ if(nSubarrayIncrease > (mItBegin.mpCurrentArrayPtr - mpPtrArray)) // If there are not enough pointers in front of the current (first) one...
+ DoReallocPtrArray((size_type)(nSubarrayIncrease - (mItBegin.mpCurrentArrayPtr - mpPtrArray)), kSideFront);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(i = 1; i <= nSubarrayIncrease; ++i)
+ mItBegin.mpCurrentArrayPtr[-i] = DoAllocateSubarray();
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(difference_type j = 1; j < i; ++j)
+ DoFreeSubarray(mItBegin.mpCurrentArrayPtr[-j]);
+ throw;
+ }
+ #endif
+ }
+
+ return mItBegin - (difference_type)nAdditionalCapacity;
+ }
+ else // else kSideBack
+ {
+ const size_type nCurrentAdditionalCapacity = (size_type)((mItEnd.mpEnd - 1) - mItEnd.mpCurrent);
+
+ if(EASTL_UNLIKELY(nCurrentAdditionalCapacity < nAdditionalCapacity)) // If we need to grow forward into a new subarray...
+ {
+ const difference_type nSubarrayIncrease = (difference_type)(((nAdditionalCapacity - nCurrentAdditionalCapacity) + kDequeSubarraySize - 1) / kDequeSubarraySize);
+ difference_type i;
+
+ if(nSubarrayIncrease > ((mpPtrArray + mnPtrArraySize) - mItEnd.mpCurrentArrayPtr) - 1) // If there are not enough pointers after the current (last) one...
+ DoReallocPtrArray((size_type)(nSubarrayIncrease - (((mpPtrArray + mnPtrArraySize) - mItEnd.mpCurrentArrayPtr) - 1)), kSideBack);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(i = 1; i <= nSubarrayIncrease; ++i)
+ mItEnd.mpCurrentArrayPtr[i] = DoAllocateSubarray();
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(difference_type j = 1; j < i; ++j)
+ DoFreeSubarray(mItEnd.mpCurrentArrayPtr[j]);
+ throw;
+ }
+ #endif
+ }
+
+ return mItEnd + (difference_type)nAdditionalCapacity;
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void DequeBase<T, Allocator, kDequeSubarraySize>::DoReallocPtrArray(size_type nAdditionalCapacity, Side allocationSide)
+ {
+ // This function is not called unless the capacity is known to require a resize.
+ //
+ // We have an array of pointers (mpPtrArray), of which a segment of them are in use and
+ // at either end of the array are zero or more unused pointers. This function is being
+ // called because we need to extend the capacity on either side of this array by
+ // nAdditionalCapacity pointers. However, it's possible that if the user is continually
+ // using push_back and pop_front then the pointer array will continue to be extended
+ // on the back side and unused on the front side. So while we are doing this resizing
+ // here we also take the opportunity to recenter the pointers and thus be balanced.
+ // It man turn out that we don't even need to reallocate the pointer array in order
+ // to increase capacity on one side, as simply moving the pointers to the center may
+ // be enough to open up the requires space.
+ //
+ // Balanced pointer array Unbalanced pointer array (unused space at front, no free space at back)
+ // ----++++++++++++---- ---------+++++++++++
+
+ const size_type nUnusedPtrCountAtFront = (size_type)(mItBegin.mpCurrentArrayPtr - mpPtrArray);
+ const size_type nUsedPtrCount = (size_type)(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) + 1;
+ const size_type nUsedPtrSpace = nUsedPtrCount * sizeof(void*);
+ const size_type nUnusedPtrCountAtBack = (mnPtrArraySize - nUnusedPtrCountAtFront) - nUsedPtrCount;
+ value_type** pPtrArrayBegin;
+
+ if((allocationSide == kSideBack) && (nAdditionalCapacity <= nUnusedPtrCountAtFront)) // If we can take advantage of unused pointers at the front without doing any reallocation...
+ {
+ if(nAdditionalCapacity < (nUnusedPtrCountAtFront / 2)) // Possibly use more space than required, if there's a lot of extra space.
+ nAdditionalCapacity = (nUnusedPtrCountAtFront / 2);
+
+ pPtrArrayBegin = mpPtrArray + (nUnusedPtrCountAtFront - nAdditionalCapacity);
+ memmove(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace);
+
+ #if EASTL_DEBUG
+ memset(pPtrArrayBegin + nUsedPtrCount, 0, (size_t)(mpPtrArray + mnPtrArraySize) - (size_t)(pPtrArrayBegin + nUsedPtrCount));
+ #endif
+ }
+ else if((allocationSide == kSideFront) && (nAdditionalCapacity <= nUnusedPtrCountAtBack)) // If we can take advantage of unused pointers at the back without doing any reallocation...
+ {
+ if(nAdditionalCapacity < (nUnusedPtrCountAtBack / 2)) // Possibly use more space than required, if there's a lot of extra space.
+ nAdditionalCapacity = (nUnusedPtrCountAtBack / 2);
+
+ pPtrArrayBegin = mItBegin.mpCurrentArrayPtr + nAdditionalCapacity;
+ memmove(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace);
+
+ #if EASTL_DEBUG
+ memset(mpPtrArray, 0, (size_t)((uintptr_t)pPtrArrayBegin - (uintptr_t)mpPtrArray));
+ #endif
+ }
+ else
+ {
+ // In this case we will have to do a reallocation.
+ const size_type nNewPtrArraySize = mnPtrArraySize + eastl::max_alt(mnPtrArraySize, nAdditionalCapacity) + 2; // Allocate extra capacity.
+ value_type** const pNewPtrArray = DoAllocatePtrArray(nNewPtrArraySize);
+
+ pPtrArrayBegin = pNewPtrArray + (mItBegin.mpCurrentArrayPtr - mpPtrArray) + ((allocationSide == kSideFront) ? nAdditionalCapacity : 0);
+
+ // The following is equivalent to: eastl::copy(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1, pPtrArrayBegin);
+ // It's OK to use memcpy instead of memmove because the destination is guaranteed to non-overlap the source.
+ if(mpPtrArray) // Could also say: 'if(mItBegin.mpCurrentArrayPtr)'
+ memcpy(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace);
+
+ DoFreePtrArray(mpPtrArray, mnPtrArraySize);
+
+ mpPtrArray = pNewPtrArray;
+ mnPtrArraySize = nNewPtrArraySize;
+ }
+
+ // We need to reset the begin and end iterators, as code that calls this expects them to *not* be invalidated.
+ mItBegin.SetSubarray(pPtrArrayBegin);
+ mItEnd.SetSubarray((pPtrArrayBegin + nUsedPtrCount) - 1);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void DequeBase<T, Allocator, kDequeSubarraySize>::DoInit(size_type n)
+ {
+ // This code is disabled because it doesn't currently work properly.
+ // We are trying to make it so that a deque can have a zero allocation
+ // initial empty state, but we (OK, I) am having a hard time making
+ // this elegant and efficient.
+ //if(n)
+ //{
+ const size_type nNewPtrArraySize = (size_type)((n / kDequeSubarraySize) + 1); // Always have at least one, even if n is zero.
+ const size_type kMinPtrArraySize_ = kMinPtrArraySize;
+
+ mnPtrArraySize = eastl::max_alt(kMinPtrArraySize_, (nNewPtrArraySize + 2));
+ mpPtrArray = DoAllocatePtrArray(mnPtrArraySize);
+
+ value_type** const pPtrArrayBegin = (mpPtrArray + ((mnPtrArraySize - nNewPtrArraySize) / 2)); // Try to place it in the middle.
+ value_type** const pPtrArrayEnd = pPtrArrayBegin + nNewPtrArraySize;
+ value_type** pPtrArrayCurrent = pPtrArrayBegin;
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ try
+ {
+ #endif
+ while(pPtrArrayCurrent < pPtrArrayEnd)
+ *pPtrArrayCurrent++ = DoAllocateSubarray();
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(pPtrArrayBegin, pPtrArrayCurrent);
+ throw;
+ }
+ }
+ catch(...)
+ {
+ DoFreePtrArray(mpPtrArray, mnPtrArraySize);
+ mpPtrArray = NULL;
+ mnPtrArraySize = 0;
+ throw;
+ }
+ #endif
+
+ mItBegin.SetSubarray(pPtrArrayBegin);
+ mItBegin.mpCurrent = mItBegin.mpBegin;
+
+ mItEnd.SetSubarray(pPtrArrayEnd - 1);
+ mItEnd.mpCurrent = mItEnd.mpBegin + (difference_type)(n % kDequeSubarraySize);
+ //}
+ //else // Else we do a zero-allocation initialization.
+ //{
+ // mpPtrArray = NULL;
+ // mnPtrArraySize = 0;
+ //
+ // mItBegin.mpCurrentArrayPtr = NULL;
+ // mItBegin.mpBegin = NULL;
+ // mItBegin.mpEnd = NULL; // We intentionally create a situation whereby the subarray that has no capacity.
+ // mItBegin.mpCurrent = NULL;
+ //
+ // mItEnd = mItBegin;
+ //}
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // DequeIterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::DequeIterator()
+ : mpCurrent(NULL), mpBegin(NULL), mpEnd(NULL), mpCurrentArrayPtr(NULL)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::DequeIterator(T** pCurrentArrayPtr, T* pCurrent)
+ : mpCurrent(pCurrent), mpBegin(*pCurrentArrayPtr), mpEnd(pCurrent + kDequeSubarraySize), mpCurrentArrayPtr(pCurrentArrayPtr)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::DequeIterator(const iterator& x)
+ : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::DequeIterator(const iterator& x, Increment)
+ : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr)
+ {
+ operator++();
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::DequeIterator(const iterator& x, Decrement)
+ : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr)
+ {
+ operator--();
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::pointer
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator->() const
+ {
+ return mpCurrent;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::reference
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator*() const
+ {
+ return *mpCurrent;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type&
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator++()
+ {
+ if(EASTL_UNLIKELY(++mpCurrent == mpEnd))
+ {
+ mpBegin = *++mpCurrentArrayPtr;
+ mpEnd = mpBegin + kDequeSubarraySize;
+ mpCurrent = mpBegin;
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator++(int)
+ {
+ const this_type temp(*this);
+ operator++();
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type&
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator--()
+ {
+ if(EASTL_UNLIKELY(mpCurrent == mpBegin))
+ {
+ mpBegin = *--mpCurrentArrayPtr;
+ mpEnd = mpBegin + kDequeSubarraySize;
+ mpCurrent = mpEnd; // fall through...
+ }
+ --mpCurrent;
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator--(int)
+ {
+ const this_type temp(*this);
+ operator--();
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type&
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator+=(difference_type n)
+ {
+ const difference_type subarrayPosition = (mpCurrent - mpBegin) + n;
+
+ // Cast from signed to unsigned (size_t) in order to obviate the need to compare to < 0.
+ if((size_t)subarrayPosition < (size_t)kDequeSubarraySize) // If the new position is within the current subarray (i.e. >= 0 && < kSubArraySize)...
+ mpCurrent += n;
+ else
+ {
+ // This implementation is a branchless version which works by offsetting
+ // the math to always be in the positive range. Much of the values here
+ // reduce to constants and both the multiplication and division are of
+ // power of two sizes and so this calculation ends up compiling down to
+ // just one addition, one shift and one subtraction. This algorithm has
+ // a theoretical weakness in that on 32 bit systems it will fail if the
+ // value of n is >= (2^32 - 2^24) or 4,278,190,080 of if kDequeSubarraySize
+ // is >= 2^24 or 16,777,216.
+ EASTL_CT_ASSERT((kDequeSubarraySize & (kDequeSubarraySize - 1)) == 0); // Verify that it is a power of 2.
+ const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize);
+
+ SetSubarray(mpCurrentArrayPtr + subarrayIndex);
+ mpCurrent = mpBegin + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize));
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type&
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator-=(difference_type n)
+ {
+ return (*this).operator+=(-n);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator+(difference_type n) const
+ {
+ return this_type(*this).operator+=(n);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::operator-(difference_type n) const
+ {
+ return this_type(*this).operator+=(-n);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::copy(const iterator& first, const iterator& last, true_type)
+ {
+ // To do: Implement this as a loop which does memcpys between subarrays appropriately.
+ // Currently we only do memcpy if the entire operation occurs within a single subarray.
+ if((first.mpBegin == last.mpBegin) && (first.mpBegin == mpBegin)) // If all operations are within the same subarray, implement the operation as a memmove.
+ {
+ memmove(mpCurrent, first.mpCurrent, (size_t)((uintptr_t)last.mpCurrent - (uintptr_t)first.mpCurrent));
+ return *this + (last.mpCurrent - first.mpCurrent);
+ }
+ return eastl::copy(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this)).base();
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ typename DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::this_type
+ DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::copy(const iterator& first, const iterator& last, false_type)
+ {
+ return eastl::copy(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this)).base();
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ void DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::copy_backward(const iterator& first, const iterator& last, true_type)
+ {
+ // To do: Implement this as a loop which does memmoves between subarrays appropriately.
+ // Currently we only do memcpy if the entire operation occurs within a single subarray.
+ if((first.mpBegin == last.mpBegin) && (first.mpBegin == mpBegin)) // If all operations are within the same subarray, implement the operation as a memcpy.
+ memmove(mpCurrent - (last.mpCurrent - first.mpCurrent), first.mpCurrent, (size_t)((uintptr_t)last.mpCurrent - (uintptr_t)first.mpCurrent));
+ else
+ eastl::copy_backward(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this));
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ void DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::copy_backward(const iterator& first, const iterator& last, false_type)
+ {
+ eastl::copy_backward(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this)).base();
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ void DequeIterator<T, Pointer, Reference, kDequeSubarraySize>::SetSubarray(T** pCurrentArrayPtr)
+ {
+ mpCurrentArrayPtr = pCurrentArrayPtr;
+ mpBegin = *pCurrentArrayPtr;
+ mpEnd = mpBegin + kDequeSubarraySize;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template paremeters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySize>
+ inline bool operator==(const DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>& a,
+ const DequeIterator<T, PointerB, ReferenceB, kDequeSubarraySize>& b)
+ {
+ return a.mpCurrent == b.mpCurrent;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySize>
+ inline bool operator!=(const DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>& a,
+ const DequeIterator<T, PointerB, ReferenceB, kDequeSubarraySize>& b)
+ {
+ return a.mpCurrent != b.mpCurrent;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ inline bool operator!=(const DequeIterator<T, Pointer, Reference, kDequeSubarraySize>& a,
+ const DequeIterator<T, Pointer, Reference, kDequeSubarraySize>& b)
+ {
+ return a.mpCurrent != b.mpCurrent;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySize>
+ inline bool operator<(const DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>& a,
+ const DequeIterator<T, PointerB, ReferenceB, kDequeSubarraySize>& b)
+ {
+ return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent < b.mpCurrent) : (a.mpCurrentArrayPtr < b.mpCurrentArrayPtr);
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySize>
+ inline bool operator>(const DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>& a,
+ const DequeIterator<T, PointerB, ReferenceB, kDequeSubarraySize>& b)
+ {
+ return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent > b.mpCurrent) : (a.mpCurrentArrayPtr > b.mpCurrentArrayPtr);
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySize>
+ inline bool operator<=(const DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>& a,
+ const DequeIterator<T, PointerB, ReferenceB, kDequeSubarraySize>& b)
+ {
+ return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent <= b.mpCurrent) : (a.mpCurrentArrayPtr <= b.mpCurrentArrayPtr);
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySize>
+ inline bool operator>=(const DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>& a,
+ const DequeIterator<T, PointerB, ReferenceB, kDequeSubarraySize>& b)
+ {
+ return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent >= b.mpCurrent) : (a.mpCurrentArrayPtr >= b.mpCurrentArrayPtr);
+ }
+
+
+ // Random access iterators must support operator + and operator -.
+ // You can only add an integer to an iterator, and you cannot add two iterators.
+ template <typename T, typename Pointer, typename Reference, unsigned kDequeSubarraySize>
+ inline DequeIterator<T, Pointer, Reference, kDequeSubarraySize>
+ operator+(ptrdiff_t n, const DequeIterator<T, Pointer, Reference, kDequeSubarraySize>& x)
+ {
+ return x + n; // Implement (n + x) in terms of (x + n).
+ }
+
+
+ // You can only add an integer to an iterator, but you can subtract two iterators.
+ // The C++ defect report #179 mentioned above specifically refers to
+ // operator - and states that we support the subtraction of const and non-const iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, unsigned kDequeSubarraySize>
+ inline typename DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>::difference_type
+ operator-(const DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>& a,
+ const DequeIterator<T, PointerB, ReferenceB, kDequeSubarraySize>& b)
+ {
+ // This is a fairly clever algorithm that has been used in STL deque implementations since the original HP STL:
+ typedef typename DequeIterator<T, PointerA, ReferenceA, kDequeSubarraySize>::difference_type difference_type;
+
+ return ((difference_type)kDequeSubarraySize * ((a.mpCurrentArrayPtr - b.mpCurrentArrayPtr) - 1)) + (a.mpCurrent - a.mpBegin) + (b.mpEnd - b.mpCurrent);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // deque
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque()
+ : base_type((size_type)0)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque(const allocator_type& allocator)
+ : base_type((size_type)0, allocator)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque(size_type n, const allocator_type& allocator)
+ : base_type(n, allocator)
+ {
+ DoFillInit(value_type());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque(size_type n, const value_type& value, const allocator_type& allocator)
+ : base_type(n, allocator)
+ {
+ DoFillInit(value);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque(const this_type& x)
+ : base_type(x.size(), x.mAllocator)
+ {
+ eastl::uninitialized_copy(x.mItBegin, x.mItEnd, mItBegin);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque(this_type&& x)
+ : base_type((size_type)0, x.mAllocator)
+ {
+ swap(x);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque(this_type&& x, const allocator_type& allocator)
+ : base_type((size_type)0, allocator)
+ {
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque(std::initializer_list<value_type> ilist, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInit(ilist.begin(), ilist.end(), false_type());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename InputIterator>
+ inline deque<T, Allocator, kDequeSubarraySize>::deque(InputIterator first, InputIterator last)
+ : base_type(EASTL_DEQUE_DEFAULT_ALLOCATOR) // Call the empty base constructor, which does nothing. We need to do all the work in our own DoInit.
+ {
+ DoInit(first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline deque<T, Allocator, kDequeSubarraySize>::~deque()
+ {
+ // Call destructors. Parent class will free the memory.
+ for(iterator itCurrent(mItBegin); itCurrent != mItEnd; ++itCurrent)
+ itCurrent.mpCurrent->~value_type();
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::this_type&
+ deque<T, Allocator, kDequeSubarraySize>::operator=(const this_type& x)
+ {
+ if(&x != this) // If not assigning to ourselves...
+ {
+ // If (EASTL_ALLOCATOR_COPY_ENABLED == 1) and the current contents are allocated by an
+ // allocator that's unequal to x's allocator, we need to reallocate our elements with
+ // our current allocator and reallocate it with x's allocator. If the allocators are
+ // equal then we can use a more optimal algorithm that doesn't reallocate our elements
+ // but instead can copy them in place.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ bool bSlowerPathwayRequired = (mAllocator != x.mAllocator);
+ #else
+ bool bSlowerPathwayRequired = false;
+ #endif
+
+ if(bSlowerPathwayRequired)
+ {
+ // We can't currently use set_capacity(0) or shrink_to_fit, because they
+ // leave a remaining allocation with our old allocator. So we do a similar
+ // thing but set our allocator to x.mAllocator while doing so.
+ this_type temp(x.mAllocator);
+ DoSwap(temp);
+ // Now we have an empty container with an allocator equal to x.mAllocator, ready to assign from x.
+ }
+
+ DoAssign(x.begin(), x.end(), eastl::false_type());
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::this_type&
+ deque<T, Allocator, kDequeSubarraySize>::operator=(this_type&& x)
+ {
+ if(this != &x)
+ {
+ set_capacity(0); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor.
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::this_type&
+ deque<T, Allocator, kDequeSubarraySize>::operator=(std::initializer_list<value_type> ilist)
+ {
+ DoAssign(ilist.begin(), ilist.end(), false_type());
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline void deque<T, Allocator, kDequeSubarraySize>::assign(size_type n, const value_type& value)
+ {
+ DoAssignValues(n, value);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline void deque<T, Allocator, kDequeSubarraySize>::assign(std::initializer_list<value_type> ilist)
+ {
+ DoAssign(ilist.begin(), ilist.end(), false_type());
+ }
+
+
+ // It turns out that the C++ std::deque specifies a two argument
+ // version of assign that takes (int size, int value). These are not
+ // iterators, so we need to do a template compiler trick to do the right thing.
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename InputIterator>
+ inline void deque<T, Allocator, kDequeSubarraySize>::assign(InputIterator first, InputIterator last)
+ {
+ DoAssign(first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::iterator
+ deque<T, Allocator, kDequeSubarraySize>::begin() EA_NOEXCEPT
+ {
+ return mItBegin;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::const_iterator
+ deque<T, Allocator, kDequeSubarraySize>::begin() const EA_NOEXCEPT
+ {
+ return mItBegin;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::const_iterator
+ deque<T, Allocator, kDequeSubarraySize>::cbegin() const EA_NOEXCEPT
+ {
+ return mItBegin;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::iterator
+ deque<T, Allocator, kDequeSubarraySize>::end() EA_NOEXCEPT
+ {
+ return mItEnd;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::const_iterator
+ deque<T, Allocator, kDequeSubarraySize>::end() const EA_NOEXCEPT
+ {
+ return mItEnd;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::const_iterator
+ deque<T, Allocator, kDequeSubarraySize>::cend() const EA_NOEXCEPT
+ {
+ return mItEnd;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::reverse_iterator
+ deque<T, Allocator, kDequeSubarraySize>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(mItEnd);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::const_reverse_iterator
+ deque<T, Allocator, kDequeSubarraySize>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mItEnd);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::const_reverse_iterator
+ deque<T, Allocator, kDequeSubarraySize>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mItEnd);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::reverse_iterator
+ deque<T, Allocator, kDequeSubarraySize>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(mItBegin);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::const_reverse_iterator
+ deque<T, Allocator, kDequeSubarraySize>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mItBegin);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline typename deque<T, Allocator, kDequeSubarraySize>::const_reverse_iterator
+ deque<T, Allocator, kDequeSubarraySize>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mItBegin);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline bool deque<T, Allocator, kDequeSubarraySize>::empty() const EA_NOEXCEPT
+ {
+ return mItBegin.mpCurrent == mItEnd.mpCurrent;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::size_type
+ inline deque<T, Allocator, kDequeSubarraySize>::size() const EA_NOEXCEPT
+ {
+ return (size_type)(mItEnd - mItBegin);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline void deque<T, Allocator, kDequeSubarraySize>::resize(size_type n, const value_type& value)
+ {
+ const size_type nSizeCurrent = size();
+
+ if(n > nSizeCurrent) // We expect that more often than not, resizes will be upsizes.
+ insert(mItEnd, n - nSizeCurrent, value);
+ else
+ erase(mItBegin + (difference_type)n, mItEnd);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline void deque<T, Allocator, kDequeSubarraySize>::resize(size_type n)
+ {
+ resize(n, value_type());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline void deque<T, Allocator, kDequeSubarraySize>::shrink_to_fit()
+ {
+ this_type x(eastl::make_move_iterator(begin()), eastl::make_move_iterator(end()));
+ swap(x);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline void deque<T, Allocator, kDequeSubarraySize>::set_capacity(size_type n)
+ {
+ // Currently there isn't a way to remove all allocations from a deque, as it
+ // requires a single starting allocation for the subarrays. So we can't just
+ // free all memory without leaving it in a bad state. So the best means of
+ // implementing set_capacity() is to do what we do below.
+
+ if(n == 0)
+ {
+ this_type temp(mAllocator);
+ DoSwap(temp);
+ }
+ else if(n < size())
+ {
+ // We currently ignore the request to reduce capacity. To do: Implement this
+ // and do it in a way that doesn't result in temporarily ~doubling our memory usage.
+ // That might involve trimming unused subarrays from the front or back of
+ // the container.
+ resize(n);
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::reference
+ deque<T, Allocator, kDequeSubarraySize>::operator[](size_type n)
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(n >= (size_type)(mItEnd - mItBegin)))
+ EASTL_FAIL_MSG("deque::operator[] -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ // We allow taking a reference to deque[0]
+ if (EASTL_UNLIKELY((n != 0) && n >= (size_type)(mItEnd - mItBegin)))
+ EASTL_FAIL_MSG("deque::operator[] -- out of range");
+ #endif
+
+ // See DequeIterator::operator+=() for an explanation of the code below.
+ iterator it(mItBegin);
+
+ const difference_type subarrayPosition = (difference_type)((it.mpCurrent - it.mpBegin) + (difference_type)n);
+ const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize);
+
+ return *(*(it.mpCurrentArrayPtr + subarrayIndex) + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize)));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::const_reference
+ deque<T, Allocator, kDequeSubarraySize>::operator[](size_type n) const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(n >= (size_type)(mItEnd - mItBegin)))
+ EASTL_FAIL_MSG("deque::operator[] -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ // We allow the user to use a reference to deque[0] of an empty container.
+ if (EASTL_UNLIKELY((n != 0) && n >= (size_type)(mItEnd - mItBegin)))
+ EASTL_FAIL_MSG("deque::operator[] -- out of range");
+ #endif
+
+ // See DequeIterator::operator+=() for an explanation of the code below.
+ iterator it(mItBegin);
+
+ const difference_type subarrayPosition = (it.mpCurrent - it.mpBegin) + (difference_type)n;
+ const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize);
+
+ return *(*(it.mpCurrentArrayPtr + subarrayIndex) + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize)));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::reference
+ deque<T, Allocator, kDequeSubarraySize>::at(size_type n)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(n >= (size_type)(mItEnd - mItBegin))
+ throw std::out_of_range("deque::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(n >= (size_type)(mItEnd - mItBegin))
+ EASTL_FAIL_MSG("deque::at -- out of range");
+ #endif
+ return *(mItBegin.operator+((difference_type)n));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::const_reference
+ deque<T, Allocator, kDequeSubarraySize>::at(size_type n) const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(n >= (size_type)(mItEnd - mItBegin))
+ throw std::out_of_range("deque::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(n >= (size_type)(mItEnd - mItBegin))
+ EASTL_FAIL_MSG("deque::at -- out of range");
+ #endif
+ return *(mItBegin.operator+((difference_type)n));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::reference
+ deque<T, Allocator, kDequeSubarraySize>::front()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin)))
+ EASTL_FAIL_MSG("deque::front -- empty deque");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return *mItBegin;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::const_reference
+ deque<T, Allocator, kDequeSubarraySize>::front() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin)))
+ EASTL_FAIL_MSG("deque::front -- empty deque");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return *mItBegin;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::reference
+ deque<T, Allocator, kDequeSubarraySize>::back()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin)))
+ EASTL_FAIL_MSG("deque::back -- empty deque");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return *iterator(mItEnd, typename iterator::Decrement());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::const_reference
+ deque<T, Allocator, kDequeSubarraySize>::back() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin)))
+ EASTL_FAIL_MSG("deque::back -- empty deque");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return *iterator(mItEnd, typename iterator::Decrement());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::push_front(const value_type& value)
+ {
+ emplace_front(value);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::push_front(value_type&& value)
+ {
+ emplace_front(eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::reference
+ deque<T, Allocator, kDequeSubarraySize>::push_front()
+ {
+ emplace_front(value_type());
+ return *mItBegin; // Same as return front();
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::push_back(const value_type& value)
+ {
+ emplace_back(value);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::push_back(value_type&& value)
+ {
+ emplace_back(eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::reference
+ deque<T, Allocator, kDequeSubarraySize>::push_back()
+ {
+ emplace_back(value_type());
+ return *iterator(mItEnd, typename iterator::Decrement()); // Same thing as return back();
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::pop_front()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((size_type)(mItEnd == mItBegin)))
+ EASTL_FAIL_MSG("deque::pop_front -- empty deque");
+ #endif
+
+ if((mItBegin.mpCurrent + 1) != mItBegin.mpEnd) // If the operation is very simple...
+ (mItBegin.mpCurrent++)->~value_type();
+ else
+ {
+ // This is executed only when we are popping the end (last) item off the front-most subarray.
+ // In this case we need to free the subarray and point mItBegin to the next subarray.
+ #ifdef EA_DEBUG
+ value_type** pp = mItBegin.mpCurrentArrayPtr;
+ #endif
+
+ mItBegin.mpCurrent->~value_type(); // mpCurrent == mpEnd - 1
+ DoFreeSubarray(mItBegin.mpBegin);
+ mItBegin.SetSubarray(mItBegin.mpCurrentArrayPtr + 1);
+ mItBegin.mpCurrent = mItBegin.mpBegin;
+
+ #ifdef EA_DEBUG
+ *pp = NULL;
+ #endif
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::pop_back()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((size_type)(mItEnd == mItBegin)))
+ EASTL_FAIL_MSG("deque::pop_back -- empty deque");
+ #endif
+
+ if(mItEnd.mpCurrent != mItEnd.mpBegin) // If the operation is very simple...
+ (--mItEnd.mpCurrent)->~value_type();
+ else
+ {
+ // This is executed only when we are popping the first item off the last subarray.
+ // In this case we need to free the subarray and point mItEnd to the previous subarray.
+ #ifdef EA_DEBUG
+ value_type** pp = mItEnd.mpCurrentArrayPtr;
+ #endif
+
+ DoFreeSubarray(mItEnd.mpBegin);
+ mItEnd.SetSubarray(mItEnd.mpCurrentArrayPtr - 1);
+ mItEnd.mpCurrent = mItEnd.mpEnd - 1; // Recall that mItEnd points to one-past the last item in the container.
+ mItEnd.mpCurrent->~value_type(); // Thus we need to call the destructor on the item *before* that last item.
+
+ #ifdef EA_DEBUG
+ *pp = NULL;
+ #endif
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template<class... Args>
+ typename deque<T, Allocator, kDequeSubarraySize>::iterator
+ deque<T, Allocator, kDequeSubarraySize>::emplace(const_iterator position, Args&&... args)
+ {
+ if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If we are doing the same thing as push_back...
+ {
+ emplace_back(eastl::forward<Args>(args)...);
+ return iterator(mItEnd, typename iterator::Decrement()); // Unfortunately, we need to make an iterator here, as the above push_back is an operation that can invalidate existing iterators.
+ }
+ else if(EASTL_UNLIKELY(position.mpCurrent == mItBegin.mpCurrent)) // If we are doing the same thing as push_front...
+ {
+ emplace_front(eastl::forward<Args>(args)...);
+ return mItBegin;
+ }
+
+ iterator itPosition(position, typename iterator::FromConst());
+ value_type valueSaved(eastl::forward<Args>(args)...); // We need to save this because value may come from within our container. It would be somewhat tedious to make a workaround that could avoid this.
+ const difference_type i(itPosition - mItBegin);
+
+ #if EASTL_ASSERT_ENABLED
+ EASTL_ASSERT(!empty()); // The push_front and push_back calls below assume that we are non-empty. It turns out this is never called unless so.
+
+ if(EASTL_UNLIKELY(!(validate_iterator(itPosition) & isf_valid)))
+ EASTL_FAIL_MSG("deque::emplace -- invalid iterator");
+ #endif
+
+ if(i < (difference_type)(size() / 2)) // Should we insert at the front or at the back? We divide the range in half.
+ {
+ emplace_front(eastl::move(*mItBegin)); // This operation potentially invalidates all existing iterators and so we need to assign them anew relative to mItBegin below.
+
+ itPosition = mItBegin + i;
+
+ const iterator newPosition (itPosition, typename iterator::Increment());
+ iterator oldBegin (mItBegin, typename iterator::Increment());
+ const iterator oldBeginPlus1(oldBegin, typename iterator::Increment());
+
+ oldBegin.copy(oldBeginPlus1, newPosition, eastl::has_trivial_relocate<value_type>());
+ }
+ else
+ {
+ emplace_back(eastl::move(*iterator(mItEnd, typename iterator::Decrement())));
+
+ itPosition = mItBegin + i;
+
+ iterator oldBack (mItEnd, typename iterator::Decrement());
+ const iterator oldBackMinus1(oldBack, typename iterator::Decrement());
+
+ oldBack.copy_backward(itPosition, oldBackMinus1, eastl::has_trivial_relocate<value_type>());
+ }
+
+ *itPosition = eastl::move(valueSaved);
+
+ return itPosition;
+ }
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template<class... Args>
+ void deque<T, Allocator, kDequeSubarraySize>::emplace_front(Args&&... args)
+ {
+ if(mItBegin.mpCurrent != mItBegin.mpBegin) // If we have room in the first subarray... we hope that usually this 'new' pathway gets executed, as it is slightly faster.
+ ::new((void*)--mItBegin.mpCurrent) value_type(eastl::forward<Args>(args)...); // Construct in place. If args is a single arg of type value_type&& then it this will be a move construction.
+ else
+ {
+ // To consider: Detect if value isn't coming from within this container and handle that efficiently.
+ value_type valueSaved(eastl::forward<Args>(args)...); // We need to make a temporary, because args may be a value_type that comes from within our container and the operations below may change the container. But we can use move instead of copy.
+
+ if(mItBegin.mpCurrentArrayPtr == mpPtrArray) // If there are no more pointers in front of the current (first) one...
+ DoReallocPtrArray(1, kSideFront);
+
+ mItBegin.mpCurrentArrayPtr[-1] = DoAllocateSubarray();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ mItBegin.SetSubarray(mItBegin.mpCurrentArrayPtr - 1);
+ mItBegin.mpCurrent = mItBegin.mpEnd - 1;
+ ::new((void*)mItBegin.mpCurrent) value_type(eastl::move(valueSaved));
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ ++mItBegin; // The exception could only occur in the new operation above, after we have incremented mItBegin. So we need to undo it.
+ DoFreeSubarray(mItBegin.mpCurrentArrayPtr[-1]);
+ throw;
+ }
+ #endif
+ }
+ }
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template<class... Args>
+ void deque<T, Allocator, kDequeSubarraySize>::emplace_back(Args&&... args)
+ {
+ if((mItEnd.mpCurrent + 1) != mItEnd.mpEnd) // If we have room in the last subarray... we hope that usually this 'new' pathway gets executed, as it is slightly faster.
+ ::new((void*)mItEnd.mpCurrent++) value_type(eastl::forward<Args>(args)...); // Construct in place. If args is a single arg of type value_type&& then it this will be a move construction.
+ else
+ {
+ // To consider: Detect if value isn't coming from within this container and handle that efficiently.
+ value_type valueSaved(eastl::forward<Args>(args)...); // We need to make a temporary, because args may be a value_type that comes from within our container and the operations below may change the container. But we can use move instead of copy.
+ if(((mItEnd.mpCurrentArrayPtr - mpPtrArray) + 1) >= (difference_type)mnPtrArraySize) // If there are no more pointers after the current (last) one.
+ DoReallocPtrArray(1, kSideBack);
+
+ mItEnd.mpCurrentArrayPtr[1] = DoAllocateSubarray();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new((void*)mItEnd.mpCurrent) value_type(eastl::move(valueSaved)); // We can move valueSaved into position.
+ mItEnd.SetSubarray(mItEnd.mpCurrentArrayPtr + 1);
+ mItEnd.mpCurrent = mItEnd.mpBegin;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ // No need to execute '--mItEnd', as the exception could only occur in the new operation above before we set mItEnd.
+ DoFreeSubarray(mItEnd.mpCurrentArrayPtr[1]);
+ throw;
+ }
+ #endif
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::iterator
+ deque<T, Allocator, kDequeSubarraySize>::insert(const_iterator position, const value_type& value)
+ {
+ return emplace(position, value);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::iterator
+ deque<T, Allocator, kDequeSubarraySize>::insert(const_iterator position, value_type&& value)
+ {
+ return emplace(position, eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::insert(const_iterator position, size_type n, const value_type& value)
+ {
+ DoInsertValues(position, n, value);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename InputIterator>
+ void deque<T, Allocator, kDequeSubarraySize>::insert(const_iterator position, InputIterator first, InputIterator last)
+ {
+ DoInsert(position, first, last, is_integral<InputIterator>()); // The C++ standard requires this sort of behaviour, as InputIterator might actually be Integer and 'first' is really 'count' and 'last' is really 'value'.
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::iterator
+ deque<T, Allocator, kDequeSubarraySize>::insert(const_iterator position, std::initializer_list<value_type> ilist)
+ {
+ const difference_type i(position - mItBegin);
+ DoInsert(position, ilist.begin(), ilist.end(), false_type());
+ return (mItBegin + i);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::iterator
+ deque<T, Allocator, kDequeSubarraySize>::erase(const_iterator position)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(validate_iterator(position) & isf_valid)))
+ EASTL_FAIL_MSG("deque::erase -- invalid iterator");
+
+ if(EASTL_UNLIKELY(position == end()))
+ EASTL_FAIL_MSG("deque::erase -- end() iterator is an invalid iterator for erase");
+ #endif
+
+ iterator itPosition(position, typename iterator::FromConst());
+ iterator itNext(itPosition, typename iterator::Increment());
+ const difference_type i(itPosition - mItBegin);
+
+ if(i < (difference_type)(size() / 2)) // Should we move the front entries forward or the back entries backward? We divide the range in half.
+ {
+ itNext.copy_backward(mItBegin, itPosition, eastl::has_trivial_relocate<value_type>());
+ pop_front();
+ }
+ else
+ {
+ itPosition.copy(itNext, mItEnd, eastl::has_trivial_relocate<value_type>());
+ pop_back();
+ }
+
+ return mItBegin + i;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::iterator
+ deque<T, Allocator, kDequeSubarraySize>::erase(const_iterator first, const_iterator last)
+ {
+ iterator itFirst(first, typename iterator::FromConst());
+ iterator itLast(last, typename iterator::FromConst());
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(validate_iterator(itFirst) & isf_valid)))
+ EASTL_FAIL_MSG("deque::erase -- invalid iterator");
+ if(EASTL_UNLIKELY(!(validate_iterator(itLast) & isf_valid)))
+ EASTL_FAIL_MSG("deque::erase -- invalid iterator");
+ #endif
+
+ if((itFirst != mItBegin) || (itLast != mItEnd)) // If not erasing everything... (We expect that the user won't call erase(begin, end) because instead the user would just call clear.)
+ {
+ const difference_type n(itLast - itFirst);
+ const difference_type i(itFirst - mItBegin);
+
+ if(i < (difference_type)((size() - n) / 2)) // Should we move the front entries forward or the back entries backward? We divide the range in half.
+ {
+ const iterator itNewBegin(mItBegin + n);
+ value_type** const pPtrArrayBegin = mItBegin.mpCurrentArrayPtr;
+
+ itLast.copy_backward(mItBegin, itFirst, eastl::has_trivial_relocate<value_type>());
+
+ for(; mItBegin != itNewBegin; ++mItBegin) // Question: If value_type is a POD type, will the compiler generate this loop at all?
+ mItBegin.mpCurrent->~value_type(); // If so, then we need to make a specialization for destructing PODs.
+
+ DoFreeSubarrays(pPtrArrayBegin, itNewBegin.mpCurrentArrayPtr);
+
+ // mItBegin = itNewBegin; <-- Not necessary, as the above loop makes it so already.
+ }
+ else // Else we will be moving back entries backward.
+ {
+ iterator itNewEnd(mItEnd - n);
+ value_type** const pPtrArrayEnd = itNewEnd.mpCurrentArrayPtr + 1;
+
+ itFirst.copy(itLast, mItEnd, eastl::has_trivial_relocate<value_type>());
+
+ for(iterator itTemp(itNewEnd); itTemp != mItEnd; ++itTemp)
+ itTemp.mpCurrent->~value_type();
+
+ DoFreeSubarrays(pPtrArrayEnd, mItEnd.mpCurrentArrayPtr + 1);
+
+ mItEnd = itNewEnd;
+ }
+
+ return mItBegin + i;
+ }
+
+ clear();
+ return mItEnd;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::reverse_iterator
+ deque<T, Allocator, kDequeSubarraySize>::erase(reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ typename deque<T, Allocator, kDequeSubarraySize>::reverse_iterator
+ deque<T, Allocator, kDequeSubarraySize>::erase(reverse_iterator first, reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase(last.base(), first.base()));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::clear()
+ {
+ // Destroy all values and all subarrays they belong to, except for the first one,
+ // as we need to reserve some space for a valid mItBegin/mItEnd.
+ if(mItBegin.mpCurrentArrayPtr != mItEnd.mpCurrentArrayPtr) // If there are multiple subarrays (more often than not, this will be so)...
+ {
+ for(value_type* p1 = mItBegin.mpCurrent; p1 < mItBegin.mpEnd; ++p1)
+ p1->~value_type();
+ for(value_type* p2 = mItEnd.mpBegin; p2 < mItEnd.mpCurrent; ++p2)
+ p2->~value_type();
+ DoFreeSubarray(mItEnd.mpBegin); // Leave mItBegin with a valid subarray.
+ }
+ else
+ {
+ for(value_type* p = mItBegin.mpCurrent; p < mItEnd.mpCurrent; ++p)
+ p->~value_type();
+ // Don't free the one existing subarray, as we need it for mItBegin/mItEnd.
+ }
+
+ for(value_type** pPtrArray = mItBegin.mpCurrentArrayPtr + 1; pPtrArray < mItEnd.mpCurrentArrayPtr; ++pPtrArray)
+ {
+ for(value_type* p = *pPtrArray, *pEnd = *pPtrArray + kDequeSubarraySize; p < pEnd; ++p)
+ p->~value_type();
+ DoFreeSubarray(*pPtrArray);
+ }
+
+ mItEnd = mItBegin; // mItBegin/mItEnd will not be dereferencable.
+ }
+
+
+ //template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ //void deque<T, Allocator, kDequeSubarraySize>::reset_lose_memory()
+ //{
+ // // The reset_lose_memory function is a special extension function which unilaterally
+ // // resets the container to an empty state without freeing the memory of
+ // // the contained objects. This is useful for very quickly tearing down a
+ // // container built into scratch memory.
+ //
+ // // Currently we are unable to get this reset_lose_memory operation to work correctly
+ // // as we haven't been able to find a good way to have a deque initialize
+ // // without allocating memory. We can lose the old memory, but DoInit
+ // // would necessarily do a ptrArray allocation. And this is not within
+ // // our definition of how reset_lose_memory works.
+ // base_type::DoInit(0);
+ //
+ //}
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::swap(deque& x)
+ {
+ #if defined(EASTL_DEQUE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR) && EASTL_DEQUE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ if(mAllocator == x.mAllocator) // If allocators are equivalent...
+ DoSwap(x);
+ else // else swap the contents.
+ {
+ const this_type temp(*this); // Can't call eastl::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+ #else
+ // NOTE(rparolin): The previous implementation required T to be copy-constructible in the fall-back case where
+ // allocators with unique instances copied elements. This was an unnecessary restriction and prevented the common
+ // usage of deque with non-copyable types (eg. eastl::deque<non_copyable> or eastl::deque<unique_ptr>).
+ //
+ // The previous implementation violated the following requirements of deque::swap so the fall-back code has
+ // been removed. EASTL implicitly defines 'propagate_on_container_swap = false' therefore the fall-back case is
+ // undefined behaviour. We simply swap the contents and the allocator as that is the common expectation of
+ // users and does not put the container into an invalid state since it can not free its memory via its current
+ // allocator instance.
+ //
+ DoSwap(x);
+ #endif
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename Integer>
+ void deque<T, Allocator, kDequeSubarraySize>::DoInit(Integer n, Integer value, true_type)
+ {
+ base_type::DoInit(n); // Call the base uninitialized init function.
+ DoFillInit(value);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename InputIterator>
+ void deque<T, Allocator, kDequeSubarraySize>::DoInit(InputIterator first, InputIterator last, false_type)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ DoInitFromIterator(first, last, IC());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename InputIterator>
+ void deque<T, Allocator, kDequeSubarraySize>::DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
+ {
+ base_type::DoInit(0); // Call the base uninitialized init function, but don't actually allocate any values.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // We have little choice but to turn through the source iterator and call
+ // push_back for each item. It can be slow because it will keep reallocating the
+ // container memory as we go. We are not allowed to use distance() on an InputIterator.
+ for(; first != last; ++first) // InputIterators by definition actually only allow you to iterate through them once.
+ { // Thus the standard *requires* that we do this (inefficient) implementation.
+ push_back(*first); // Luckily, InputIterators are in practice almost never used, so this code will likely never get executed.
+ }
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ clear();
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename ForwardIterator>
+ void deque<T, Allocator, kDequeSubarraySize>::DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ typedef typename eastl::remove_const<ForwardIterator>::type non_const_iterator_type; // If T is a const type (e.g. const int) then we need to initialize it as if it were non-const.
+ typedef typename eastl::remove_const<value_type>::type non_const_value_type;
+
+ const size_type n = (size_type)eastl::distance(first, last);
+ value_type** pPtrArrayCurrent;
+
+ base_type::DoInit(n); // Call the base uninitialized init function.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(pPtrArrayCurrent = mItBegin.mpCurrentArrayPtr; pPtrArrayCurrent < mItEnd.mpCurrentArrayPtr; ++pPtrArrayCurrent) // Copy to the known-to-be-completely-used subarrays.
+ {
+ // We implment an algorithm here whereby we use uninitialized_copy() and advance() instead of just iterating from first to last and constructing as we go. The reason for this is that we can take advantage of POD data types and implement construction as memcpy operations.
+ ForwardIterator current(first); // To do: Implement a specialization of this algorithm for non-PODs which eliminates the need for 'current'.
+
+ eastl::advance(current, kDequeSubarraySize);
+ eastl::uninitialized_copy((non_const_iterator_type)first, (non_const_iterator_type)current, (non_const_value_type*)*pPtrArrayCurrent);
+ first = current;
+ }
+
+ eastl::uninitialized_copy((non_const_iterator_type)first, (non_const_iterator_type)last, (non_const_value_type*)mItEnd.mpBegin);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(iterator itCurrent(mItBegin), itEnd(pPtrArrayCurrent, *pPtrArrayCurrent); itCurrent != itEnd; ++itCurrent)
+ itCurrent.mpCurrent->~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::DoFillInit(const value_type& value)
+ {
+ value_type** pPtrArrayCurrent = mItBegin.mpCurrentArrayPtr;
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ while(pPtrArrayCurrent < mItEnd.mpCurrentArrayPtr)
+ {
+ eastl::uninitialized_fill(*pPtrArrayCurrent, *pPtrArrayCurrent + kDequeSubarraySize, value);
+ ++pPtrArrayCurrent;
+ }
+ eastl::uninitialized_fill(mItEnd.mpBegin, mItEnd.mpCurrent, value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(iterator itCurrent(mItBegin), itEnd(pPtrArrayCurrent, *pPtrArrayCurrent); itCurrent != itEnd; ++itCurrent)
+ itCurrent.mpCurrent->~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename Integer>
+ void deque<T, Allocator, kDequeSubarraySize>::DoAssign(Integer n, Integer value, true_type) // false_type means this is the integer version instead of iterator version.
+ {
+ DoAssignValues(static_cast<size_type>(n), static_cast<value_type>(value));
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename InputIterator>
+ void deque<T, Allocator, kDequeSubarraySize>::DoAssign(InputIterator first, InputIterator last, false_type) // false_type means this is the iterator version instead of integer version.
+ {
+ // Actually, the implementation below requires first/last to be a ForwardIterator and not just an InputIterator.
+ // But Paul Pedriana if you somehow need to work with an InputIterator and we can deal with it.
+ const size_type n = (size_type)eastl::distance(first, last);
+ const size_type nSize = size();
+
+ if(n > nSize) // If we are increasing the size...
+ {
+ InputIterator atEnd(first);
+
+ eastl::advance(atEnd, (difference_type)nSize);
+ eastl::copy(first, atEnd, mItBegin);
+ insert(mItEnd, atEnd, last);
+ }
+ else // n is <= size.
+ {
+ iterator itEnd(eastl::copy(first, last, mItBegin));
+
+ if(n < nSize) // If we need to erase any trailing elements...
+ erase(itEnd, mItEnd);
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::DoAssignValues(size_type n, const value_type& value)
+ {
+ const size_type nSize = size();
+
+ if(n > nSize) // If we are increasing the size...
+ {
+ eastl::fill(mItBegin, mItEnd, value);
+ insert(mItEnd, n - nSize, value);
+ }
+ else
+ {
+ erase(mItBegin + (difference_type)n, mItEnd);
+ eastl::fill(mItBegin, mItEnd, value);
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename Integer>
+ void deque<T, Allocator, kDequeSubarraySize>::DoInsert(const const_iterator& position, Integer n, Integer value, true_type)
+ {
+ DoInsertValues(position, (size_type)n, (value_type)value);
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename InputIterator>
+ void deque<T, Allocator, kDequeSubarraySize>::DoInsert(const const_iterator& position, const InputIterator& first, const InputIterator& last, false_type)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ DoInsertFromIterator(position, first, last, IC());
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ template <typename InputIterator>
+ void deque<T, Allocator, kDequeSubarraySize>::DoInsertFromIterator(const_iterator position, const InputIterator& first, const InputIterator& last, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ const size_type n = (size_type)eastl::distance(first, last);
+
+ // This implementation is nearly identical to DoInsertValues below.
+ // If you make a bug fix to one, you will likely want to fix the other.
+ if(position.mpCurrent == mItBegin.mpCurrent) // If inserting at the beginning or into an empty container...
+ {
+ iterator itNewBegin(DoReallocSubarray(n, kSideFront)); // itNewBegin to mItBegin refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // We would like to use move here instead of copy when possible, which would be useful for
+ // when inserting from a std::initializer_list, for example.
+ // To do: solve this by having a template or runtime parameter which specifies move vs copy.
+ eastl::uninitialized_copy(first, last, itNewBegin);
+ mItBegin = itNewBegin;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr);
+ throw;
+ }
+ #endif
+ }
+ else if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If inserting at the end (i.e. appending)...
+ {
+ const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); // mItEnd to itNewEnd refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // We would like to use move here instead of copy when possible, which would be useful for
+ // when inserting from a std::initializer_list, for example.
+ // To do: solve this by having a template or runtime parameter which specifies move vs copy.
+ eastl::uninitialized_copy(first, last, mItEnd);
+ mItEnd = itNewEnd;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1);
+ throw;
+ }
+ #endif
+ }
+ else
+ {
+ const difference_type nInsertionIndex = position - mItBegin;
+ const size_type nSize = size();
+
+ if(nInsertionIndex < (difference_type)(nSize / 2)) // If the insertion index is in the front half of the deque... grow the deque at the front.
+ {
+ const iterator itNewBegin(DoReallocSubarray(n, kSideFront)); // itNewBegin to mItBegin refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one.
+ const iterator itOldBegin(mItBegin);
+ const iterator itPosition(mItBegin + nInsertionIndex); // We need to reset this value because the reallocation above can invalidate iterators.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // We have a problem here: we would like to use move instead of copy, but it may be that the range to be inserted comes from
+ // this container and comes from the segment we need to move. So we can't use move operations unless we are careful to handle
+ // that situation. The newly inserted contents must be contents that were moved to and not moved from. To do: solve this.
+ if(nInsertionIndex >= (difference_type)n) // If the newly inserted items will be entirely within the old area...
+ {
+ iterator itUCopyEnd(mItBegin + (difference_type)n);
+
+ eastl::uninitialized_copy(mItBegin, itUCopyEnd, itNewBegin); // This can throw.
+ itUCopyEnd = eastl::copy(itUCopyEnd, itPosition, itOldBegin); // Recycle 'itUCopyEnd' to mean something else.
+ eastl::copy(first, last, itUCopyEnd);
+ }
+ else // Else the newly inserted items are going within the newly allocated area at the front.
+ {
+ InputIterator mid(first);
+
+ eastl::advance(mid, (difference_type)n - nInsertionIndex);
+ eastl::uninitialized_copy_copy(mItBegin, itPosition, first, mid, itNewBegin); // This can throw.
+ eastl::copy(mid, last, itOldBegin);
+ }
+ mItBegin = itNewBegin;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr);
+ throw;
+ }
+ #endif
+ }
+ else
+ {
+ const iterator itNewEnd(DoReallocSubarray(n, kSideBack));
+ const iterator itOldEnd(mItEnd);
+ const difference_type nPushedCount = (difference_type)nSize - nInsertionIndex;
+ const iterator itPosition(mItEnd - nPushedCount); // We need to reset this value because the reallocation above can invalidate iterators.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // We have a problem here: we would like to use move instead of copy, but it may be that the range to be inserted comes from
+ // this container and comes from the segment we need to move. So we can't use move operations unless we are careful to handle
+ // that situation. The newly inserted contents must be contents that were moved to and not moved from. To do: solve this.
+ if(nPushedCount > (difference_type)n)
+ {
+ const iterator itUCopyEnd(mItEnd - (difference_type)n);
+
+ eastl::uninitialized_copy(itUCopyEnd, mItEnd, mItEnd);
+ eastl::copy_backward(itPosition, itUCopyEnd, itOldEnd);
+ eastl::copy(first, last, itPosition);
+ }
+ else
+ {
+ InputIterator mid(first);
+
+ eastl::advance(mid, nPushedCount);
+ eastl::uninitialized_copy_copy(mid, last, itPosition, mItEnd, mItEnd);
+ eastl::copy(first, mid, itPosition);
+ }
+ mItEnd = itNewEnd;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1);
+ throw;
+ }
+ #endif
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ void deque<T, Allocator, kDequeSubarraySize>::DoInsertValues(const_iterator position, size_type n, const value_type& value)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(validate_iterator(position) & isf_valid)))
+ EASTL_FAIL_MSG("deque::insert -- invalid iterator");
+ #endif
+
+ // This implementation is nearly identical to DoInsertFromIterator above.
+ // If you make a bug fix to one, you will likely want to fix the other.
+ if(position.mpCurrent == mItBegin.mpCurrent) // If inserting at the beginning...
+ {
+ const iterator itNewBegin(DoReallocSubarray(n, kSideFront));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // Note that we don't make a temp copy of 'value' here. This is because in a
+ // deque, insertion at either the front or back doesn't cause a reallocation
+ // or move of data in the middle. That's a key feature of deques, in fact.
+ eastl::uninitialized_fill(itNewBegin, mItBegin, value);
+ mItBegin = itNewBegin;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr);
+ throw;
+ }
+ #endif
+ }
+ else if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If inserting at the end (i.e. appending)...
+ {
+ const iterator itNewEnd(DoReallocSubarray(n, kSideBack));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // Note that we don't make a temp copy of 'value' here. This is because in a
+ // deque, insertion at either the front or back doesn't cause a reallocation
+ // or move of data in the middle. That's a key feature of deques, in fact.
+ eastl::uninitialized_fill(mItEnd, itNewEnd, value);
+ mItEnd = itNewEnd;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1);
+ throw;
+ }
+ #endif
+ }
+ else
+ {
+ // A key purpose of a deque is to implement insertions and removals more efficiently
+ // than with a vector. We are inserting into the middle of the deque here. A quick and
+ // dirty implementation of this would be to reallocate the subarrays and simply push
+ // all values in the middle upward like you would do with a vector. Instead we implement
+ // the minimum amount of reallocations needed but may need to do some value moving,
+ // as the subarray sizes need to remain constant and can have no holes in them.
+ const difference_type nInsertionIndex = position - mItBegin;
+ const size_type nSize = size();
+ const value_type valueSaved(value);
+
+ if(nInsertionIndex < (difference_type)(nSize / 2)) // If the insertion index is in the front half of the deque... grow the deque at the front.
+ {
+ const iterator itNewBegin(DoReallocSubarray(n, kSideFront));
+ const iterator itOldBegin(mItBegin);
+ const iterator itPosition(mItBegin + nInsertionIndex); // We need to reset this value because the reallocation above can invalidate iterators.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(nInsertionIndex >= (difference_type)n) // If the newly inserted items will be entirely within the old area...
+ {
+ iterator itUCopyEnd(mItBegin + (difference_type)n);
+
+ eastl::uninitialized_move_if_noexcept(mItBegin, itUCopyEnd, itNewBegin); // This can throw.
+ itUCopyEnd = eastl::move(itUCopyEnd, itPosition, itOldBegin); // Recycle 'itUCopyEnd' to mean something else.
+ eastl::fill(itUCopyEnd, itPosition, valueSaved);
+ }
+ else // Else the newly inserted items are going within the newly allocated area at the front.
+ {
+ eastl::uninitialized_move_fill(mItBegin, itPosition, itNewBegin, mItBegin, valueSaved); // This can throw.
+ eastl::fill(itOldBegin, itPosition, valueSaved);
+ }
+ mItBegin = itNewBegin;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr);
+ throw;
+ }
+ #endif
+ }
+ else // Else the insertion index is in the back half of the deque, so grow the deque at the back.
+ {
+ const iterator itNewEnd(DoReallocSubarray(n, kSideBack));
+ const iterator itOldEnd(mItEnd);
+ const difference_type nPushedCount = (difference_type)nSize - nInsertionIndex;
+ const iterator itPosition(mItEnd - nPushedCount); // We need to reset this value because the reallocation above can invalidate iterators.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(nPushedCount > (difference_type)n) // If the newly inserted items will be entirely within the old area...
+ {
+ iterator itUCopyEnd(mItEnd - (difference_type)n);
+
+ eastl::uninitialized_move_if_noexcept(itUCopyEnd, mItEnd, mItEnd); // This can throw.
+ itUCopyEnd = eastl::move_backward(itPosition, itUCopyEnd, itOldEnd); // Recycle 'itUCopyEnd' to mean something else.
+ eastl::fill(itPosition, itUCopyEnd, valueSaved);
+ }
+ else // Else the newly inserted items are going within the newly allocated area at the back.
+ {
+ eastl::uninitialized_fill_move(mItEnd, itPosition + (difference_type)n, valueSaved, itPosition, mItEnd); // This can throw.
+ eastl::fill(itPosition, itOldEnd, valueSaved);
+ }
+ mItEnd = itNewEnd;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1);
+ throw;
+ }
+ #endif
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline void deque<T, Allocator, kDequeSubarraySize>::DoSwap(this_type& x)
+ {
+ eastl::swap(mpPtrArray, x.mpPtrArray);
+ eastl::swap(mnPtrArraySize, x.mnPtrArraySize);
+ eastl::swap(mItBegin, x.mItBegin);
+ eastl::swap(mItEnd, x.mItEnd);
+ eastl::swap(mAllocator, x.mAllocator); // We do this even if EASTL_ALLOCATOR_COPY_ENABLED is 0.
+
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline bool deque<T, Allocator, kDequeSubarraySize>::validate() const
+ {
+ // To do: More detailed validation.
+ // To do: Try to make the validation resistant to crashes if the data is invalid.
+ if((end() - begin()) < 0)
+ return false;
+ return true;
+ }
+
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline int deque<T, Allocator, kDequeSubarraySize>::validate_iterator(const_iterator i) const
+ {
+ // To do: We don't currently track isf_current, will need to make it do so.
+ // To do: Fix the validation below, as it will not catch all invalid iterators.
+ if((i - begin()) < 0)
+ return isf_none;
+
+ if((end() - i) < 0)
+ return isf_none;
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline bool operator==(const deque<T, Allocator, kDequeSubarraySize>& a, const deque<T, Allocator, kDequeSubarraySize>& b)
+ {
+ return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin()));
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline synth_three_way_result<T> operator<=>(const deque<T, Allocator, kDequeSubarraySize>& a, const deque<T, Allocator, kDequeSubarraySize>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+
+#else
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline bool operator!=(const deque<T, Allocator, kDequeSubarraySize>& a, const deque<T, Allocator, kDequeSubarraySize>& b)
+ {
+ return ((a.size() != b.size()) || !eastl::equal(a.begin(), a.end(), b.begin()));
+ }
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline bool operator<(const deque<T, Allocator, kDequeSubarraySize>& a, const deque<T, Allocator, kDequeSubarraySize>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline bool operator>(const deque<T, Allocator, kDequeSubarraySize>& a, const deque<T, Allocator, kDequeSubarraySize>& b)
+ {
+ return b < a;
+ }
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline bool operator<=(const deque<T, Allocator, kDequeSubarraySize>& a, const deque<T, Allocator, kDequeSubarraySize>& b)
+ {
+ return !(b < a);
+ }
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline bool operator>=(const deque<T, Allocator, kDequeSubarraySize>& a, const deque<T, Allocator, kDequeSubarraySize>& b)
+ {
+ return !(a < b);
+ }
+#endif
+
+ template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+ inline void swap(deque<T, Allocator, kDequeSubarraySize>& a, deque<T, Allocator, kDequeSubarraySize>& b)
+ {
+ a.swap(b);
+ }
+
+ ///////////////////////////////////////////////////////////////////////
+ // erase / erase_if
+ //
+ // https://en.cppreference.com/w/cpp/container/deque/erase2
+ ///////////////////////////////////////////////////////////////////////
+ template <class T, class Allocator, class U>
+ typename deque<T, Allocator>::size_type erase(deque<T, Allocator>& c, const U& value)
+ {
+ // Erases all elements that compare equal to value from the container.
+ auto origEnd = c.end();
+ auto newEnd = eastl::remove(c.begin(), origEnd, value);
+ auto numRemoved = eastl::distance(newEnd, origEnd);
+ c.erase(newEnd, origEnd);
+
+ // Note: This is technically a lossy conversion when size_type
+ // is 32bits and ptrdiff_t is 64bits (could happen on 64bit
+ // systems when EASTL_SIZE_T_32BIT is set). In practice this
+ // is fine because if EASTL_SIZE_T_32BIT is set then the deque
+ // should not have more elements than fit in a uint32_t and so
+ // the distance here should fit in a size_type.
+ return static_cast<typename deque<T, Allocator>::size_type>(numRemoved);
+ }
+
+ template <class T, class Allocator, class Predicate>
+ typename deque<T, Allocator>::size_type erase_if(deque<T, Allocator>& c, Predicate predicate)
+ {
+ // Erases all elements that satisfy the predicate pred from the container.
+ auto origEnd = c.end();
+ auto newEnd = eastl::remove_if(c.begin(), origEnd, predicate);
+ auto numRemoved = eastl::distance(newEnd, origEnd);
+ c.erase(newEnd, origEnd);
+
+ // Note: This is technically a lossy conversion when size_type
+ // is 32bits and ptrdiff_t is 64bits (could happen on 64bit
+ // systems when EASTL_SIZE_T_32BIT is set). In practice this
+ // is fine because if EASTL_SIZE_T_32BIT is set then the deque
+ // should not have more elements than fit in a uint32_t and so
+ // the distance here should fit in a size_type.
+ return static_cast<typename deque<T, Allocator>::size_type>(numRemoved);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+#if EASTL_EXCEPTIONS_ENABLED
+ EA_RESTORE_VC_WARNING();
+#endif
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/finally.h b/EASTL/include/EASTL/finally.h
new file mode 100644
index 0000000..b4ed580
--- /dev/null
+++ b/EASTL/include/EASTL/finally.h
@@ -0,0 +1,93 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// eastl::finally is an implementation of the popular cpp idiom RAII - Resource
+// Acquisition Is Initialization. eastl::finally guarantees that the user
+// provided callable will be executed upon whatever mechanism is used to leave
+// the current scope. This can guard against user errors but this is a popular
+// technique to write robust code in execution environments that have exceptions
+// enabled.
+//
+// Example:
+// void foo()
+// {
+// void* p = malloc(128);
+// auto _ = eastl::make_finally([&] { free(p); });
+//
+// // Code that may throw an exception...
+//
+// } // eastl::finally guaranteed to call 'free' at scope exit.
+//
+// References:
+// * https://www.bfilipek.com/2017/04/finalact.html
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FINALLY_H
+#define EASTL_FINALLY_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////
+ // finally
+ //
+ // finally is the type that calls the users callback on scope exit.
+ //
+ template <typename Functor>
+ class finally
+ {
+ static_assert(!eastl::is_lvalue_reference_v<Functor>, "eastl::finally requires the callable is passed as an rvalue reference.");
+
+ Functor m_functor;
+ bool m_engaged = false;
+
+ public:
+ finally(Functor f) : m_functor(eastl::move(f)), m_engaged(true) {}
+
+ finally(finally&& other) : m_functor(eastl::move(other.m_functor)), m_engaged(other.m_engaged)
+ {
+ other.dismiss();
+ }
+
+ ~finally() { execute(); }
+
+ finally(const finally&) = delete;
+ finally& operator=(const finally&) = delete;
+ finally& operator=(finally&&) = delete;
+
+ inline void dismiss() { m_engaged = false; }
+
+ inline void execute()
+ {
+ if (m_engaged)
+ m_functor();
+
+ dismiss();
+ }
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // make_finally
+ //
+ // this utility function is the standard mechansim to perform the required
+ // type deduction on the users provided callback inorder to create a
+ // 'finally' object.
+ //
+ template <typename F>
+ auto make_finally(F&& f)
+ {
+ return finally<F>(eastl::forward<F>(f));
+ }
+}
+
+#endif // EASTL_FINALLY_H
diff --git a/EASTL/include/EASTL/fixed_allocator.h b/EASTL/include/EASTL/fixed_allocator.h
new file mode 100644
index 0000000..488eae4
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_allocator.h
@@ -0,0 +1,455 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the following
+// fixed_allocator
+// fixed_allocator_with_overflow
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_ALLOCATOR_H
+#define EASTL_FIXED_ALLOCATOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/fixed_pool.h>
+#include <EASTL/functional.h>
+#include <EASTL/memory.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+
+EA_DISABLE_ALL_VC_WARNINGS();
+
+#include <new>
+
+EA_RESTORE_ALL_VC_WARNINGS();
+
+EA_DISABLE_VC_WARNING(4275); // non dll-interface class used as base for DLL-interface classkey 'identifier'
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_allocator
+ ///
+ /// Implements an allocator which allocates a single fixed size where
+ /// the size, alignment, and memory used for the pool is defined at
+ /// runtime by the user. This is different from fixed containers
+ /// such as fixed_list whereby the size and alignment are determined
+ /// at compile time and the memory is directly built into the container's
+ /// member data.
+ ///
+ /// If the pool's memory is exhausted or was never initialized, the
+ /// allocate function returns NULL. Consider the fixed_allocator_with_overflow
+ /// class as an alternative in order to deal with this situation.
+ ///
+ /// This class requires the user to call container.get_allocator().init()
+ /// after constructing the container. There currently isn't a way to
+ /// construct the container with the initialization parameters, though
+ /// with some effort such a thing could probably be made possible.
+ /// It's not as simple as it might first seem, due to the non-copyable
+ /// nature of fixed allocators. A side effect of this limitation is that
+ /// you cannot copy-construct a container using fixed_allocators.
+ ///
+ /// Another side-effect is that you cannot swap two containers using
+ /// a fixed_allocator, as a swap requires temporary memory allocated by
+ /// an equivalent allocator, and such a thing cannot be done implicitly.
+ /// A workaround for the swap limitation is that you can implement your
+ /// own swap whereby you provide an explicitly created temporary object.
+ ///
+ /// Note: Be careful to set the allocator's node size to the size of the
+ /// container node and not the size of the contained object. Note that the
+ /// example code below uses IntListNode.
+ ///
+ /// Example usage:
+ /// typedef eastl::list<int, fixed_allocator> IntList;
+ /// typedef IntList::node_type IntListNode;
+ ///
+ /// IntListNode buffer[200];
+ /// IntList intList;
+ /// intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode));
+ ///
+ class EASTL_API fixed_allocator : public fixed_pool_base
+ {
+ public:
+ /// fixed_allocator
+ ///
+ /// Default constructor. The user usually will need to call init() after
+ /// constructing via this constructor.
+ ///
+ fixed_allocator(const char* /*pName*/ = EASTL_FIXED_POOL_DEFAULT_NAME)
+ : fixed_pool_base(NULL)
+ {
+ }
+
+
+ /// fixed_allocator
+ ///
+ /// Copy constructor. The user usually will need to call init() after
+ /// constructing via this constructor. By their nature, fixed-allocators
+ /// cannot be copied in any useful way, as by their nature the user
+ /// must manually initialize them.
+ ///
+ fixed_allocator(const fixed_allocator&)
+ : fixed_pool_base(NULL)
+ {
+ }
+
+
+ /// operator=
+ ///
+ /// By their nature, fixed-allocators cannot be copied in any
+ /// useful way, as by their nature the user must manually
+ /// initialize them.
+ ///
+ fixed_allocator& operator=(const fixed_allocator&)
+ {
+ return *this;
+ }
+
+
+ // init
+ //
+ // No init here, as the base class version is sufficient.
+ //
+ //void init(void* pMemory, size_t memorySize, size_t nodeSize,
+ // size_t alignment, size_t alignmentOffset = 0);
+
+
+ /// allocate
+ ///
+ /// Allocates a new object of the size specified upon class initialization.
+ /// Returns NULL if there is no more memory.
+ ///
+ void* allocate(size_t n, int /*flags*/ = 0)
+ {
+ // To consider: Verify that 'n' is what the user initialized us with.
+
+ Link* pLink = mpHead;
+
+ if(pLink) // If we have space...
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(++mnCurrentSize > mnPeakSize)
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ mpHead = pLink->mpNext;
+ return pLink;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if(mpNext != mpCapacity)
+ {
+ pLink = mpNext;
+
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext) + n);
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(++mnCurrentSize > mnPeakSize)
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return pLink;
+ }
+
+ // EASTL_ASSERT(false); To consider: enable this assert. However, we intentionally disable it because this isn't necessarily an assertable error.
+ return NULL;
+ }
+ }
+
+
+ /// allocate
+ ///
+ void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
+ {
+ return allocate(n, flags);
+ }
+
+
+ /// deallocate
+ ///
+ /// Frees the given object which was allocated by allocate().
+ /// If the given node was not allocated by allocate() then the behaviour
+ /// is undefined.
+ ///
+ void deallocate(void* p, size_t)
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ --mnCurrentSize;
+ #endif
+
+ ((Link*)p)->mpNext = mpHead;
+ mpHead = ((Link*)p);
+ }
+
+
+ using fixed_pool_base::can_allocate;
+
+
+ const char* get_name() const
+ {
+ return EASTL_FIXED_POOL_DEFAULT_NAME;
+ }
+
+
+ void set_name(const char*)
+ {
+ // Nothing to do. We don't allocate memory.
+ }
+
+ }; // fixed_allocator
+
+ bool operator==(const fixed_allocator& a, const fixed_allocator& b);
+ bool operator!=(const fixed_allocator& a, const fixed_allocator& b);
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_allocator_with_overflow
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_allocator_with_overflow
+ ///
+ /// Implements an allocator which allocates a single fixed size where
+ /// the size, alignment, and memory used for the pool is defined at
+ /// runtime by the user. This is different from fixed containers
+ /// such as fixed_list whereby the size and alignment are determined
+ /// at compile time and the memory is directly built into the container's
+ /// member data.
+ ///
+ /// Note: Be careful to set the allocator's node size to the size of the
+ /// container node and not the size of the contained object. Note that the
+ /// example code below uses IntListNode.
+ ///
+ /// This class requires the user to call container.get_allocator().init()
+ /// after constructing the container. There currently isn't a way to
+ /// construct the container with the initialization parameters, though
+ /// with some effort such a thing could probably be made possible.
+ /// It's not as simple as it might first seem, due to the non-copyable
+ /// nature of fixed allocators. A side effect of this limitation is that
+ /// you cannot copy-construct a container using fixed_allocators.
+ ///
+ /// Another side-effect is that you cannot swap two containers using
+ /// a fixed_allocator, as a swap requires temporary memory allocated by
+ /// an equivalent allocator, and such a thing cannot be done implicitly.
+ /// A workaround for the swap limitation is that you can implement your
+ /// own swap whereby you provide an explicitly created temporary object.
+ ///
+ /// Example usage:
+ /// typedef eastl::list<int, fixed_allocator_with_overflow> IntList;
+ /// typedef IntList::node_type IntListNode;
+ ///
+ /// IntListNode buffer[200];
+ /// IntList intList;
+ /// intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode));
+ ///
+ class EASTL_API fixed_allocator_with_overflow : public fixed_pool_base
+ {
+ public:
+ /// fixed_allocator_with_overflow
+ ///
+ /// Default constructor. The user usually will need to call init() after
+ /// constructing via this constructor.
+ ///
+ fixed_allocator_with_overflow(const char* pName = EASTL_FIXED_POOL_DEFAULT_NAME)
+ : fixed_pool_base(NULL)
+ , mOverflowAllocator(pName)
+ , mpPoolBegin(nullptr)
+ , mpPoolEnd(nullptr)
+ , mnNodeSize(0)
+ {
+ }
+
+
+ /// fixed_allocator_with_overflow
+ ///
+ /// Copy constructor. The user usually will need to call init() after
+ /// constructing via this constructor. By their nature, fixed-allocators
+ /// cannot be copied in any useful way, as by their nature the user
+ /// must manually initialize them.
+ ///
+ fixed_allocator_with_overflow(const fixed_allocator_with_overflow&)
+ : fixed_pool_base(NULL)
+ , mpPoolBegin(nullptr)
+ , mpPoolEnd(nullptr)
+ , mnNodeSize(0)
+ {
+ }
+
+
+ /// operator=
+ ///
+ /// By their nature, fixed-allocators cannot be copied in any
+ /// useful way, as by their nature the user must manually
+ /// initialize them.
+ ///
+ fixed_allocator_with_overflow& operator=(const fixed_allocator_with_overflow& x)
+ {
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mOverflowAllocator = x.mOverflowAllocator;
+ #else
+ (void)x;
+ #endif
+
+ return *this;
+ }
+
+
+ /// init
+ ///
+ void init(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0)
+ {
+ fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+ mpPoolBegin = pMemory;
+ mpPoolEnd = (void*)((uintptr_t)pMemory + memorySize);
+ mnNodeSize = (eastl_size_t)nodeSize;
+ }
+
+
+ /// allocate
+ ///
+ /// Allocates a new object of the size specified upon class initialization.
+ /// Returns NULL if there is no more memory.
+ ///
+ void* allocate(size_t /*n*/, int /*flags*/ = 0)
+ {
+ // To consider: Verify that 'n' is what the user initialized us with.
+
+ void* p;
+
+ if(mpHead) // If we have space...
+ {
+ p = mpHead;
+ mpHead = mpHead->mpNext;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if (mpNext != mpCapacity)
+ {
+ p = mpNext;
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext) + mnNodeSize);
+ }
+ else
+ p = mOverflowAllocator.allocate(mnNodeSize);
+ }
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(p && (++mnCurrentSize > mnPeakSize))
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return p;
+ }
+
+
+ /// allocate
+ ///
+ void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
+ {
+ return allocate(n, flags);
+ }
+
+
+ /// deallocate
+ ///
+ /// Frees the given object which was allocated by allocate().
+ /// If the given node was not allocated by allocate() then the behaviour
+ /// is undefined.
+ ///
+ void deallocate(void* p, size_t)
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ --mnCurrentSize;
+ #endif
+
+ if((p >= mpPoolBegin) && (p < mpPoolEnd))
+ {
+ ((Link*)p)->mpNext = mpHead;
+ mpHead = ((Link*)p);
+ }
+ else
+ mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
+ }
+
+
+ using fixed_pool_base::can_allocate;
+
+
+ const char* get_name() const
+ {
+ return mOverflowAllocator.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mOverflowAllocator.set_name(pName);
+ }
+
+ protected:
+ EASTLAllocatorType mOverflowAllocator; // To consider: Allow the user to define the type of this, presumably via a template parameter.
+ void* mpPoolBegin; // To consider: We have these member variables and ideally we shouldn't need them. The problem is that
+ void* mpPoolEnd; // the information about the pool buffer and object size is stored in the owning container
+ eastl_size_t mnNodeSize; // and we can't have access to it without increasing the amount of code we need and by templating
+ // more code. It may turn out that simply storing data here is smaller in the end.
+ }; // fixed_allocator_with_overflow // Granted, this class is usually used for debugging purposes, but perhaps there is an elegant solution.
+
+ bool operator==(const fixed_allocator_with_overflow& a, const fixed_allocator_with_overflow& b);
+ bool operator!=(const fixed_allocator_with_overflow& a, const fixed_allocator_with_overflow& b);
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ inline bool operator==(const fixed_allocator&, const fixed_allocator&)
+ {
+ return false;
+ }
+
+ inline bool operator!=(const fixed_allocator&, const fixed_allocator&)
+ {
+ return false;
+ }
+
+ inline bool operator==(const fixed_allocator_with_overflow&, const fixed_allocator_with_overflow&)
+ {
+ return false;
+ }
+
+ inline bool operator!=(const fixed_allocator_with_overflow&, const fixed_allocator_with_overflow&)
+ {
+ return false;
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/fixed_function.h b/EASTL/include/EASTL/fixed_function.h
new file mode 100644
index 0000000..6aed768
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_function.h
@@ -0,0 +1,218 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXED_FUNCTION_H
+#define EASTL_FIXED_FUNCTION_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/function_detail.h>
+
+namespace eastl
+{
+ template <int, typename>
+ class fixed_function;
+
+ namespace internal
+ {
+ template <typename>
+ struct is_fixed_function
+ : public eastl::false_type {};
+
+ template <int SIZE_IN_BYTES, typename R, typename... Args>
+ struct is_fixed_function<eastl::fixed_function<SIZE_IN_BYTES, R(Args...)>>
+ : public eastl::true_type {};
+
+ template<typename T>
+ EA_CONSTEXPR bool is_fixed_function_v = is_fixed_function<T>::value;
+ }
+
+ #define EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(TYPE) \
+ static_assert(sizeof(TYPE) <= sizeof(typename Base::FunctorStorageType), \
+ "fixed_function local buffer is not large enough to hold the callable object.")
+
+ #define EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES) \
+ static_assert(SIZE_IN_BYTES >= NEW_SIZE_IN_BYTES, \
+ "fixed_function local buffer is not large enough to hold the new fixed_function type.")
+
+ template <typename Functor>
+ using EASTL_DISABLE_OVERLOAD_IF_FIXED_FUNCTION =
+ eastl::disable_if_t<internal::is_fixed_function_v<eastl::decay_t<Functor>>>;
+
+
+ // fixed_function
+ //
+ template <int SIZE_IN_BYTES, typename R, typename... Args>
+ class fixed_function<SIZE_IN_BYTES, R(Args...)> : public internal::function_detail<SIZE_IN_BYTES, R(Args...)>
+ {
+ using Base = internal::function_detail<SIZE_IN_BYTES, R(Args...)>;
+
+ public:
+ using typename Base::result_type;
+
+ fixed_function() EA_NOEXCEPT = default;
+ fixed_function(std::nullptr_t p) EA_NOEXCEPT
+ : Base(p)
+ {
+ }
+
+ fixed_function(const fixed_function& other)
+ : Base(other)
+ {
+ }
+
+ fixed_function(fixed_function&& other)
+ : Base(eastl::move(other))
+ {
+ }
+
+ template <typename Functor,
+ typename = EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(Functor, R, Args..., Base, fixed_function),
+ typename = EASTL_DISABLE_OVERLOAD_IF_FIXED_FUNCTION<Functor>>
+ fixed_function(Functor functor)
+ : Base(eastl::move(functor))
+ {
+ EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(Functor);
+ }
+
+ template<int NEW_SIZE_IN_BYTES>
+ fixed_function(const fixed_function<NEW_SIZE_IN_BYTES, R(Args...)>& other)
+ : Base(other)
+ {
+ EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES);
+ }
+
+ template<int NEW_SIZE_IN_BYTES>
+ fixed_function(fixed_function<NEW_SIZE_IN_BYTES, R(Args...)>&& other)
+ : Base(eastl::move(other))
+ {
+ EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES);
+ }
+
+ ~fixed_function() EA_NOEXCEPT = default;
+
+ fixed_function& operator=(const fixed_function& other)
+ {
+ Base::operator=(other);
+ return *this;
+ }
+
+ fixed_function& operator=(fixed_function&& other)
+ {
+ Base::operator=(eastl::move(other));
+ return *this;
+ }
+
+ fixed_function& operator=(std::nullptr_t p) EA_NOEXCEPT
+ {
+ Base::operator=(p);
+ return *this;
+ }
+
+ template<int NEW_SIZE_IN_BYTES>
+ fixed_function& operator=(const fixed_function<NEW_SIZE_IN_BYTES, R(Args...)>& other)
+ {
+ EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES);
+
+ Base::operator=(other);
+ return *this;
+ }
+
+ template<int NEW_SIZE_IN_BYTES>
+ fixed_function& operator=(fixed_function<NEW_SIZE_IN_BYTES, R(Args...)>&& other)
+ {
+ EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES);
+
+ Base::operator=(eastl::move(other));
+ return *this;
+ }
+
+ template <typename Functor,
+ typename = EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(Functor, R, Args..., Base, fixed_function),
+ typename = EASTL_DISABLE_OVERLOAD_IF_FIXED_FUNCTION<Functor>>
+ fixed_function& operator=(Functor&& functor)
+ {
+ EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(eastl::decay_t<Functor>);
+ Base::operator=(eastl::forward<Functor>(functor));
+ return *this;
+ }
+
+ template <typename Functor>
+ fixed_function& operator=(eastl::reference_wrapper<Functor> f) EA_NOEXCEPT
+ {
+ EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(eastl::reference_wrapper<Functor>);
+ Base::operator=(f);
+ return *this;
+ }
+
+ void swap(fixed_function& other) EA_NOEXCEPT
+ {
+ Base::swap(other);
+ }
+
+ explicit operator bool() const EA_NOEXCEPT
+ {
+ return Base::operator bool();
+ }
+
+ R operator ()(Args... args) const
+ {
+ return Base::operator ()(eastl::forward<Args>(args)...);
+ }
+
+ #if EASTL_RTTI_ENABLED
+ const std::type_info& target_type() const EA_NOEXCEPT
+ {
+ return Base::target_type();
+ }
+
+ template <typename Functor>
+ Functor* target() EA_NOEXCEPT
+ {
+ return Base::target();
+ }
+
+ template <typename Functor>
+ const Functor* target() const EA_NOEXCEPT
+ {
+ return Base::target();
+ }
+ #endif
+ };
+
+ template <int S, typename R, typename... Args>
+ bool operator==(const fixed_function<S, R(Args...)>& f, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !f;
+ }
+
+ template <int S, typename R, typename... Args>
+ bool operator==(std::nullptr_t, const fixed_function<S, R(Args...)>& f) EA_NOEXCEPT
+ {
+ return !f;
+ }
+
+ template <int S, typename R, typename... Args>
+ bool operator!=(const fixed_function<S, R(Args...)>& f, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !!f;
+ }
+
+ template <int S, typename R, typename... Args>
+ bool operator!=(std::nullptr_t, const fixed_function<S, R(Args...)>& f) EA_NOEXCEPT
+ {
+ return !!f;
+ }
+
+ template <int S, typename R, typename... Args>
+ void swap(fixed_function<S, R(Args...)>& lhs, fixed_function<S, R(Args...)>& rhs)
+ {
+ lhs.swap(rhs);
+ }
+
+} // namespace eastl
+
+#endif // EASTL_FIXED_FUNCTION_H
diff --git a/EASTL/include/EASTL/fixed_hash_map.h b/EASTL/include/EASTL/fixed_hash_map.h
new file mode 100644
index 0000000..b94ea54
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_hash_map.h
@@ -0,0 +1,828 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a hash_map and hash_multimap which use a fixed size
+// memory pool for its buckets and nodes.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_HASH_MAP_H
+#define EASTL_FIXED_HASH_MAP_H
+
+
+#include <EASTL/hash_map.h>
+#include <EASTL/internal/fixed_pool.h>
+
+EA_DISABLE_VC_WARNING(4127) // Conditional expression is constant
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_HASH_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_HASH_MAP_DEFAULT_NAME
+ #define EASTL_FIXED_HASH_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_map" // Unless the user overrides something, this is "EASTL fixed_hash_map".
+ #endif
+
+ #ifndef EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME
+ #define EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_multimap" // Unless the user overrides something, this is "EASTL fixed_hash_multimap".
+ #endif
+
+
+ /// EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR
+ /// EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MAP_DEFAULT_NAME)
+ #endif
+
+ #ifndef EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// fixed_hash_map
+ ///
+ /// Implements a hash_map with a fixed block of memory identified by the nodeCount and bucketCount
+ /// template parameters.
+ ///
+ /// Template parameters:
+ /// Key The key type for the map. This is a map of Key to T (value).
+ /// T The value type for the map.
+ /// nodeCount The max number of objects to contain. This value must be >= 1.
+ /// bucketCount The number of buckets to use. This value must be >= 2.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Hash hash_set hash function. See hash_set.
+ /// Predicate hash_set equality testing function. See hash_set.
+ ///
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount = nodeCount + 1, bool bEnableOverflow = true,
+ typename Hash = eastl::hash<Key>, typename Predicate = eastl::equal_to<Key>, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_hash_map : public hash_map<Key,
+ T,
+ Hash,
+ Predicate,
+ fixed_hashtable_allocator<
+ bucketCount + 1,
+ sizeof(typename hash_map<Key, T, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
+ nodeCount,
+ EASTL_ALIGN_OF(eastl::pair<Key, T>),
+ 0,
+ bEnableOverflow,
+ OverflowAllocator>,
+ bCacheHashCode>
+ {
+ public:
+ typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_map<Key, T, Hash, Predicate,
+ OverflowAllocator, bCacheHashCode>::node_type), nodeCount, EASTL_ALIGN_OF(eastl::pair<Key, T>), 0,
+ bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef hash_map<Key, T, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
+ typedef fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator> this_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::mAllocator;
+ using base_type::clear;
+
+ protected:
+ node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket.
+ char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ public:
+ explicit fixed_hash_map(const overflow_allocator_type& overflowAllocator);
+
+ explicit fixed_hash_map(const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate());
+
+ fixed_hash_map(const Hash& hashFunction,
+ const Predicate& predicate,
+ const overflow_allocator_type& overflowAllocator);
+
+ template <typename InputIterator>
+ fixed_hash_map(InputIterator first, InputIterator last,
+ const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate());
+
+ fixed_hash_map(const this_type& x);
+ fixed_hash_map(this_type&& x);
+ fixed_hash_map(this_type&& x, const overflow_allocator_type& overflowAllocator);
+ fixed_hash_map(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type max_size() const;
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+
+ void clear(bool clearBuckets);
+ }; // fixed_hash_map
+
+
+
+
+
+ /// fixed_hash_multimap
+ ///
+ /// Implements a hash_multimap with a fixed block of memory identified by the nodeCount and bucketCount
+ /// template parameters.
+ ///
+ /// Template parameters:
+ /// Key The key type for the map. This is a map of Key to T (value).
+ /// T The value type for the map.
+ /// nodeCount The max number of objects to contain. This value must be >= 1.
+ /// bucketCount The number of buckets to use. This value must be >= 2.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Hash hash_set hash function. See hash_set.
+ /// Predicate hash_set equality testing function. See hash_set.
+ ///
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount = nodeCount + 1, bool bEnableOverflow = true,
+ typename Hash = eastl::hash<Key>, typename Predicate = eastl::equal_to<Key>, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_hash_multimap : public hash_multimap<Key,
+ T,
+ Hash,
+ Predicate,
+ fixed_hashtable_allocator<
+ bucketCount + 1,
+ sizeof(typename hash_multimap<Key, T, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
+ nodeCount,
+ EASTL_ALIGN_OF(eastl::pair<Key, T>),
+ 0,
+ bEnableOverflow,
+ OverflowAllocator>,
+ bCacheHashCode>
+ {
+ public:
+ typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_multimap<Key, T, Hash, Predicate,
+ OverflowAllocator, bCacheHashCode>::node_type), nodeCount, EASTL_ALIGN_OF(eastl::pair<Key, T>), 0,
+ bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef hash_multimap<Key, T, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
+ typedef fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator> this_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::mAllocator;
+ using base_type::clear;
+
+ protected:
+ node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket.
+ char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ public:
+ explicit fixed_hash_multimap(const overflow_allocator_type& overflowAllocator);
+
+ explicit fixed_hash_multimap(const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate());
+
+ fixed_hash_multimap(const Hash& hashFunction,
+ const Predicate& predicate,
+ const overflow_allocator_type& overflowAllocator);
+
+ template <typename InputIterator>
+ fixed_hash_multimap(InputIterator first, InputIterator last,
+ const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate());
+
+ fixed_hash_multimap(const this_type& x);
+ fixed_hash_multimap(this_type&& x);
+ fixed_hash_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator);
+ fixed_hash_multimap(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type max_size() const;
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+
+ void clear(bool clearBuckets);
+ }; // fixed_hash_multimap
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_hash_map
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_map(const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(),
+ Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_map(const Hash& hashFunction,
+ const Predicate& predicate)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if (!bEnableOverflow)
+ {
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_map(const Hash& hashFunction,
+ const Predicate& predicate,
+ const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if (!bEnableOverflow)
+ {
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_map(InputIterator first, InputIterator last,
+ const Hash& hashFunction,
+ const Predicate& predicate)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(first, last);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_map(const this_type& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_map(this_type&& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_map(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_map(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(),
+ Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(this_type&& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ base_type::insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ reset_lose_memory()
+ {
+ base_type::mnBucketCount = (size_type)base_type::mRehashPolicy.GetPrevBucketCount((uint32_t)bucketCount);
+ base_type::mnElementCount = 0;
+ base_type::mRehashPolicy.mnNextResize = 0;
+ base_type::get_allocator().reset(mNodeBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::size_type
+ fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline const typename fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::overflow_allocator_type&
+ fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::overflow_allocator_type&
+ fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ clear(bool clearBuckets)
+ {
+ base_type::DoFreeNodes(base_type::mpBucketArray, base_type::mnBucketCount);
+ if(clearBuckets)
+ {
+ base_type::DoFreeBuckets(base_type::mpBucketArray, base_type::mnBucketCount);
+ reset_lose_memory();
+ }
+ base_type::mpBucketArray = (node_type**)mBucketBuffer;
+ base_type::mnElementCount = 0;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode>
+ inline void swap(fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& a,
+ fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_hash_multimap
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multimap(const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(),
+ Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if (!bEnableOverflow)
+ {
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multimap(const Hash& hashFunction,
+ const Predicate& predicate)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multimap(const Hash& hashFunction,
+ const Predicate& predicate,
+ const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multimap(InputIterator first, InputIterator last,
+ const Hash& hashFunction,
+ const Predicate& predicate)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(first, last);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multimap(const this_type& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(),fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multimap(this_type&& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(),fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multimap(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(),
+ Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(this_type&& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ base_type::insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ reset_lose_memory()
+ {
+ base_type::mnBucketCount = (size_type)base_type::mRehashPolicy.GetPrevBucketCount((uint32_t)bucketCount);
+ base_type::mnElementCount = 0;
+ base_type::mRehashPolicy.mnNextResize = 0;
+ base_type::get_allocator().reset(mNodeBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::size_type
+ fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline const typename fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::overflow_allocator_type&
+ fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::overflow_allocator_type&
+ fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ clear(bool clearBuckets)
+ {
+ base_type::DoFreeNodes(base_type::mpBucketArray, base_type::mnBucketCount);
+ if(clearBuckets)
+ {
+ base_type::DoFreeBuckets(base_type::mpBucketArray, base_type::mnBucketCount);
+ reset_lose_memory();
+ }
+ base_type::mpBucketArray = (node_type**)mBucketBuffer;
+ base_type::mnElementCount = 0;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode>
+ inline void swap(fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& a,
+ fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+
+} // namespace eastl
+
+EA_RESTORE_VC_WARNING()
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/fixed_hash_set.h b/EASTL/include/EASTL/fixed_hash_set.h
new file mode 100644
index 0000000..fa2783a
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_hash_set.h
@@ -0,0 +1,790 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a hash_set which uses a fixed size memory pool for
+// its buckets and nodes.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_HASH_SET_H
+#define EASTL_FIXED_HASH_SET_H
+
+
+#include <EASTL/hash_set.h>
+#include <EASTL/internal/fixed_pool.h>
+
+EA_DISABLE_VC_WARNING(4127) // Conditional expression is constant
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_HASH_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_HASH_SET_DEFAULT_NAME
+ #define EASTL_FIXED_HASH_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_set" // Unless the user overrides something, this is "EASTL fixed_hash_set".
+ #endif
+
+ #ifndef EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME
+ #define EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_multiset" // Unless the user overrides something, this is "EASTL fixed_hash_multiset".
+ #endif
+
+
+ /// EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR
+ /// EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_SET_DEFAULT_NAME)
+ #endif
+
+ #ifndef EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME)
+ #endif
+
+
+
+ /// fixed_hash_set
+ ///
+ /// Implements a hash_set with a fixed block of memory identified by the nodeCount and bucketCount
+ /// template parameters.
+ ///
+ /// Template parameters:
+ /// Value The type of object the hash_set holds.
+ /// nodeCount The max number of objects to contain. This value must be >= 1.
+ /// bucketCount The number of buckets to use. This value must be >= 2.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Hash hash_set hash function. See hash_set.
+ /// Predicate hash_set equality testing function. See hash_set.
+ ///
+ template <typename Value, size_t nodeCount, size_t bucketCount = nodeCount + 1, bool bEnableOverflow = true,
+ typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_hash_set : public hash_set<Value,
+ Hash,
+ Predicate,
+ fixed_hashtable_allocator<
+ bucketCount + 1,
+ sizeof(typename hash_set<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
+ nodeCount,
+ EASTL_ALIGN_OF(typename hash_set<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
+ 0,
+ bEnableOverflow,
+ OverflowAllocator>,
+ bCacheHashCode>
+ {
+ public:
+ typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_set<Value, Hash, Predicate,
+ OverflowAllocator, bCacheHashCode>::node_type), nodeCount,
+ EASTL_ALIGN_OF(typename hash_set<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
+ 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator> this_type;
+ typedef hash_set<Value, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::mAllocator;
+
+ protected:
+ node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket.
+ char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ public:
+ explicit fixed_hash_set(const overflow_allocator_type& overflowAllocator);
+
+ explicit fixed_hash_set(const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate());
+
+ fixed_hash_set(const Hash& hashFunction,
+ const Predicate& predicate,
+ const overflow_allocator_type& overflowAllocator);
+
+ template <typename InputIterator>
+ fixed_hash_set(InputIterator first, InputIterator last,
+ const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate());
+
+ fixed_hash_set(const this_type& x);
+ fixed_hash_set(this_type&& x);
+ fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator);
+
+ fixed_hash_set(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type max_size() const;
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_hash_set
+
+
+
+
+
+
+ /// fixed_hash_multiset
+ ///
+ /// Implements a hash_multiset with a fixed block of memory identified by the nodeCount and bucketCount
+ /// template parameters.
+ ///
+ /// Value The type of object the hash_set holds.
+ /// nodeCount The max number of objects to contain. This value must be >= 1.
+ /// bucketCount The number of buckets to use. This value must be >= 2.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Hash hash_set hash function. See hash_set.
+ /// Predicate hash_set equality testing function. See hash_set.
+ ///
+ template <typename Value, size_t nodeCount, size_t bucketCount = nodeCount + 1, bool bEnableOverflow = true,
+ typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_hash_multiset : public hash_multiset<Value,
+ Hash,
+ Predicate,
+ fixed_hashtable_allocator<
+ bucketCount + 1,
+ sizeof(typename hash_multiset<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
+ nodeCount,
+ EASTL_ALIGN_OF(typename hash_multiset<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
+ 0,
+ bEnableOverflow,
+ OverflowAllocator>,
+ bCacheHashCode>
+ {
+ public:
+ typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_multiset<Value, Hash, Predicate,
+ OverflowAllocator, bCacheHashCode>::node_type), nodeCount, EASTL_ALIGN_OF(typename hash_multiset<Value, Hash, Predicate,
+ OverflowAllocator, bCacheHashCode>::node_type), 0,
+ bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef hash_multiset<Value, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
+ typedef fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator> this_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::mAllocator;
+
+ protected:
+ node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket.
+ char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ public:
+ explicit fixed_hash_multiset(const overflow_allocator_type& overflowAllocator);
+
+ explicit fixed_hash_multiset(const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate());
+
+ fixed_hash_multiset(const Hash& hashFunction,
+ const Predicate& predicate,
+ const overflow_allocator_type& overflowAllocator);
+
+ template <typename InputIterator>
+ fixed_hash_multiset(InputIterator first, InputIterator last,
+ const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate());
+
+ fixed_hash_multiset(const this_type& x);
+ fixed_hash_multiset(this_type&& x);
+ fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator);
+ fixed_hash_multiset(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type max_size() const;
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_hash_multiset
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_hash_set
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_set(const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
+ Hash(), Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if (!bEnableOverflow)
+ {
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_set(const Hash& hashFunction,
+ const Predicate& predicate)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
+ hashFunction, predicate, fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_set(const Hash& hashFunction,
+ const Predicate& predicate,
+ const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
+ hashFunction, predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if (!bEnableOverflow)
+ {
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_set(InputIterator first, InputIterator last,
+ const Hash& hashFunction,
+ const Predicate& predicate)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ {
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(first, last);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_set(const this_type& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::fixed_hash_set(this_type&& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
+ x.hash_function(), x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_set(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(),
+ Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ typename fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_set<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_set<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(this_type&& x)
+ {
+ operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_set<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_set<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ base_type::insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ swap(this_type& x)
+ {
+ // We must do a brute-force swap, because fixed containers cannot share memory allocations.
+ // Note that we create a temp value on the stack. This approach may fail if the size of the
+ // container is too large. We have a rule against allocating memory from the heap, and so
+ // if the user wants to swap two large objects of this class, the user will currently need
+ // to implement it manually. To consider: add code to allocate a temporary buffer if the
+ // size of the container is too large for the stack.
+ EASTL_ASSERT(sizeof(x) < EASTL_MAX_STACK_USAGE); // It is dangerous to try to create objects that are too big for the stack.
+
+ const this_type temp(*this); // Can't call eastl::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ void fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+ base_type::get_allocator().reset(mNodeBuffer);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::size_type
+ fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline const typename fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::overflow_allocator_type&
+ fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::overflow_allocator_type&
+ fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator);
+ }
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode>
+ inline void swap(fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& a,
+ fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& b)
+ {
+ a.swap(b);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_hash_multiset
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multiset(const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(),
+ Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multiset(const Hash& hashFunction,
+ const Predicate& predicate)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multiset(const Hash& hashFunction,
+ const Predicate& predicate,
+ const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ template <typename InputIterator>
+ inline fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multiset(InputIterator first, InputIterator last,
+ const Hash& hashFunction,
+ const Predicate& predicate)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+ predicate, fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(first, last);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multiset(const this_type& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::fixed_hash_multiset(this_type&& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
+ x.hash_function(), x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ fixed_hash_multiset(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(),
+ Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multiset<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_multiset<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(this_type&& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multiset<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_multiset<Key, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ base_type::insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+ base_type::get_allocator().reset(mNodeBuffer);
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::size_type
+ fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline const typename fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::overflow_allocator_type&
+ fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::overflow_allocator_type&
+ fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline void fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::
+ set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode>
+ inline void swap(fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& a,
+ fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+} // namespace eastl
+
+EA_RESTORE_VC_WARNING()
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/fixed_list.h b/EASTL/include/EASTL/fixed_list.h
new file mode 100644
index 0000000..e57c08b
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_list.h
@@ -0,0 +1,388 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a list which uses a fixed size memory pool for its nodes.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_LIST_H
+#define EASTL_FIXED_LIST_H
+
+
+#include <EASTL/list.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_LIST_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_LIST_DEFAULT_NAME
+ #define EASTL_FIXED_LIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_list" // Unless the user overrides something, this is "EASTL fixed_list".
+ #endif
+
+
+ /// EASTL_FIXED_LIST_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_LIST_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_LIST_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_LIST_DEFAULT_NAME)
+ #endif
+
+
+
+ /// fixed_list
+ ///
+ /// fixed_list is a list which uses a single block of contiguous memory
+ /// for its nodes. The purpose of this is to reduce memory usage relative
+ /// to a conventional memory allocation system (with block headers), to
+ /// increase allocation speed (often due to avoidance of mutex locks),
+ /// to increase performance (due to better memory locality), and to decrease
+ /// memory fragmentation due to the way that fixed block allocators work.
+ ///
+ /// The primary downside to a fixed_list is that the number of nodes it
+ /// can contain is fixed upon its declaration. If you want a fixed_list
+ /// that doesn't have this limitation, then you probably don't want a
+ /// fixed_list. You can always create your own memory allocator that works
+ /// the way you want.
+ ///
+ /// Template parameters:
+ /// T The type of object the list holds.
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <typename T, size_t nodeCount, bool bEnableOverflow = true, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_list : public list<T, fixed_node_allocator<sizeof(typename list<T>::node_type),
+ nodeCount, EASTL_ALIGN_OF(typename list<T>::node_type), 0, bEnableOverflow, OverflowAllocator> >
+ {
+ public:
+ typedef fixed_node_allocator<sizeof(typename list<T>::node_type), nodeCount,
+ EASTL_ALIGN_OF(typename list<T>::node_type), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef OverflowAllocator overflow_allocator_type;
+ typedef list<T, fixed_allocator_type> base_type;
+ typedef fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::iterator iterator;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::assign;
+ using base_type::resize;
+ using base_type::insert;
+ using base_type::size;
+ using base_type::get_allocator;
+
+ protected:
+ char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ using base_type::internalAllocator;
+
+ public:
+ fixed_list();
+ explicit fixed_list(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true.
+ explicit fixed_list(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity.
+ fixed_list(size_type n, const value_type& value);
+ fixed_list(const this_type& x);
+ fixed_list(this_type&& x);
+ fixed_list(this_type&&, const overflow_allocator_type& overflowAllocator);
+ fixed_list(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_LIST_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ fixed_list(InputIterator first, InputIterator last);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+ size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter.
+ bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot.
+ bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled.
+ bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter.
+
+ // OverflowAllocator
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_list
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_list
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list()
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(size_type n)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+ #endif
+
+ resize(n);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(size_type n, const value_type& value)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+ #endif
+
+ resize(n, value);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(const this_type& x)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ internalAllocator().copy_overflow_allocator(x.internalAllocator());
+
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(x.internalAllocator().get_name());
+ #endif
+
+ assign(x.begin(), x.end());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(this_type&& x)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ // Since we are a fixed_list, we can't normally swap pointers unless both this and
+ // x are using using overflow and the overflow allocators are equal. To do:
+ //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator()))
+ //{
+ // We can swap contents and may need to swap the allocators as well.
+ //}
+
+ // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that
+ // way then we may want to make a shared implementation.
+ internalAllocator().copy_overflow_allocator(x.internalAllocator());
+
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(x.internalAllocator().get_name());
+ #endif
+
+ assign(x.begin(), x.end());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ // See comments above.
+ internalAllocator().copy_overflow_allocator(x.internalAllocator());
+
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(x.internalAllocator().get_name());
+ #endif
+
+ assign(x.begin(), x.end());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ assign(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+ #endif
+
+ assign(first, last);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ base_type::clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ internalAllocator() = x.internalAllocator(); // The primary effect of this is to copy the overflow allocator.
+ #endif
+
+ base_type::assign(x.begin(), x.end()); // It would probably be better to implement this like list::operator=.
+ }
+ return *this;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(this_type&& x)
+ {
+ return operator=(x);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ base_type::assign(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+ get_allocator().reset(mBuffer);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::size_type
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::full() const
+ {
+ // Note: This implementation isn't right in the case of bEnableOverflow = true because it will return
+ // false for the case that there are free nodes from the buffer but also nodes from the dynamic heap.
+ // This can happen if the container exceeds the fixed size and then frees some of the nodes from the fixed buffer.
+ // The only simple fix for this is to take on another member variable which tracks whether this overflow
+ // has occurred at some point in the past.
+ return !internalAllocator().can_allocate(); // This is the quickest way of detecting this. has_overflowed uses a different method because it can't use this quick method.
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::has_overflowed() const
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED // If we can use this faster pathway (as size() may be slow)...
+ return (internalAllocator().mPool.mnPeakSize > kMaxSize);
+ #else
+ return (size() > kMaxSize);
+ #endif
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::can_overflow() const
+ {
+ return bEnableOverflow;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline const typename fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type&
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return internalAllocator().get_overflow_allocator();
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type&
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return internalAllocator().get_overflow_allocator();
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ internalAllocator().set_overflow_allocator(allocator);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void swap(fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>& a,
+ fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/fixed_map.h b/EASTL/include/EASTL/fixed_map.h
new file mode 100644
index 0000000..c01db08
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_map.h
@@ -0,0 +1,580 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a map and multimap which use a fixed size memory
+// pool for their nodes.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_MAP_H
+#define EASTL_FIXED_MAP_H
+
+
+#include <EASTL/map.h>
+#include <EASTL/fixed_set.h> // Included because fixed_rbtree_base resides here.
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_MAP_DEFAULT_NAME
+ #define EASTL_FIXED_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_map" // Unless the user overrides something, this is "EASTL fixed_map".
+ #endif
+
+ #ifndef EASTL_FIXED_MULTIMAP_DEFAULT_NAME
+ #define EASTL_FIXED_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_multimap" // Unless the user overrides something, this is "EASTL fixed_multimap".
+ #endif
+
+
+ /// EASTL_FIXED_MAP_DEFAULT_ALLOCATOR
+ /// EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_MAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MAP_DEFAULT_NAME)
+ #endif
+
+ #ifndef EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MULTIMAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// fixed_map
+ ///
+ /// Implements a map with a fixed block of memory identified by the
+ /// nodeCount template parameter.
+ ///
+ /// Key The key object (key in the key/value pair).
+ /// T The mapped object (value in the key/value pair).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Compare Compare function/object for set ordering.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow = true, typename Compare = eastl::less<Key>, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_map : public map<Key, T, Compare, fixed_node_allocator<sizeof(typename map<Key, T>::node_type),
+ nodeCount, EASTL_ALIGN_OF(eastl::pair<Key, T>), 0, bEnableOverflow, OverflowAllocator> >
+ {
+ public:
+ typedef fixed_node_allocator<sizeof(typename map<Key, T>::node_type), nodeCount,
+ EASTL_ALIGN_OF(eastl::pair<Key, T>), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator> this_type;
+ typedef map<Key, T, Compare, fixed_allocator_type> base_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::insert;
+
+ protected:
+ char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ using base_type::mAllocator;
+
+ public:
+ fixed_map();
+ explicit fixed_map(const overflow_allocator_type& overflowAllocator);
+ explicit fixed_map(const Compare& compare);
+ fixed_map(const this_type& x);
+ fixed_map(this_type&& x);
+ fixed_map(this_type&& x, const overflow_allocator_type& overflowAllocator);
+ fixed_map(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MAP_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ fixed_map(InputIterator first, InputIterator last);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type max_size() const;
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_map
+
+
+
+
+ /// fixed_multimap
+ ///
+ /// Implements a multimap with a fixed block of memory identified by the
+ /// nodeCount template parameter.
+ ///
+ /// Key The key object (key in the key/value pair).
+ /// T The mapped object (value in the key/value pair).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Compare Compare function/object for set ordering.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow = true, typename Compare = eastl::less<Key>, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_multimap : public multimap<Key, T, Compare, fixed_node_allocator<sizeof(typename multimap<Key, T>::node_type),
+ nodeCount, EASTL_ALIGN_OF(eastl::pair<Key, T>), 0, bEnableOverflow, OverflowAllocator> >
+ {
+ public:
+ typedef fixed_node_allocator<sizeof(typename multimap<Key, T>::node_type), nodeCount,
+ EASTL_ALIGN_OF(eastl::pair<Key, T>), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef multimap<Key, T, Compare, fixed_allocator_type> base_type;
+ typedef fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator> this_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::insert;
+
+ protected:
+ char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ using base_type::mAllocator;
+ using base_type::get_compare;
+
+ public:
+ fixed_multimap();
+ fixed_multimap(const overflow_allocator_type& overflowAllocator);
+ explicit fixed_multimap(const Compare& compare);
+ fixed_multimap(const this_type& x);
+ fixed_multimap(this_type&& x);
+ fixed_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator);
+ fixed_multimap(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ fixed_multimap(InputIterator first, InputIterator last);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type max_size() const;
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_multimap
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_map
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_map()
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_map(const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_map(const Compare& compare)
+ : base_type(compare, fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_map(const this_type& x)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_map(this_type&& x)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_map(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_map(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME);
+ #endif
+
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_map(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME);
+ #endif
+
+ insert(first, last);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(this_type&& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+ base_type::get_allocator().reset(mBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::size_type
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline const typename fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::overflow_allocator_type&
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::overflow_allocator_type&
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator);
+ }
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void swap(fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>& a,
+ fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_multimap
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multimap()
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multimap(const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multimap(const Compare& compare)
+ : base_type(compare, fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multimap(const this_type& x)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multimap(this_type&& x)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multimap(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME);
+ #endif
+
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::
+ fixed_multimap(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME);
+ #endif
+
+ insert(first, last);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(this_type&& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+ base_type::get_allocator().reset(mBuffer);
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::size_type
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline const typename fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::overflow_allocator_type&
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::overflow_allocator_type&
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void swap(fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>& a,
+ fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, OverflowAllocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/fixed_set.h b/EASTL/include/EASTL/fixed_set.h
new file mode 100644
index 0000000..e5f0023
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_set.h
@@ -0,0 +1,578 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a set and multiset which use a fixed size memory
+// pool for their nodes.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_SET_H
+#define EASTL_FIXED_SET_H
+
+
+#include <EASTL/set.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_SET_DEFAULT_NAME
+ #define EASTL_FIXED_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_set" // Unless the user overrides something, this is "EASTL fixed_set".
+ #endif
+
+ #ifndef EASTL_FIXED_MULTISET_DEFAULT_NAME
+ #define EASTL_FIXED_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_multiset" // Unless the user overrides something, this is "EASTL fixed_multiset".
+ #endif
+
+
+ /// EASTL_FIXED_SET_DEFAULT_ALLOCATOR
+ /// EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_SET_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_SET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_SET_DEFAULT_NAME)
+ #endif
+
+ #ifndef EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MULTISET_DEFAULT_NAME)
+ #endif
+
+
+
+ /// fixed_set
+ ///
+ /// Implements a set with a fixed block of memory identified by the
+ /// nodeCount template parameter.
+ ///
+ /// Template parameters:
+ /// Key The type of object the set holds (a.k.a. value).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Compare Compare function/object for set ordering.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <typename Key, size_t nodeCount, bool bEnableOverflow = true, typename Compare = eastl::less<Key>, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_set : public set<Key, Compare, fixed_node_allocator<sizeof(typename set<Key>::node_type),
+ nodeCount, EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> >
+ {
+ public:
+ typedef fixed_node_allocator<sizeof(typename set<Key>::node_type), nodeCount,
+ EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef set<Key, Compare, fixed_allocator_type> base_type;
+ typedef fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator> this_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::insert;
+
+ protected:
+ char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ using base_type::mAllocator;
+ using base_type::get_compare;
+
+ public:
+ fixed_set();
+ fixed_set(const overflow_allocator_type& overflowAllocator);
+ explicit fixed_set(const Compare& compare);
+ fixed_set(const this_type& x);
+ fixed_set(this_type&& x);
+ fixed_set(this_type&& x, const overflow_allocator_type& overflowAllocator);
+ fixed_set(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_SET_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ fixed_set(InputIterator first, InputIterator last);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type max_size() const;
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_set
+
+
+
+
+
+
+ /// fixed_multiset
+ ///
+ /// Implements a multiset with a fixed block of memory identified by the
+ /// nodeCount template parameter.
+ ///
+ /// Key The type of object the set holds (a.k.a. value).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Compare Compare function/object for set ordering.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <typename Key, size_t nodeCount, bool bEnableOverflow = true, typename Compare = eastl::less<Key>, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_multiset : public multiset<Key, Compare, fixed_node_allocator<sizeof(typename multiset<Key>::node_type),
+ nodeCount, EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> >
+ {
+ public:
+ typedef fixed_node_allocator<sizeof(typename multiset<Key>::node_type), nodeCount,
+ EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef multiset<Key, Compare, fixed_allocator_type> base_type;
+ typedef fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator> this_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::insert;
+
+ protected:
+ char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ using base_type::mAllocator;
+
+ public:
+ fixed_multiset();
+ fixed_multiset(const overflow_allocator_type& overflowAllocator);
+ explicit fixed_multiset(const Compare& compare);
+ fixed_multiset(const this_type& x);
+ fixed_multiset(this_type&& x);
+ fixed_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator);
+ fixed_multiset(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ fixed_multiset(InputIterator first, InputIterator last);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type max_size() const;
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_multiset
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_set
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_set()
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_set(const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_set(const Compare& compare)
+ : base_type(compare, fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_set(const this_type& x)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_set(this_type&& x)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_set(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_set(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME);
+ #endif
+
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_set(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME);
+ #endif
+
+ insert(first, last);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(this_type&& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+ base_type::get_allocator().reset(mBuffer);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::size_type
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline const typename fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::overflow_allocator_type&
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::overflow_allocator_type&
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void swap(fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>& a,
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_multiset
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multiset()
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multiset(const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multiset(const Compare& compare)
+ : base_type(compare, fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multiset(const this_type& x)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multiset(this_type&& x)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ mAllocator.copy_overflow_allocator(x.mAllocator);
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ base_type::operator=(x);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multiset(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME);
+ #endif
+
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::fixed_multiset(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME);
+ #endif
+
+ insert(first, last);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::this_type&
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::operator=(this_type&& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+ base_type::get_allocator().reset(mBuffer);
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::size_type
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline const typename fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::overflow_allocator_type&
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline typename fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::overflow_allocator_type&
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mAllocator.get_overflow_allocator();
+ }
+
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename OverflowAllocator>
+ inline void swap(fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>& a,
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, OverflowAllocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/fixed_slist.h b/EASTL/include/EASTL/fixed_slist.h
new file mode 100644
index 0000000..abad7ad
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_slist.h
@@ -0,0 +1,389 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements an slist which uses a fixed size memory pool for its nodes.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_SLIST_H
+#define EASTL_FIXED_SLIST_H
+
+
+#include <EASTL/slist.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_SLIST_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_SLIST_DEFAULT_NAME
+ #define EASTL_FIXED_SLIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_slist" // Unless the user overrides something, this is "EASTL fixed_slist".
+ #endif
+
+
+ /// EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_SLIST_DEFAULT_NAME)
+ #endif
+
+
+
+ /// fixed_slist
+ ///
+ /// fixed_slist is an slist which uses a single block of contiguous memory
+ /// for its nodes. The purpose of this is to reduce memory usage relative
+ /// to a conventional memory allocation system (with block headers), to
+ /// increase allocation speed (often due to avoidance of mutex locks),
+ /// to increase performance (due to better memory locality), and to decrease
+ /// memory fragmentation due to the way that fixed block allocators work.
+ ///
+ /// The primary downside to a fixed_slist is that the number of nodes it
+ /// can contain is fixed upon its declaration. If you want a fixed_slist
+ /// that doesn't have this limitation, then you probably don't want a
+ /// fixed_slist. You can always create your own memory allocator that works
+ /// the way you want.
+ ///
+ /// Template parameters:
+ /// T The type of object the slist holds.
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <typename T, size_t nodeCount, bool bEnableOverflow = true, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_slist : public slist<T, fixed_node_allocator<sizeof(typename slist<T>::node_type),
+ nodeCount, EASTL_ALIGN_OF(typename slist<T>::node_type), 0, bEnableOverflow, OverflowAllocator> >
+ {
+ public:
+ typedef fixed_node_allocator<sizeof(typename slist<T>::node_type), nodeCount,
+ EASTL_ALIGN_OF(typename slist<T>::node_type), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef OverflowAllocator overflow_allocator_type;
+ typedef slist<T, fixed_allocator_type> base_type;
+ typedef fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::assign;
+ using base_type::resize;
+ using base_type::size;
+
+ protected:
+ char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ using base_type::internalAllocator;
+
+ public:
+ fixed_slist();
+ explicit fixed_slist(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true.
+ explicit fixed_slist(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity.
+ fixed_slist(size_type n, const value_type& value);
+ fixed_slist(const this_type& x);
+ fixed_slist(this_type&& x);
+ fixed_slist(this_type&&, const overflow_allocator_type&);
+ fixed_slist(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ fixed_slist(InputIterator first, InputIterator last);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+ size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter.
+ bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot.
+ bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled.
+ bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter.
+
+ // OverflowAllocator
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_slist
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // slist
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist()
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist(const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist(size_type n)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME);
+ #endif
+
+ resize(n);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist(size_type n, const value_type& value)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME);
+ #endif
+
+ resize(n, value);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist(const this_type& x)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ internalAllocator().copy_overflow_allocator(x.internalAllocator());
+
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(x.internalAllocator().get_name());
+ #endif
+
+ assign(x.begin(), x.end());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist(this_type&& x)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ // Since we are a fixed_list, we can't normally swap pointers unless both this and
+ // x are using using overflow and the overflow allocators are equal. To do:
+ //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator()))
+ //{
+ // We can swap contents and may need to swap the allocators as well.
+ //}
+
+ // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that
+ // way then we may want to make a shared implementation.
+ internalAllocator().copy_overflow_allocator(x.internalAllocator());
+
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(x.internalAllocator().get_name());
+ #endif
+
+ assign(x.begin(), x.end());
+ }
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ // See comments above.
+ internalAllocator().copy_overflow_allocator(x.internalAllocator());
+
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(x.internalAllocator().get_name());
+ #endif
+
+ assign(x.begin(), x.end());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME);
+ #endif
+
+ assign(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_slist(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ #if EASTL_NAME_ENABLED
+ internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME);
+ #endif
+
+ assign(first, last);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ base_type::clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ internalAllocator() = x.internalAllocator(); // The primary effect of this is to copy the overflow allocator.
+ #endif
+
+ base_type::assign(x.begin(), x.end()); // It would probably be better to implement this like slist::operator=.
+ }
+ return *this;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(this_type&& x)
+ {
+ return operator=(x);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ base_type::assign(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+ base_type::get_allocator().reset(mBuffer);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::size_type
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::full() const
+ {
+ // Note: This implementation isn't right in the case of bEnableOverflow = true because it will return
+ // false for the case that there are free nodes from the buffer but also nodes from the dynamic heap.
+ // This can happen if the container exceeds the fixed size and then frees some of the nodes from the fixed buffer.
+ return !internalAllocator().can_allocate(); // This is the quickest way of detecting this. has_overflowed uses a different method because it can't use this quick method.
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::has_overflowed() const
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED // If we can use this faster pathway (as size() may be slow)...
+ return (internalAllocator().mPool.mnPeakSize > kMaxSize);
+ #else
+ return (size() > kMaxSize);
+ #endif
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::can_overflow() const
+ {
+ return bEnableOverflow;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline const typename fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type&
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return internalAllocator().get_overflow_allocator();
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type&
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return internalAllocator().get_overflow_allocator();
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ internalAllocator().set_overflow_allocator(allocator);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void swap(fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>& a,
+ fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/fixed_string.h b/EASTL/include/EASTL/fixed_string.h
new file mode 100644
index 0000000..f646302
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_string.h
@@ -0,0 +1,805 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a string which uses a fixed size memory pool.
+// The bEnableOverflow template parameter allows the container to resort to
+// heap allocations if the memory pool is exhausted.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_STRING_H
+#define EASTL_FIXED_STRING_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/string.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_STRING_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_STRING_DEFAULT_NAME
+ #define EASTL_FIXED_STRING_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_string" // Unless the user overrides something, this is "EASTL fixed_string".
+ #endif
+
+
+
+ /// fixed_string
+ ///
+ /// A fixed_string with bEnableOverflow == true is identical to a regular
+ /// string in terms of its behavior. All the expectations of regular string
+ /// apply to it and no additional expectations come from it. When bEnableOverflow
+ /// is false, fixed_string behaves like regular string with the exception that
+ /// its capacity can never increase. All operations you do on such a fixed_string
+ /// which require a capacity increase will result in undefined behavior or an
+ /// C++ allocation exception, depending on the configuration of EASTL.
+ ///
+ /// Note: The nodeCount value is the amount of characters to allocate, which needs to
+ /// take into account a terminating zero. Thus if you want to store strings with a strlen
+ /// of 30, the nodeCount value must be at least 31.
+ ///
+ /// Template parameters:
+ /// T The type of object the string holds (char, wchar_t, char8_t, char16_t, char32_t).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ /// Notes:
+ /// The nodeCount value must be at least 2, one for a character and one for a terminating 0.
+ ///
+ /// As of this writing, the string class necessarily reallocates when an insert of
+ /// self is done into self. As a result, the fixed_string class doesn't support
+ /// inserting self into self unless the bEnableOverflow template parameter is true.
+ ///
+ /// Example usage:
+ /// fixed_string<char, 128 + 1, true> fixedString("hello world"); // Can hold up to a strlen of 128.
+ ///
+ /// fixedString = "hola mundo";
+ /// fixedString.clear();
+ /// fixedString.resize(200);
+ /// fixedString.sprintf("%f", 1.5f);
+ ///
+ template <typename T, int nodeCount, bool bEnableOverflow = true, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_string : public basic_string<T, fixed_vector_allocator<sizeof(T), nodeCount, EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> >
+ {
+ public:
+ typedef fixed_vector_allocator<sizeof(T), nodeCount, EASTL_ALIGN_OF(T),
+ 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef basic_string<T, fixed_allocator_type> base_type;
+ typedef fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::CtorDoNotInitialize CtorDoNotInitialize;
+ typedef typename base_type::CtorSprintf CtorSprintf;
+ typedef aligned_buffer<nodeCount * sizeof(T), EASTL_ALIGN_OF(T)> aligned_buffer_type;
+
+ enum { kMaxSize = nodeCount - 1 }; // -1 because we need to save one element for the silent terminating null.
+
+ using base_type::npos;
+ using base_type::mPair;
+ using base_type::append;
+ using base_type::resize;
+ using base_type::clear;
+ using base_type::capacity;
+ using base_type::size;
+ using base_type::sprintf_va_list;
+ using base_type::DoAllocate;
+ using base_type::DoFree;
+ using base_type::internalLayout;
+ using base_type::get_allocator;
+
+ protected:
+ union // We define a union in order to avoid strict pointer aliasing issues with compilers like GCC.
+ {
+ value_type mArray[1];
+ aligned_buffer_type mBuffer; // Question: Why are we doing this aligned_buffer thing? Why not just do an array of value_type, given that we are using just strings of char types.
+ };
+
+ public:
+ fixed_string();
+ explicit fixed_string(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true.
+ fixed_string(const base_type& x, size_type position, size_type n = base_type::npos); // Currently we don't support overflowAllocator specification for other constructors, for simplicity.
+ fixed_string(const value_type* p, size_type n);
+ fixed_string(const value_type* p);
+ fixed_string(size_type n, const value_type& value);
+ fixed_string(const this_type& x);
+ fixed_string(const this_type& x, const overflow_allocator_type& overflowAllocator);
+ fixed_string(const base_type& x);
+ fixed_string(const value_type* pBegin, const value_type* pEnd);
+ fixed_string(CtorDoNotInitialize, size_type n);
+ fixed_string(CtorSprintf, const value_type* pFormat, ...);
+ fixed_string(std::initializer_list<T> ilist, const overflow_allocator_type& overflowAllocator);
+ fixed_string(this_type&& x);
+ fixed_string(this_type&& x, const overflow_allocator_type& overflowAllocator);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(const base_type& x);
+ this_type& operator=(const value_type* p);
+ this_type& operator=(const value_type c);
+ this_type& operator=(std::initializer_list<T> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void set_capacity(size_type n);
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+ size_type max_size() const;
+ bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot.
+ bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled.
+ bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter.
+
+ // The inherited versions of substr/left/right call the basic_string constructor,
+ // which will call the overflow allocator and fail if bEnableOverflow == false
+ this_type substr(size_type position, size_type n) const;
+ this_type left(size_type n) const;
+ this_type right(size_type n) const;
+
+ // OverflowAllocator
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+ }; // fixed_string
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_string
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string()
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(const this_type& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ get_allocator().copy_overflow_allocator(x.get_allocator());
+
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(x);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(const this_type& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator))
+ {
+ get_allocator().copy_overflow_allocator(x.get_allocator());
+
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(x);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(const base_type& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(x);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(const base_type& x, size_type position, size_type n)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(x, position, n);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(const value_type* p, size_type n)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(p, n);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(const value_type* p)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(p); // There better be enough space to hold the assigned string.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(size_type n, const value_type& value)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(n, value); // There better be enough space to hold the assigned string.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(const value_type* pBegin, const value_type* pEnd)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(pBegin, pEnd);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(CtorDoNotInitialize, size_type n)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+
+ if(n < nodeCount)
+ {
+ internalLayout().SetHeapSize(n);
+ *internalLayout().HeapEndPtr() = 0;
+ }
+ else
+ {
+ internalLayout().SetHeapSize(0);
+ *internalLayout().HeapEndPtr() = 0;
+
+ resize(n);
+ }
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(CtorSprintf, const value_type* pFormat, ...)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+ *internalLayout().HeapBeginPtr() = 0;
+
+ va_list arguments;
+ va_start(arguments, pFormat);
+ sprintf_va_list(pFormat, arguments);
+ va_end(arguments);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(std::initializer_list<T> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(this_type&& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers.
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(x); // Let x destruct its own items.
+ }
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_string(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator))
+ {
+ // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers.
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ internalLayout().SetHeapSize(0);
+
+ *internalLayout().HeapBeginPtr() = 0;
+
+ append(x); // Let x destruct its own items.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ get_allocator() = x.get_allocator();
+ #endif
+
+ append(x);
+ }
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ this_type& fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(const base_type& x)
+ {
+ if(static_cast<base_type*>(this) != &x)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ get_allocator() = x.get_allocator();
+ #endif
+
+ append(x);
+ }
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ this_type& fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(const value_type* p)
+ {
+ if(internalLayout().HeapBeginPtr() != p)
+ {
+ clear();
+ append(p);
+ }
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ this_type& fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(const value_type c)
+ {
+ clear();
+ append((size_type)1, c);
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ this_type& fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(std::initializer_list<T> ilist)
+ {
+ clear();
+ append(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ this_type& fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(this_type&& x)
+ {
+ // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers.
+
+ // if(static_cast<base_type*>(this) != &x) This should be impossible, so we disable it until proven otherwise.
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ get_allocator() = x.get_allocator();
+ #endif
+
+ append(x); // Let x destruct its own items.
+ }
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::set_capacity(size_type n)
+ {
+ const size_type nPrevSize = internalLayout().GetSize();
+ const size_type nPrevCapacity = capacity();
+
+ if(n == npos) // If the user means to set the capacity so that it equals the size (i.e. free excess capacity)...
+ n = nPrevSize;
+
+ if(n != nPrevCapacity) // If the request results in a capacity change...
+ {
+ const size_type allocSize = (n + 1); // +1 because the terminating 0 isn't included in the supplied capacity value. So now n refers the amount of memory we need.
+
+ if(can_overflow() && (((uintptr_t)internalLayout().HeapBeginPtr() != (uintptr_t)mBuffer.buffer) || (allocSize > kMaxSize))) // If we are or would be using dynamically allocated memory instead of our fixed-size member buffer...
+ {
+ T* const pNewData = (allocSize <= kMaxSize) ? (T*)&mBuffer.buffer[0] : DoAllocate(allocSize);
+ T* const pCopyEnd = (n < nPrevSize) ? (internalLayout().HeapBeginPtr() + n) : internalLayout().HeapEndPtr();
+ CharStringUninitializedCopy(internalLayout().HeapBeginPtr(), pCopyEnd, pNewData); // Copy [internalLayout().heap.mpBegin, pCopyEnd) to pNewData.
+ if((uintptr_t)internalLayout().HeapBeginPtr() != (uintptr_t)mBuffer.buffer)
+ DoFree(internalLayout().HeapBeginPtr(), internalLayout().GetHeapCapacity() + 1);
+
+ internalLayout().SetHeapSize((size_type)(pCopyEnd - internalLayout().HeapBeginPtr()));
+ internalLayout().SetHeapBeginPtr(pNewData);
+ internalLayout().SetHeapCapacity(allocSize - 1);
+ } // Else the new capacity would be within our fixed buffer.
+ else if(n < nPrevSize) // If the newly requested capacity is less than our size, we do what vector::set_capacity does and resize, even though we actually aren't reducing the capacity.
+ resize(n);
+ }
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::reset_lose_memory()
+ {
+ internalLayout().SetHeapBeginPtr(mArray);
+ internalLayout().SetHeapSize(0);
+ internalLayout().SetHeapCapacity(nodeCount - 1);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ size_type fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::full() const
+ {
+ // If size >= capacity, then we are definitely full.
+ // Also, if our size is smaller but we've switched away from mBuffer due to a previous overflow, then we are considered full.
+ return ((size_t)(internalLayout().HeapEndPtr() - internalLayout().HeapBeginPtr()) >= kMaxSize) || ((void*)internalLayout().HeapBeginPtr() != (void*)mBuffer.buffer);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::has_overflowed() const
+ {
+ // This will be incorrect for the case that bOverflowEnabled is true and the container was resized
+ // down to a small size where the fixed buffer could take over ownership of the data again.
+ // The only simple fix for this is to take on another member variable which tracks whether this overflow
+ // has occurred at some point in the past.
+ return ((void*)internalLayout().HeapBeginPtr() != (void*)mBuffer.buffer);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::can_overflow() const
+ {
+ return bEnableOverflow;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ this_type fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::substr(size_type position, size_type n) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(position > internalLayout().GetSize())
+ base_type::ThrowRangeException();
+ #endif
+
+ return fixed_string(internalLayout().HeapBeginPtr() + position,
+ internalLayout().HeapBeginPtr() + position + eastl::min_alt(n, internalLayout().GetSize() - position));
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ this_type fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::left(size_type n) const
+ {
+ const size_type nLength = size();
+ if(n < nLength)
+ return fixed_string(internalLayout().HeapBeginPtr(), internalLayout().HeapBeginPtr() + n);
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ this_type fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::right(size_type n) const
+ {
+ const size_type nLength = size();
+ if(n < nLength)
+ return fixed_string(internalLayout().HeapEndPtr() - n, internalLayout().HeapEndPtr());
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline const typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ overflow_allocator_type& fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return get_allocator().get_overflow_allocator();
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::
+ overflow_allocator_type& fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return get_allocator().get_overflow_allocator();
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ get_allocator().set_overflow_allocator(allocator);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+
+ // Operator +
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(const fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+ {
+ // We have a problem here because need to return an fixed_string by value. This will typically result in it
+ // using stack space equal to its size. That size may be too large to be workable.
+ typedef fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+
+ this_type result(const_cast<this_type&>(a).get_overflow_allocator());
+ result.append(a);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(const typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::value_type* p,
+ const fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+ {
+ typedef fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+
+ const typename this_type::size_type n = (typename this_type::size_type)CharStrlen(p);
+ this_type result(const_cast<this_type&>(b).get_overflow_allocator());
+ result.append(p, p + n);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::value_type c,
+ const fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+ {
+ typedef fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+
+ this_type result(const_cast<this_type&>(b).get_overflow_allocator());
+ result.push_back(c);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(const fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& a,
+ const typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::value_type* p)
+ {
+ typedef fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+
+ const typename this_type::size_type n = (typename this_type::size_type)CharStrlen(p);
+ this_type result(const_cast<this_type&>(a).get_overflow_allocator());
+ result.append(a);
+ result.append(p, p + n);
+ return result;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(const fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& a,
+ typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::value_type c)
+ {
+ typedef fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+
+ this_type result(const_cast<this_type&>(a).get_overflow_allocator());
+ result.append(a);
+ result.push_back(c);
+ return result;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>&& a,
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>&& b)
+ {
+ a.append(b); // Using an rvalue by name results in it becoming an lvalue.
+ return eastl::move(a);
+ }
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>&& a,
+ const fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+ {
+ a.append(b);
+ return eastl::move(a);
+ }
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(const typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::value_type* p,
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>&& b)
+ {
+ b.insert(0, p);
+ return eastl::move(b);
+ }
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>&& a,
+ const typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::value_type* p)
+ {
+ a.append(p);
+ return eastl::move(a);
+ }
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator> operator+(fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>&& a,
+ typename fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>::value_type c)
+ {
+ a.push_back(c);
+ return eastl::move(a);
+ }
+
+
+ // operator ==, !=, <, >, <=, >= come from the string implementations.
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void swap(fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& a,
+ fixed_string<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+} // namespace eastl
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/fixed_substring.h b/EASTL/include/EASTL/fixed_substring.h
new file mode 100644
index 0000000..e186cfc
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_substring.h
@@ -0,0 +1,275 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_SUBSTRING_H
+#define EASTL_FIXED_SUBSTRING_H
+
+
+#include <EASTL/string.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// fixed_substring
+ ///
+ /// Implements a string which is a reference to a segment of characters.
+ /// This class is efficient because it allocates no memory and copies no
+ /// memory during construction and assignment, but rather refers directly
+ /// to the segment of chracters. A common use of this is to have a
+ /// fixed_substring efficiently refer to a substring within another string.
+ ///
+ /// You cannot directly resize a fixed_substring (e.g. via resize, insert,
+ /// append, erase), but you can assign a different substring to it.
+ /// You can modify the characters within a substring in place.
+ /// As of this writing, in the name of being lean and simple it is the
+ /// user's responsibility to not call unsupported resizing functions
+ /// such as those listed above. A detailed listing of the functions which
+ /// are not supported is given below in the class declaration.
+ ///
+ /// The c_str function doesn't act as one might hope, as it simply
+ /// returns the pointer to the beginning of the string segment and the
+ /// 0-terminator may be beyond the end of the segment. If you want to
+ /// always be able to use c_str as expected, use the fixed string solution
+ /// we describe below.
+ ///
+ /// Another use of fixed_substring is to provide C++ string-like functionality
+ /// with a C character array. This allows you to work on a C character array
+ /// as if it were a C++ string as opposed using the C string API. Thus you
+ /// can do this:
+ ///
+ /// void DoSomethingForUser(char* timeStr, size_t timeStrCapacity)
+ /// {
+ /// fixed_substring tmp(timeStr, timeStrCapacity);
+ /// tmp = "hello ";
+ /// tmp += "world";
+ /// }
+ ///
+ /// Note that this class constructs and assigns from const string pointers
+ /// and const string objects, yet this class does not declare its member
+ /// data as const. This is a concession in order to allow this implementation
+ /// to be simple and lean. It is the user's responsibility to make sure
+ /// that strings that should not or can not be modified are either not
+ /// used by fixed_substring or are not modified by fixed_substring.
+ ///
+ /// A more flexible alternative to fixed_substring is fixed_string.
+ /// fixed_string has none of the functional limitations that fixed_substring
+ /// has and like fixed_substring it doesn't allocate memory. However,
+ /// fixed_string makes a *copy* of the source string and uses local
+ /// memory to store that copy. Also, fixed_string objects on the stack
+ /// are going to have a limit as to their maximum size.
+ ///
+ /// Notes:
+ /// As of this writing, the string class necessarily reallocates when
+ /// an insert of self is done into self. As a result, the fixed_substring
+ /// class doesn't support inserting self into self.
+ ///
+ /// Example usage:
+ /// basic_string<char> str("hello world");
+ /// fixed_substring<char> sub(str, 2, 5); // sub == "llo w"
+ ///
+ template <typename T>
+ class fixed_substring : public basic_string<T>
+ {
+ public:
+ typedef basic_string<T> base_type;
+ typedef fixed_substring<T> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+
+ using base_type::npos;
+ using base_type::mPair;
+ using base_type::AllocateSelf;
+ using base_type::internalLayout;
+ using base_type::get_allocator;
+
+ private:
+
+ void SetInternalHeapLayout(value_type* pBeginPtr, size_type nSize, size_type nCap)
+ {
+ internalLayout().SetHeapBeginPtr(pBeginPtr);
+ internalLayout().SetHeapSize(nSize);
+ internalLayout().SetHeapCapacity(nCap);
+ }
+
+
+ public:
+ fixed_substring()
+ : base_type()
+ {
+ }
+
+ fixed_substring(const fixed_substring& x)
+ : fixed_substring(static_cast<const base_type&>(x))
+ {}
+
+ fixed_substring(const base_type& x)
+ : base_type()
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ assign(x);
+ }
+
+ // We gain no benefit from having an rvalue move constructor or assignment operator,
+ // as this class is a const class.
+
+ fixed_substring(const base_type& x, size_type position, size_type n = base_type::npos)
+ : base_type()
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ assign(x, position, n);
+ }
+
+ fixed_substring(const value_type* p, size_type n)
+ : base_type()
+ {
+ assign(p, n);
+ }
+
+ fixed_substring(const value_type* p)
+ : base_type()
+ {
+ assign(p);
+ }
+
+ fixed_substring(const value_type* pBegin, const value_type* pEnd)
+ : base_type()
+ {
+ assign(pBegin, pEnd);
+ }
+
+ ~fixed_substring()
+ {
+ // We need to reset, as otherwise the parent destructor will
+ // attempt to free our memory.
+ AllocateSelf();
+ }
+
+ this_type& operator=(const this_type& x)
+ {
+ assign(x);
+ return *this;
+ }
+
+ this_type& operator=(const base_type& x)
+ {
+ assign(x);
+ return *this;
+ }
+
+ this_type& operator=(const value_type* p)
+ {
+ assign(p);
+ return *this;
+ }
+
+ this_type& assign(const base_type& x)
+ {
+ // By design, we need to cast away const-ness here.
+ SetInternalHeapLayout(const_cast<value_type*>(x.data()), x.size(), x.size());
+ return *this;
+ }
+
+ this_type& assign(const base_type& x, size_type position, size_type n)
+ {
+ // By design, we need to cast away const-ness here.
+ SetInternalHeapLayout(const_cast<value_type*>(x.data()) + position, n, n);
+ return *this;
+ }
+
+ this_type& assign(const value_type* p, size_type n)
+ {
+ // By design, we need to cast away const-ness here.
+ SetInternalHeapLayout(const_cast<value_type*>(p), n, n);
+ return *this;
+ }
+
+ this_type& assign(const value_type* p)
+ {
+ // By design, we need to cast away const-ness here.
+ SetInternalHeapLayout(const_cast<value_type*>(p), (size_type)CharStrlen(p), (size_type)CharStrlen(p));
+ return *this;
+ }
+
+ this_type& assign(const value_type* pBegin, const value_type* pEnd)
+ {
+ // By design, we need to cast away const-ness here.
+ SetInternalHeapLayout(const_cast<value_type*>(pBegin), (size_type)(pEnd - pBegin), (size_type)(pEnd - pBegin));
+ return *this;
+ }
+
+
+ // Partially supported functionality
+ //
+ // When using fixed_substring on a character sequence that is within another
+ // string, the following functions may do one of two things:
+ // 1 Attempt to reallocate
+ // 2 Write a 0 char at the end of the fixed_substring
+ //
+ // Item #1 will result in a crash, due to the attempt by the underlying
+ // string class to free the substring memory. Item #2 will result in a 0
+ // char being written to the character array. Item #2 may or may not be
+ // a problem, depending on how you use fixed_substring. Thus the following
+ // functions cannot be used safely.
+
+ #if 0 // !defined(EA_COMPILER_NO_DELETED_FUNCTIONS) We may want to enable these deletions after some investigation of possible user impact.
+ this_type& operator=(value_type c) = delete;
+ void resize(size_type n, value_type c) = delete;
+ void resize(size_type n) = delete;
+ void reserve(size_type = 0) = delete;
+ void set_capacity(size_type n) = delete;
+ void clear() = delete;
+ this_type& operator+=(const base_type& x) = delete;
+ this_type& operator+=(const value_type* p) = delete;
+ this_type& operator+=(value_type c) = delete;
+ this_type& append(const base_type& x) = delete;
+ this_type& append(const base_type& x, size_type position, size_type n) = delete;
+ this_type& append(const value_type* p, size_type n) = delete;
+ this_type& append(const value_type* p) = delete;
+ this_type& append(size_type n) = delete;
+ this_type& append(size_type n, value_type c) = delete;
+ this_type& append(const value_type* pBegin, const value_type* pEnd) = delete;
+ this_type& append_sprintf_va_list(const value_type* pFormat, va_list arguments) = delete;
+ this_type& append_sprintf(const value_type* pFormat, ...) = delete;
+ void push_back(value_type c) = delete;
+ void pop_back() = delete;
+ this_type& assign(size_type n, value_type c) = delete;
+ this_type& insert(size_type position, const base_type& x) = delete;
+ this_type& insert(size_type position, const base_type& x, size_type beg, size_type n) = delete;
+ this_type& insert(size_type position, const value_type* p, size_type n) = delete;
+ this_type& insert(size_type position, const value_type* p) = delete;
+ this_type& insert(size_type position, size_type n, value_type c) = delete;
+ iterator insert(const_iterator p, value_type c) = delete;
+ void insert(const_iterator p, size_type n, value_type c) = delete;
+ void insert(const_iterator p, const value_type* pBegin, const value_type* pEnd) = delete;
+ this_type& erase(size_type position = 0, size_type n = npos) = delete;
+ iterator erase(const_iterator p) = delete;
+ iterator erase(const_iterator pBegin, const_iterator pEnd) = delete;
+ void swap(base_type& x) = delete;
+ this_type& sprintf_va_list(const value_type* pFormat, va_list arguments) = delete;
+ this_type& sprintf(const value_type* pFormat, ...) = delete;
+ #endif
+
+ }; // fixed_substring
+
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/fixed_vector.h b/EASTL/include/EASTL/fixed_vector.h
new file mode 100644
index 0000000..1dc482b
--- /dev/null
+++ b/EASTL/include/EASTL/fixed_vector.h
@@ -0,0 +1,625 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a vector which uses a fixed size memory pool.
+// The bEnableOverflow template parameter allows the container to resort to
+// heap allocations if the memory pool is exhausted.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_VECTOR_H
+#define EASTL_FIXED_VECTOR_H
+
+
+#include <EASTL/vector.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_VECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_VECTOR_DEFAULT_NAME
+ #define EASTL_FIXED_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_vector" // Unless the user overrides something, this is "EASTL fixed_vector".
+ #endif
+
+
+ /// EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_VECTOR_DEFAULT_NAME)
+ #endif
+
+
+ /// fixed_vector
+ ///
+ /// A fixed_vector with bEnableOverflow == true is identical to a regular
+ /// vector in terms of its behavior. All the expectations of regular vector
+ /// apply to it and no additional expectations come from it. When bEnableOverflow
+ /// is false, fixed_vector behaves like regular vector with the exception that
+ /// its capacity can never increase. All operations you do on such a fixed_vector
+ /// which require a capacity increase will result in undefined behavior or an
+ /// C++ allocation exception, depending on the configuration of EASTL.
+ ///
+ /// Template parameters:
+ /// T The type of object the vector holds.
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ /// Note: The nodeCount value must be at least 1.
+ ///
+ /// Example usage:
+ /// fixed_vector<Widget, 128, true> fixedVector);
+ ///
+ /// fixedVector.push_back(Widget());
+ /// fixedVector.resize(200);
+ /// fixedVector.clear();
+ ///
+ template <typename T, size_t nodeCount, bool bEnableOverflow = true, typename OverflowAllocator = typename eastl::type_select<bEnableOverflow, EASTLAllocatorType, EASTLDummyAllocatorType>::type>
+ class fixed_vector : public vector<T, fixed_vector_allocator<sizeof(T), nodeCount, EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> >
+ {
+ public:
+ typedef fixed_vector_allocator<sizeof(T), nodeCount, EASTL_ALIGN_OF(T),
+ 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef OverflowAllocator overflow_allocator_type;
+ typedef vector<T, fixed_allocator_type> base_type;
+ typedef fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::reference reference;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+ typedef aligned_buffer<nodeCount * sizeof(T), EASTL_ALIGN_OF(T)> aligned_buffer_type;
+
+ enum { kMaxSize = nodeCount };
+
+ using base_type::get_allocator;
+ using base_type::mpBegin;
+ using base_type::mpEnd;
+ using base_type::internalCapacityPtr;
+ using base_type::resize;
+ using base_type::clear;
+ using base_type::size;
+ using base_type::assign;
+ using base_type::npos;
+ using base_type::DoAllocate;
+ using base_type::DoFree;
+ using base_type::DoAssign;
+ using base_type::DoAssignFromIterator;
+
+ protected:
+ aligned_buffer_type mBuffer;
+
+ public:
+ fixed_vector();
+ explicit fixed_vector(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true.
+ explicit fixed_vector(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity.
+ fixed_vector(size_type n, const value_type& value);
+ fixed_vector(const this_type& x);
+ fixed_vector(this_type&& x);
+ fixed_vector(this_type&& x, const overflow_allocator_type& overflowAllocator);
+ fixed_vector(std::initializer_list<T> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ fixed_vector(InputIterator first, InputIterator last);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<T> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void set_capacity(size_type n);
+ void clear(bool freeOverflow);
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+ size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter.
+ bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot.
+ bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled.
+ bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter.
+
+ void* push_back_uninitialized();
+ void push_back(const value_type& value); // We implement push_back here because we have a specialization that's
+ reference push_back(); // smaller for the case of overflow being disabled.
+ void push_back(value_type&& value);
+
+ // OverflowAllocator
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT;
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT;
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+
+ protected:
+ void* DoPushBackUninitialized(true_type);
+ void* DoPushBackUninitialized(false_type);
+
+ void DoPushBack(true_type, const value_type& value);
+ void DoPushBack(false_type, const value_type& value);
+
+ void DoPushBackMove(true_type, value_type&& value);
+ void DoPushBackMove(false_type, value_type&& value);
+
+ reference DoPushBack(false_type);
+ reference DoPushBack(true_type);
+
+ }; // fixed_vector
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_vector
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector()
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
+ #endif
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ }
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector(const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
+ #endif
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ }
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector(size_type n)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
+ #endif
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ resize(n);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector(size_type n, const value_type& value)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
+ #endif
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ resize(n, value);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector(const this_type& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ get_allocator().copy_overflow_allocator(x.get_allocator());
+
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ base_type::template DoAssign<const_iterator, false>(x.begin(), x.end(), false_type());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector(this_type&& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ // Since we are a fixed_vector, we can't swap pointers. We can possibly so something like fixed_swap or
+ // we can just do an assignment from x. If we want to do the former then we need to have some complicated
+ // code to deal with overflow or no overflow, and whether the memory is in the fixed-size buffer or in
+ // the overflow allocator. 90% of the time the memory should be in the fixed buffer, in which case
+ // a simple assignment is no worse than the fancy pathway.
+
+ // Since we are a fixed_list, we can't normally swap pointers unless both this and
+ // x are using using overflow and the overflow allocators are equal. To do:
+ //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator()))
+ //{
+ // We can swap contents and may need to swap the allocators as well.
+ //}
+
+ // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that
+ // way then we may want to make a shared implementation.
+ get_allocator().copy_overflow_allocator(x.get_allocator());
+
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ base_type::template DoAssign<move_iterator<iterator>, true>(eastl::make_move_iterator(x.begin()), eastl::make_move_iterator(x.end()), false_type());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator))
+ {
+ // See the discussion above.
+
+ // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that
+ // way then we may want to make a shared implementation.
+ get_allocator().copy_overflow_allocator(x.get_allocator());
+
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(x.get_allocator().get_name());
+ #endif
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ base_type::template DoAssign<iterator, true>(x.begin(), x.end(), false_type());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector(std::initializer_list<T> ilist, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator))
+ {
+ typedef typename std::initializer_list<value_type>::iterator InputIterator;
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ base_type::template DoAssignFromIterator<InputIterator, false>(ilist.begin(), ilist.end(), IC());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ template <typename InputIterator>
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_vector(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
+ #endif
+
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ base_type::template DoAssign<InputIterator, false>(first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ get_allocator() = x.get_allocator(); // The primary effect of this is to copy the overflow allocator.
+ #endif
+
+ base_type::template DoAssign<const_iterator, false>(x.begin(), x.end(), false_type()); // Shorter route.
+ }
+ return *this;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(std::initializer_list<T> ilist)
+ {
+ typedef typename std::initializer_list<value_type>::iterator InputIterator;
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+
+ clear();
+ base_type::template DoAssignFromIterator<InputIterator, false>(ilist.begin(), ilist.end(), IC());
+ return *this;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::this_type&
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::operator=(this_type&& x)
+ {
+ // Since we are a fixed_vector, we can't swap pointers. We can possibly do something like fixed_swap or
+ // we can just do an assignment from x. If we want to do the former then we need to have some complicated
+ // code to deal with overflow or no overflow, and whether the memory is in the fixed-size buffer or in
+ // the overflow allocator. 90% of the time the memory should be in the fixed buffer, in which case
+ // a simple assignment is no worse than the fancy pathway.
+ if (this != &x)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ get_allocator() = x.get_allocator(); // The primary effect of this is to copy the overflow allocator.
+ #endif
+
+ base_type::template DoAssign<move_iterator<iterator>, true>(eastl::make_move_iterator(x.begin()), eastl::make_move_iterator(x.end()), false_type()); // Shorter route.
+ }
+ return *this;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::swap(this_type& x)
+ {
+ if((has_overflowed() && x.has_overflowed()) && (get_overflow_allocator() == x.get_overflow_allocator())) // If both containers are using the heap instead of local memory
+ { // then we can do a fast pointer swap instead of content swap.
+ eastl::swap(mpBegin, x.mpBegin);
+ eastl::swap(mpEnd, x.mpEnd);
+ eastl::swap(internalCapacityPtr(), x.internalCapacityPtr());
+ }
+ else
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::set_capacity(size_type n)
+ {
+ const size_type nPrevSize = (size_type)(mpEnd - mpBegin);
+ const size_type nPrevCapacity = (size_type)(internalCapacityPtr() - mpBegin);
+
+ if(n == npos) // If the user means to set the capacity so that it equals the size (i.e. free excess capacity)...
+ n = nPrevSize;
+
+ if(n != nPrevCapacity) // If the request results in a capacity change...
+ {
+ if(can_overflow() && (((uintptr_t)mpBegin != (uintptr_t)mBuffer.buffer) || (n > kMaxSize))) // If we are or would be using dynamically allocated memory instead of our fixed-size member buffer...
+ {
+ T* const pNewData = (n <= kMaxSize) ? (T*)&mBuffer.buffer[0] : DoAllocate(n);
+ T* const pCopyEnd = (n < nPrevSize) ? (mpBegin + n) : mpEnd;
+ eastl::uninitialized_move_ptr(mpBegin, pCopyEnd, pNewData); // Move [mpBegin, pCopyEnd) to p.
+ eastl::destruct(mpBegin, mpEnd);
+ if((uintptr_t)mpBegin != (uintptr_t)mBuffer.buffer)
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpEnd = pNewData + (pCopyEnd - mpBegin);
+ mpBegin = pNewData;
+ internalCapacityPtr() = mpBegin + n;
+ } // Else the new capacity would be within our fixed buffer.
+ else if(n < nPrevSize) // If the newly requested capacity is less than our size, we do what vector::set_capacity does and resize, even though we actually aren't reducing the capacity.
+ resize(n);
+ }
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::clear(bool freeOverflow)
+ {
+ base_type::clear();
+ if (freeOverflow && mpBegin != (value_type*)&mBuffer.buffer[0])
+ {
+ EASTLFree(get_allocator(), mpBegin, (internalCapacityPtr() - mpBegin) * sizeof(T));
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ }
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::reset_lose_memory()
+ {
+ mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
+ internalCapacityPtr() = mpBegin + nodeCount;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::size_type
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::max_size() const
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::full() const
+ {
+ // If size >= capacity, then we are definitely full.
+ // Also, if our size is smaller but we've switched away from mBuffer due to a previous overflow, then we are considered full.
+ return ((size_t)(mpEnd - mpBegin) >= kMaxSize) || ((void*)mpBegin != (void*)mBuffer.buffer);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::has_overflowed() const
+ {
+ // This will be incorrect for the case that bOverflowEnabled is true and the container was resized
+ // down to a small size where the fixed buffer could take over ownership of the data again.
+ // The only simple fix for this is to take on another member variable which tracks whether this overflow
+ // has occurred at some point in the past.
+ return ((void*)mpBegin != (void*)mBuffer.buffer);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::can_overflow() const
+ {
+ return bEnableOverflow;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void* fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::push_back_uninitialized()
+ {
+ return DoPushBackUninitialized(typename type_select<bEnableOverflow, true_type, false_type>::type());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void* fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::DoPushBackUninitialized(true_type)
+ {
+ return base_type::push_back_uninitialized();
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void* fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::DoPushBackUninitialized(false_type)
+ {
+ EASTL_ASSERT(mpEnd < internalCapacityPtr());
+
+ return mpEnd++;
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::push_back(const value_type& value)
+ {
+ DoPushBack(typename type_select<bEnableOverflow, true_type, false_type>::type(), value);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::DoPushBack(true_type, const value_type& value)
+ {
+ base_type::push_back(value);
+ }
+
+
+ // This template specializes for overflow NOT enabled.
+ // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will)
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::DoPushBack(false_type, const value_type& value)
+ {
+ EASTL_ASSERT(mpEnd < internalCapacityPtr());
+
+ ::new((void*)mpEnd++) value_type(value);
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::reference fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::push_back()
+ {
+ return DoPushBack(typename type_select<bEnableOverflow, true_type, false_type>::type());
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::reference fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::DoPushBack(true_type)
+ {
+ return base_type::push_back();
+ }
+
+
+ // This template specializes for overflow NOT enabled.
+ // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will)
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::reference fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::DoPushBack(false_type)
+ {
+ EASTL_ASSERT(mpEnd < internalCapacityPtr());
+
+ ::new((void*)mpEnd++) value_type; // Note that this isn't value_type() as that syntax doesn't work on all compilers for POD types.
+
+ return *(mpEnd - 1); // Same as return back();
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::push_back(value_type&& value)
+ {
+ DoPushBackMove(typename type_select<bEnableOverflow, true_type, false_type>::type(), eastl::move(value));
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::DoPushBackMove(true_type, value_type&& value)
+ {
+ base_type::push_back(eastl::move(value)); // This will call vector::push_back(value_type &&), and possibly swap value with *mpEnd.
+ }
+
+
+ // This template specializes for overflow NOT enabled.
+ // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will)
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::DoPushBackMove(false_type, value_type&& value)
+ {
+ EASTL_ASSERT(mpEnd < internalCapacityPtr());
+
+ ::new((void*)mpEnd++) value_type(eastl::move(value)); // This will call the value_type(value_type&&) constructor, and possibly swap value with *mpEnd.
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline const typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type&
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return get_allocator().get_overflow_allocator();
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type&
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+ {
+ return get_allocator().get_overflow_allocator();
+ }
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ get_allocator().set_overflow_allocator(allocator);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ // operator ==, !=, <, >, <=, >= come from the vector implementations.
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline void swap(fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>& a,
+ fixed_vector<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b);
+ }
+
+
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/functional.h b/EASTL/include/EASTL/functional.h
new file mode 100644
index 0000000..6fa3489
--- /dev/null
+++ b/EASTL/include/EASTL/functional.h
@@ -0,0 +1,1255 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FUNCTIONAL_H
+#define EASTL_FUNCTIONAL_H
+
+
+#include <EABase/eabase.h>
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/internal/functional_base.h>
+#include <EASTL/internal/mem_fn.h>
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////
+ // Primary C++ functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T = void>
+ struct plus : public binary_function<T, T, T>
+ {
+ EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const
+ { return a + b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/plus_void
+ template <>
+ struct plus<void>
+ {
+ typedef int is_transparent;
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) + eastl::forward<B>(b))
+ { return eastl::forward<A>(a) + eastl::forward<B>(b); }
+ };
+
+ template <typename T = void>
+ struct minus : public binary_function<T, T, T>
+ {
+ EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const
+ { return a - b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/minus_void
+ template <>
+ struct minus<void>
+ {
+ typedef int is_transparent;
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) - eastl::forward<B>(b))
+ { return eastl::forward<A>(a) - eastl::forward<B>(b); }
+ };
+
+ template <typename T = void>
+ struct multiplies : public binary_function<T, T, T>
+ {
+ EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const
+ { return a * b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/multiplies_void
+ template <>
+ struct multiplies<void>
+ {
+ typedef int is_transparent;
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) * eastl::forward<B>(b))
+ { return eastl::forward<A>(a) * eastl::forward<B>(b); }
+ };
+
+ template <typename T = void>
+ struct divides : public binary_function<T, T, T>
+ {
+ EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const
+ { return a / b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/divides_void
+ template <>
+ struct divides<void>
+ {
+ typedef int is_transparent;
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) / eastl::forward<B>(b))
+ { return eastl::forward<A>(a) / eastl::forward<B>(b); }
+ };
+
+ template <typename T = void>
+ struct modulus : public binary_function<T, T, T>
+ {
+ EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const
+ { return a % b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/modulus_void
+ template <>
+ struct modulus<void>
+ {
+ typedef int is_transparent;
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) % eastl::forward<B>(b))
+ { return eastl::forward<A>(a) % eastl::forward<B>(b); }
+ };
+
+ template <typename T = void>
+ struct negate : public unary_function<T, T>
+ {
+ EA_CPP14_CONSTEXPR T operator()(const T& a) const
+ { return -a; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/negate_void
+ template <>
+ struct negate<void>
+ {
+ typedef int is_transparent;
+ template<typename T>
+ EA_CPP14_CONSTEXPR auto operator()(T&& t) const
+ -> decltype(-eastl::forward<T>(t))
+ { return -eastl::forward<T>(t); }
+ };
+
+ template <typename T = void>
+ struct equal_to : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a == b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/equal_to_void
+ template <>
+ struct equal_to<void>
+ {
+ typedef int is_transparent;
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) == eastl::forward<B>(b))
+ { return eastl::forward<A>(a) == eastl::forward<B>(b); }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_equal_to(const T& a, const T& b, Compare compare)
+ {
+ return compare(a, b) == compare(b, a);
+ }
+
+ template <typename T = void>
+ struct not_equal_to : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a != b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/not_equal_to_void
+ template <>
+ struct not_equal_to<void>
+ {
+ typedef int is_transparent;
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) != eastl::forward<B>(b))
+ { return eastl::forward<A>(a) != eastl::forward<B>(b); }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_not_equal_to(const T& a, const T& b, Compare compare)
+ {
+ return compare(a, b) == compare(b, a); // We want the not equal comparison results to be equal.
+ }
+
+ /// str_equal_to
+ ///
+ /// Compares two 0-terminated string types.
+ /// The T types are expected to be iterators or act like iterators.
+ /// The expected behavior of str_less is the same as (strcmp(p1, p2) == 0).
+ ///
+ /// Example usage:
+ /// hash_set<const char*, hash<const char*>, str_equal_to<const char*> > stringHashSet;
+ ///
+ /// Note:
+ /// You couldn't use str_equal_to like this:
+ /// bool result = equal("hi", "hi" + 2, "ho", str_equal_to<const char*>());
+ /// This is because equal tests an array of something, with each element by
+ /// the comparison function. But str_equal_to tests an array of something itself.
+ ///
+ /// To consider: Update this code to use existing word-based comparison optimizations,
+ /// such as that used in the EAStdC Strcmp function.
+ ///
+ template <typename T>
+ struct str_equal_to : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(T a, T b) const
+ {
+ while(*a && (*a == *b))
+ {
+ ++a;
+ ++b;
+ }
+ return (*a == *b);
+ }
+ };
+
+ template <typename T = void>
+ struct greater : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a > b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/greater_void
+ template <>
+ struct greater<void>
+ {
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) > eastl::forward<B>(b))
+ { return eastl::forward<A>(a) > eastl::forward<B>(b); }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_greater(const T& a, const T& b, Compare compare)
+ {
+ return !compare(a, b) || !compare(b, a); // If (a > b), then !(b > a)
+ }
+
+
+ template <typename T, typename Compare>
+ bool validate_less(const T& a, const T& b, Compare compare)
+ {
+ return !compare(a, b) || !compare(b, a); // If (a < b), then !(b < a)
+ }
+
+ /// str_less
+ ///
+ /// Compares two 0-terminated string types.
+ /// The T types are expected to be iterators or act like iterators,
+ /// and that includes being a pointer to a C character array.
+ /// The expected behavior of str_less is the same as (strcmp(p1, p2) < 0).
+ /// This function is not Unicode-correct and it's not guaranteed to work
+ /// with all Unicode strings.
+ ///
+ /// Example usage:
+ /// set<const char*, str_less<const char*> > stringSet;
+ ///
+ /// To consider: Update this code to use existing word-based comparison optimizations,
+ /// such as that used in the EAStdC Strcmp function.
+ ///
+ template <typename T>
+ struct str_less : public binary_function<T, T, bool>
+ {
+ bool operator()(T a, T b) const
+ {
+ while(static_cast<typename make_unsigned<typename remove_pointer<T>::type>::type>(*a) ==
+ static_cast<typename make_unsigned<typename remove_pointer<T>::type>::type>(*b))
+ {
+ if(*a == 0)
+ return (*b != 0);
+ ++a;
+ ++b;
+ }
+
+ char aValue = static_cast<typename remove_pointer<T>::type>(*a);
+ char bValue = static_cast<typename remove_pointer<T>::type>(*b);
+
+ typename make_unsigned<char>::type aValueU = static_cast<typename make_unsigned<char>::type>(aValue);
+ typename make_unsigned<char>::type bValueU = static_cast<typename make_unsigned<char>::type>(bValue);
+
+ return aValueU < bValueU;
+
+ //return (static_cast<typename make_unsigned<typename remove_pointer<T>::type>::type>(*a) <
+ // static_cast<typename make_unsigned<typename remove_pointer<T>::type>::type>(*b));
+ }
+ };
+
+ template <typename T = void>
+ struct greater_equal : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a >= b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/greater_equal_void
+ template <>
+ struct greater_equal<void>
+ {
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) >= eastl::forward<B>(b))
+ { return eastl::forward<A>(a) >= eastl::forward<B>(b); }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_greater_equal(const T& a, const T& b, Compare compare)
+ {
+ return !compare(a, b) || !compare(b, a); // If (a >= b), then !(b >= a)
+ }
+
+ template <typename T = void>
+ struct less_equal : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a <= b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/less_equal_void
+ template <>
+ struct less_equal<void>
+ {
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) <= eastl::forward<B>(b))
+ { return eastl::forward<A>(a) <= eastl::forward<B>(b); }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_less_equal(const T& a, const T& b, Compare compare)
+ {
+ return !compare(a, b) || !compare(b, a); // If (a <= b), then !(b <= a)
+ }
+
+ template <typename T = void>
+ struct logical_and : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a && b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/logical_and_void
+ template <>
+ struct logical_and<void>
+ {
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) && eastl::forward<B>(b))
+ { return eastl::forward<A>(a) && eastl::forward<B>(b); }
+ };
+
+ template <typename T = void>
+ struct logical_or : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a || b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/logical_or_void
+ template <>
+ struct logical_or<void>
+ {
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) || eastl::forward<B>(b))
+ { return eastl::forward<A>(a) || eastl::forward<B>(b); }
+ };
+
+ template <typename T = void>
+ struct logical_not : public unary_function<T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a) const
+ { return !a; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/logical_not_void
+ template <>
+ struct logical_not<void>
+ {
+ template<typename T>
+ EA_CPP14_CONSTEXPR auto operator()(T&& t) const
+ -> decltype(!eastl::forward<T>(t))
+ { return !eastl::forward<T>(t); }
+ };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // Dual type functions
+ ///////////////////////////////////////////////////////////////////////
+
+
+ template <typename T, typename U>
+ struct equal_to_2 : public binary_function<T, U, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const
+ { return a == b; }
+
+ template <typename T_ = T, typename U_ = U, typename = eastl::enable_if_t<!eastl::is_same_v<eastl::remove_const_t<T_>, eastl::remove_const_t<U_>>>>
+ EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const
+ { return b == a; }
+ };
+
+ template <typename T, typename U>
+ struct not_equal_to_2 : public binary_function<T, U, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const
+ { return a != b; }
+
+ template <typename T_ = T, typename U_ = U, typename = eastl::enable_if_t<!eastl::is_same_v<eastl::remove_const_t<T_>, eastl::remove_const_t<U_>>>>
+ EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const
+ { return b != a; }
+ };
+
+
+ template <typename T, typename U>
+ struct less_2 : public binary_function<T, U, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const
+ { return a < b; }
+
+ template <typename T_ = T, typename U_ = U, typename = eastl::enable_if_t<!eastl::is_same_v<eastl::remove_const_t<T_>, eastl::remove_const_t<U_>>>>
+ EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const
+ { return b < a; }
+ };
+
+
+ /// unary_negate
+ ///
+ template <typename Predicate>
+ class unary_negate : public unary_function<typename Predicate::argument_type, bool>
+ {
+ protected:
+ Predicate mPredicate;
+ public:
+ explicit unary_negate(const Predicate& a)
+ : mPredicate(a) {}
+ EA_CPP14_CONSTEXPR bool operator()(const typename Predicate::argument_type& a) const
+ { return !mPredicate(a); }
+ };
+
+ template <typename Predicate>
+ inline EA_CPP14_CONSTEXPR unary_negate<Predicate> not1(const Predicate& predicate)
+ { return unary_negate<Predicate>(predicate); }
+
+
+
+ /// binary_negate
+ ///
+ template <typename Predicate>
+ class binary_negate : public binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>
+ {
+ protected:
+ Predicate mPredicate;
+ public:
+ explicit binary_negate(const Predicate& a)
+ : mPredicate(a) { }
+ EA_CPP14_CONSTEXPR bool operator()(const typename Predicate::first_argument_type& a, const typename Predicate::second_argument_type& b) const
+ { return !mPredicate(a, b); }
+ };
+
+ template <typename Predicate>
+ inline EA_CPP14_CONSTEXPR binary_negate<Predicate> not2(const Predicate& predicate)
+ { return binary_negate<Predicate>(predicate); }
+
+
+
+ /// unary_compose
+ ///
+ template<typename Operation1, typename Operation2>
+ struct unary_compose : public unary_function<typename Operation2::argument_type, typename Operation1::result_type>
+ {
+ protected:
+ Operation1 op1;
+ Operation2 op2;
+
+ public:
+ unary_compose(const Operation1& x, const Operation2& y)
+ : op1(x), op2(y) {}
+
+ typename Operation1::result_type operator()(const typename Operation2::argument_type& x) const
+ { return op1(op2(x)); }
+
+ typename Operation1::result_type operator()(typename Operation2::argument_type& x) const
+ { return op1(op2(x)); }
+ };
+
+ template<typename Operation1,typename Operation2>
+ inline unary_compose<Operation1,Operation2>
+ compose1(const Operation1& op1, const Operation2& op2)
+ {
+ return unary_compose<Operation1, Operation2>(op1,op2);
+ }
+
+
+ /// binary_compose
+ ///
+ template <class Operation1, class Operation2, class Operation3>
+ class binary_compose : public unary_function<typename Operation2::argument_type, typename Operation1::result_type>
+ {
+ protected:
+ Operation1 op1;
+ Operation2 op2;
+ Operation3 op3;
+
+ public:
+ // Support binary functors too.
+ typedef typename Operation2::argument_type first_argument_type;
+ typedef typename Operation3::argument_type second_argument_type;
+
+ binary_compose(const Operation1& x, const Operation2& y, const Operation3& z)
+ : op1(x), op2(y), op3(z) { }
+
+ typename Operation1::result_type operator()(const typename Operation2::argument_type& x) const
+ { return op1(op2(x),op3(x)); }
+
+ typename Operation1::result_type operator()(typename Operation2::argument_type& x) const
+ { return op1(op2(x),op3(x)); }
+
+ typename Operation1::result_type operator()(const typename Operation2::argument_type& x,const typename Operation3::argument_type& y) const
+ { return op1(op2(x),op3(y)); }
+
+ typename Operation1::result_type operator()(typename Operation2::argument_type& x, typename Operation3::argument_type& y) const
+ { return op1(op2(x),op3(y)); }
+ };
+
+
+ template <class Operation1, class Operation2, class Operation3>
+ inline binary_compose<Operation1, Operation2, Operation3>
+ compose2(const Operation1& op1, const Operation2& op2, const Operation3& op3)
+ {
+ return binary_compose<Operation1, Operation2, Operation3>(op1, op2, op3);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pointer_to_unary_function
+ ///////////////////////////////////////////////////////////////////////
+
+ /// pointer_to_unary_function
+ ///
+ /// This is an adapter template which converts a pointer to a standalone
+ /// function to a function object. This allows standalone functions to
+ /// work in many cases where the system requires a function object.
+ ///
+ /// Example usage:
+ /// ptrdiff_t Rand(ptrdiff_t n) { return rand() % n; } // Note: The C rand function is poor and slow.
+ /// pointer_to_unary_function<ptrdiff_t, ptrdiff_t> randInstance(Rand);
+ /// random_shuffle(pArrayBegin, pArrayEnd, randInstance);
+ ///
+ template <typename Arg, typename Result>
+ class pointer_to_unary_function : public unary_function<Arg, Result>
+ {
+ protected:
+ Result (*mpFunction)(Arg);
+
+ public:
+ pointer_to_unary_function()
+ { }
+
+ explicit pointer_to_unary_function(Result (*pFunction)(Arg))
+ : mpFunction(pFunction) { }
+
+ Result operator()(Arg x) const
+ { return mpFunction(x); }
+ };
+
+
+ /// ptr_fun
+ ///
+ /// This ptr_fun is simply shorthand for usage of pointer_to_unary_function.
+ ///
+ /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway):
+ /// int factorial(int x) { return (x > 1) ? (x * factorial(x - 1)) : x; }
+ /// transform(pIntArrayBegin, pIntArrayEnd, pIntArrayBegin, ptr_fun(factorial));
+ ///
+ template <typename Arg, typename Result>
+ inline pointer_to_unary_function<Arg, Result>
+ ptr_fun(Result (*pFunction)(Arg))
+ { return pointer_to_unary_function<Arg, Result>(pFunction); }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pointer_to_binary_function
+ ///////////////////////////////////////////////////////////////////////
+
+ /// pointer_to_binary_function
+ ///
+ /// This is an adapter template which converts a pointer to a standalone
+ /// function to a function object. This allows standalone functions to
+ /// work in many cases where the system requires a function object.
+ ///
+ template <typename Arg1, typename Arg2, typename Result>
+ class pointer_to_binary_function : public binary_function<Arg1, Arg2, Result>
+ {
+ protected:
+ Result (*mpFunction)(Arg1, Arg2);
+
+ public:
+ pointer_to_binary_function()
+ { }
+
+ explicit pointer_to_binary_function(Result (*pFunction)(Arg1, Arg2))
+ : mpFunction(pFunction) {}
+
+ Result operator()(Arg1 x, Arg2 y) const
+ { return mpFunction(x, y); }
+ };
+
+
+ /// This ptr_fun is simply shorthand for usage of pointer_to_binary_function.
+ ///
+ /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway):
+ /// int multiply(int x, int y) { return x * y; }
+ /// transform(pIntArray1Begin, pIntArray1End, pIntArray2Begin, pIntArray1Begin, ptr_fun(multiply));
+ ///
+ template <typename Arg1, typename Arg2, typename Result>
+ inline pointer_to_binary_function<Arg1, Arg2, Result>
+ ptr_fun(Result (*pFunction)(Arg1, Arg2))
+ { return pointer_to_binary_function<Arg1, Arg2, Result>(pFunction); }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // mem_fun
+ // mem_fun1
+ //
+ // Note that mem_fun calls member functions via *pointers* to classes
+ // and not instances of classes. mem_fun_ref is for calling functions
+ // via instances of classes or references to classes.
+ //
+ // NOTE:
+ // mem_fun was deprecated in C++11 and removed in C++17, in favor
+ // of the more general mem_fn and bind.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ /// mem_fun_t
+ ///
+ /// Member function with no arguments.
+ ///
+ template <typename Result, typename T>
+ class mem_fun_t : public unary_function<T*, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)();
+
+ inline explicit mem_fun_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ inline Result operator()(T* pT) const
+ {
+ return (pT->*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun1_t
+ ///
+ /// Member function with one argument.
+ ///
+ template <typename Result, typename T, typename Argument>
+ class mem_fun1_t : public binary_function<T*, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument);
+
+ inline explicit mem_fun1_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ inline Result operator()(T* pT, Argument arg) const
+ {
+ return (pT->*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun_t
+ ///
+ /// Const member function with no arguments.
+ /// Note that we inherit from unary_function<const T*, Result>
+ /// instead of what the C++ standard specifies: unary_function<T*, Result>.
+ /// The C++ standard is in error and this has been recognized by the defect group.
+ ///
+ template <typename Result, typename T>
+ class const_mem_fun_t : public unary_function<const T*, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)() const;
+
+ inline explicit const_mem_fun_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ inline Result operator()(const T* pT) const
+ {
+ return (pT->*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun1_t
+ ///
+ /// Const member function with one argument.
+ /// Note that we inherit from unary_function<const T*, Result>
+ /// instead of what the C++ standard specifies: unary_function<T*, Result>.
+ /// The C++ standard is in error and this has been recognized by the defect group.
+ ///
+ template <typename Result, typename T, typename Argument>
+ class const_mem_fun1_t : public binary_function<const T*, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument) const;
+
+ inline explicit const_mem_fun1_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ inline Result operator()(const T* pT, Argument arg) const
+ {
+ return (pT->*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun
+ ///
+ /// This is the high level interface to the mem_fun_t family.
+ ///
+ /// Example usage:
+ /// struct TestClass { void print() { puts("hello"); } }
+ /// TestClass* pTestClassArray[3] = { ... };
+ /// for_each(pTestClassArray, pTestClassArray + 3, &TestClass::print);
+ ///
+ /// Note: using conventional inlining here to avoid issues on GCC/Linux
+ ///
+ template <typename Result, typename T>
+ inline mem_fun_t<Result, T>
+ mem_fun(Result (T::*MemberFunction)())
+ {
+ return eastl::mem_fun_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ inline mem_fun1_t<Result, T, Argument>
+ mem_fun(Result (T::*MemberFunction)(Argument))
+ {
+ return eastl::mem_fun1_t<Result, T, Argument>(MemberFunction);
+ }
+
+ template <typename Result, typename T>
+ inline const_mem_fun_t<Result, T>
+ mem_fun(Result (T::*MemberFunction)() const)
+ {
+ return eastl::const_mem_fun_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ inline const_mem_fun1_t<Result, T, Argument>
+ mem_fun(Result (T::*MemberFunction)(Argument) const)
+ {
+ return eastl::const_mem_fun1_t<Result, T, Argument>(MemberFunction);
+ }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // mem_fun_ref
+ // mem_fun1_ref
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ /// mem_fun_ref_t
+ ///
+ template <typename Result, typename T>
+ class mem_fun_ref_t : public unary_function<T, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)();
+
+ inline explicit mem_fun_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ inline Result operator()(T& t) const
+ {
+ return (t.*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun1_ref_t
+ ///
+ template <typename Result, typename T, typename Argument>
+ class mem_fun1_ref_t : public binary_function<T, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument);
+
+ inline explicit mem_fun1_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ inline Result operator()(T& t, Argument arg) const
+ {
+ return (t.*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun_ref_t
+ ///
+ template <typename Result, typename T>
+ class const_mem_fun_ref_t : public unary_function<T, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)() const;
+
+ inline explicit const_mem_fun_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ inline Result operator()(const T& t) const
+ {
+ return (t.*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun1_ref_t
+ ///
+ template <typename Result, typename T, typename Argument>
+ class const_mem_fun1_ref_t : public binary_function<T, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument) const;
+
+ inline explicit const_mem_fun1_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ inline Result operator()(const T& t, Argument arg) const
+ {
+ return (t.*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun_ref
+ /// Example usage:
+ /// struct TestClass { void print() { puts("hello"); } }
+ /// TestClass testClassArray[3];
+ /// for_each(testClassArray, testClassArray + 3, &TestClass::print);
+ ///
+ /// Note: using conventional inlining here to avoid issues on GCC/Linux
+ ///
+ template <typename Result, typename T>
+ inline mem_fun_ref_t<Result, T>
+ mem_fun_ref(Result (T::*MemberFunction)())
+ {
+ return eastl::mem_fun_ref_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ inline mem_fun1_ref_t<Result, T, Argument>
+ mem_fun_ref(Result (T::*MemberFunction)(Argument))
+ {
+ return eastl::mem_fun1_ref_t<Result, T, Argument>(MemberFunction);
+ }
+
+ template <typename Result, typename T>
+ inline const_mem_fun_ref_t<Result, T>
+ mem_fun_ref(Result (T::*MemberFunction)() const)
+ {
+ return eastl::const_mem_fun_ref_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ inline const_mem_fun1_ref_t<Result, T, Argument>
+ mem_fun_ref(Result (T::*MemberFunction)(Argument) const)
+ {
+ return eastl::const_mem_fun1_ref_t<Result, T, Argument>(MemberFunction);
+ }
+
+
+ // not_fn_ret
+ // not_fn_ret is a implementation specified return type of eastl::not_fn.
+ // The type name is not specified but it does have mandated functions that conforming implementations must support.
+ //
+ // http://en.cppreference.com/w/cpp/utility/functional/not_fn
+ //
+ template <typename F>
+ struct not_fn_ret
+ {
+ explicit not_fn_ret(F&& f) : mDecayF(eastl::forward<F>(f)) {}
+ not_fn_ret(not_fn_ret&& f) = default;
+ not_fn_ret(const not_fn_ret& f) = default;
+
+ // overloads for lvalues
+ template <class... Args>
+ auto operator()(Args&&... args) &
+ -> decltype(!eastl::declval<eastl::invoke_result_t<eastl::decay_t<F>&, Args...>>())
+ { return !eastl::invoke(mDecayF, eastl::forward<Args>(args)...); }
+
+ template <class... Args>
+ auto operator()(Args&&... args) const &
+ -> decltype(!eastl::declval<eastl::invoke_result_t<eastl::decay_t<F> const&, Args...>>())
+ { return !eastl::invoke(mDecayF, eastl::forward<Args>(args)...); }
+
+ // overloads for rvalues
+ template <class... Args>
+ auto operator()(Args&&... args) &&
+ -> decltype(!eastl::declval<eastl::invoke_result_t<eastl::decay_t<F>, Args...>>())
+ { return !eastl::invoke(eastl::move(mDecayF), eastl::forward<Args>(args)...); }
+
+ template <class... Args>
+ auto operator()(Args&&... args) const &&
+ -> decltype(!eastl::declval<eastl::invoke_result_t<eastl::decay_t<F> const, Args...>>())
+ { return !eastl::invoke(eastl::move(mDecayF), eastl::forward<Args>(args)...); }
+
+ eastl::decay_t<F> mDecayF;
+ };
+
+ /// not_fn
+ ///
+ /// Creates an implementation specified functor that returns the complement of the callable object it was passed.
+ /// not_fn is intended to replace the C++03-era negators eastl::not1 and eastl::not2.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/functional/not_fn
+ ///
+ /// Example usage:
+ ///
+ /// auto nf = eastl::not_fn([]{ return false; });
+ /// assert(nf()); // return true
+ ///
+ template <class F>
+ inline not_fn_ret<F> not_fn(F&& f)
+ {
+ return not_fn_ret<F>(eastl::forward<F>(f));
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hash
+ ///////////////////////////////////////////////////////////////////////
+ namespace Internal
+ {
+ // utility to disable the generic template specialization that is
+ // used for enum types only.
+ template <typename T, bool Enabled>
+ struct EnableHashIf {};
+
+ template <typename T>
+ struct EnableHashIf<T, true>
+ {
+ size_t operator()(T p) const { return size_t(p); }
+ };
+ } // namespace Internal
+
+
+ template <typename T> struct hash;
+
+ template <typename T>
+ struct hash : Internal::EnableHashIf<T, is_enum_v<T>> {};
+
+ template <typename T> struct hash<T*> // Note that we use the pointer as-is and don't divide by sizeof(T*). This is because the table is of a prime size and this division doesn't benefit distribution.
+ { size_t operator()(T* p) const { return size_t(uintptr_t(p)); } };
+
+ template <> struct hash<bool>
+ { size_t operator()(bool val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<char>
+ { size_t operator()(char val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<signed char>
+ { size_t operator()(signed char val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned char>
+ { size_t operator()(unsigned char val) const { return static_cast<size_t>(val); } };
+
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ template <> struct hash<char8_t>
+ { size_t operator()(char8_t val) const { return static_cast<size_t>(val); } };
+ #endif
+
+ #if defined(EA_CHAR16_NATIVE) && EA_CHAR16_NATIVE
+ template <> struct hash<char16_t>
+ { size_t operator()(char16_t val) const { return static_cast<size_t>(val); } };
+ #endif
+
+ #if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE
+ template <> struct hash<char32_t>
+ { size_t operator()(char32_t val) const { return static_cast<size_t>(val); } };
+ #endif
+
+ // If wchar_t is a native type instead of simply a define to an existing type...
+ #if !defined(EA_WCHAR_T_NON_NATIVE)
+ template <> struct hash<wchar_t>
+ { size_t operator()(wchar_t val) const { return static_cast<size_t>(val); } };
+ #endif
+
+ template <> struct hash<signed short>
+ { size_t operator()(signed short val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned short>
+ { size_t operator()(unsigned short val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<signed int>
+ { size_t operator()(signed int val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned int>
+ { size_t operator()(unsigned int val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<signed long>
+ { size_t operator()(signed long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned long>
+ { size_t operator()(unsigned long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<signed long long>
+ { size_t operator()(signed long long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned long long>
+ { size_t operator()(unsigned long long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<float>
+ { size_t operator()(float val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<double>
+ { size_t operator()(double val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<long double>
+ { size_t operator()(long double val) const { return static_cast<size_t>(val); } };
+
+ #if defined(EA_HAVE_INT128) && EA_HAVE_INT128
+ template <> struct hash<uint128_t>
+ { size_t operator()(uint128_t val) const { return static_cast<size_t>(val); } };
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // string hashes
+ //
+ // Note that our string hashes here intentionally are slow for long strings.
+ // The reasoning for this is so:
+ // - The large majority of hashed strings are only a few bytes long.
+ // - The hash function is significantly more efficient if it can make this assumption.
+ // - The user is welcome to make a custom hash for those uncommon cases where
+ // long strings need to be hashed. Indeed, the user can probably make a
+ // special hash customized for such strings that's better than what we provide.
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <> struct hash<char*>
+ {
+ size_t operator()(const char* p) const
+ {
+ uint32_t c, result = 2166136261U; // FNV1 hash. Perhaps the best string hash. Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = (uint8_t)*p++) != 0) // Using '!=' disables compiler warnings.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<const char*>
+ {
+ size_t operator()(const char* p) const
+ {
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = (uint8_t)*p++) != 0) // cast to unsigned 8 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+#if EA_CHAR8_UNIQUE
+ template <> struct hash<char8_t*>
+ {
+ size_t operator()(const char8_t* p) const
+ {
+ uint32_t c, result = 2166136261U; // FNV1 hash. Perhaps the best string hash. Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = (uint8_t)*p++) != 0) // Using '!=' disables compiler warnings.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<const char8_t*>
+ {
+ size_t operator()(const char8_t* p) const
+ {
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = (uint8_t)*p++) != 0) // cast to unsigned 8 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+#endif
+
+
+ template <> struct hash<char16_t*>
+ {
+ size_t operator()(const char16_t* p) const
+ {
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = (uint16_t)*p++) != 0) // cast to unsigned 16 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<const char16_t*>
+ {
+ size_t operator()(const char16_t* p) const
+ {
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = (uint16_t)*p++) != 0) // cast to unsigned 16 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<char32_t*>
+ {
+ size_t operator()(const char32_t* p) const
+ {
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<const char32_t*>
+ {
+ size_t operator()(const char32_t* p) const
+ {
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+#if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE
+ template<> struct hash<wchar_t*>
+ {
+ size_t operator()(const wchar_t* p) const
+ {
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while ((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template<> struct hash<const wchar_t*>
+ {
+ size_t operator()(const wchar_t* p) const
+ {
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while ((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+#endif
+
+ /// string_hash
+ ///
+ /// Defines a generic string hash for an arbitrary EASTL basic_string container.
+ ///
+ /// Example usage:
+ /// eastl::hash_set<MyString, eastl::string_hash<MyString> > hashSet;
+ ///
+ template <typename String>
+ struct string_hash
+ {
+ typedef String string_type;
+ typedef typename String::value_type value_type;
+ typedef typename eastl::add_unsigned<value_type>::type unsigned_value_type;
+
+ size_t operator()(const string_type& s) const
+ {
+ const unsigned_value_type* p = (const unsigned_value_type*)s.c_str();
+ uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size.
+ while((c = *p++) != 0)
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+
+} // namespace eastl
+
+#include <EASTL/internal/function.h>
+
+#endif // Header include guard
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/hash_map.h b/EASTL/include/EASTL/hash_map.h
new file mode 100644
index 0000000..e7cad7b
--- /dev/null
+++ b/EASTL/include/EASTL/hash_map.h
@@ -0,0 +1,636 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file is based on the TR1 (technical report 1) reference implementation
+// of the unordered_set/unordered_map C++ classes as of about 4/2005. Most likely
+// many or all C++ library vendors' implementations of this classes will be
+// based off of the reference version and so will look pretty similar to this
+// file as well as other vendors' versions.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_HASH_MAP_H
+#define EASTL_HASH_MAP_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/hashtable.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_HASH_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASH_MAP_DEFAULT_NAME
+ #define EASTL_HASH_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_map" // Unless the user overrides something, this is "EASTL hash_map".
+ #endif
+
+
+ /// EASTL_HASH_MULTIMAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASH_MULTIMAP_DEFAULT_NAME
+ #define EASTL_HASH_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_multimap" // Unless the user overrides something, this is "EASTL hash_multimap".
+ #endif
+
+
+ /// EASTL_HASH_MAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASH_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_HASH_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MAP_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR
+ #define EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MULTIMAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// hash_map
+ ///
+ /// Implements a hash_map, which is a hashed associative container.
+ /// Lookups are O(1) (that is, they are fast) but the container is
+ /// not sorted. Note that lookups are only O(1) if the hash table
+ /// is well-distributed (non-colliding). The lookup approaches
+ /// O(n) behavior as the table becomes increasingly poorly distributed.
+ ///
+ /// set_max_load_factor
+ /// If you want to make a hashtable never increase its bucket usage,
+ /// call set_max_load_factor with a very high value such as 100000.f.
+ ///
+ /// bCacheHashCode
+ /// We provide the boolean bCacheHashCode template parameter in order
+ /// to allow the storing of the hash code of the key within the map.
+ /// When this option is disabled, the rehashing of the table will
+ /// call the hash function on the key. Setting bCacheHashCode to true
+ /// is useful for cases whereby the calculation of the hash value for
+ /// a contained object is very expensive.
+ ///
+ /// find_as
+ /// In order to support the ability to have a hashtable of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the hashtable key type.
+ ///
+ /// Example find_as usage:
+ /// hash_map<string, int> hashMap;
+ /// i = hashMap.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example find_as usage (namespaces omitted for brevity):
+ /// hash_map<string, int> hashMap;
+ /// i = hashMap.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename Key, typename T, typename Hash = eastl::hash<Key>, typename Predicate = eastl::equal_to<Key>,
+ typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+ class hash_map
+ : public hashtable<Key, eastl::pair<const Key, T>, Allocator, eastl::use_first<eastl::pair<const Key, T> >, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash, prime_rehash_policy, bCacheHashCode, true, true>
+ {
+ public:
+ typedef hashtable<Key, eastl::pair<const Key, T>, Allocator,
+ eastl::use_first<eastl::pair<const Key, T> >,
+ Predicate, Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, true, true> base_type;
+ typedef hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::key_type key_type;
+ typedef T mapped_type;
+ typedef typename base_type::value_type value_type; // NOTE: 'value_type = pair<const key_type, mapped_type>'.
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::insert_return_type insert_return_type;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+
+ using base_type::insert;
+
+ public:
+ /// hash_map
+ ///
+ /// Default constructor.
+ ///
+ hash_map()
+ : this_type(EASTL_HASH_MAP_DEFAULT_ALLOCATOR)
+ {
+ // Empty
+ }
+
+
+ /// hash_map
+ ///
+ /// Constructor which creates an empty container with allocator.
+ ///
+ explicit hash_map(const allocator_type& allocator)
+ : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(),
+ Predicate(), eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_map
+ ///
+ /// Constructor which creates an empty container, but start with nBucketCount buckets.
+ /// We default to a small nBucketCount value, though the user really should manually
+ /// specify an appropriate value in order to prevent memory from being reallocated.
+ ///
+ explicit hash_map(size_type nBucketCount, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR)
+ : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ hash_map(const this_type& x)
+ : base_type(x)
+ {
+ }
+
+
+ hash_map(this_type&& x)
+ : base_type(eastl::move(x))
+ {
+ }
+
+
+ hash_map(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator)
+ {
+ }
+
+
+ /// hash_map
+ ///
+ /// initializer_list-based constructor.
+ /// Allows for initializing with brace values (e.g. hash_map<int, char*> hm = { {3,"c"}, {4,"d"}, {5,"e"} }; )
+ ///
+ hash_map(std::initializer_list<value_type> ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR)
+ : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_map
+ ///
+ /// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+ /// elements in the input range.
+ ///
+ template <typename ForwardIterator>
+ hash_map(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR)
+ : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ this_type& operator=(const this_type& x)
+ {
+ return static_cast<this_type&>(base_type::operator=(x));
+ }
+
+
+ this_type& operator=(std::initializer_list<value_type> ilist)
+ {
+ return static_cast<this_type&>(base_type::operator=(ilist));
+ }
+
+
+ this_type& operator=(this_type&& x)
+ {
+ return static_cast<this_type&>(base_type::operator=(eastl::move(x)));
+ }
+
+
+ /// insert
+ ///
+ /// This is an extension to the C++ standard. We insert a default-constructed
+ /// element with the given key. The reason for this is that we can avoid the
+ /// potentially expensive operation of creating and/or copying a mapped_type
+ /// object on the stack.
+ insert_return_type insert(const key_type& key)
+ {
+ return base_type::DoInsertKey(true_type(), key);
+ }
+
+ T& at(const key_type& k)
+ {
+ iterator it = base_type::find(k);
+
+ if (it == base_type::end())
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ // throw exeption if exceptions enabled
+ throw std::out_of_range("invalid hash_map<K, T> key");
+ #else
+ // assert false if asserts enabled
+ EASTL_ASSERT_MSG(false, "invalid hash_map<K, T> key");
+ #endif
+ }
+ // undefined behaviour if exceptions and asserts are disabled and it == end()
+ return it->second;
+ }
+
+
+ const T& at(const key_type& k) const
+ {
+ const_iterator it = base_type::find(k);
+
+ if (it == base_type::end())
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ // throw exeption if exceptions enabled
+ throw std::out_of_range("invalid hash_map<K, T> key");
+ #else
+ // assert false if asserts enabled
+ EASTL_ASSERT_MSG(false, "invalid hash_map<K, T> key");
+ #endif
+ }
+ // undefined behaviour if exceptions and asserts are disabled and it == end()
+ return it->second;
+ }
+
+
+ insert_return_type insert(key_type&& key)
+ {
+ return base_type::DoInsertKey(true_type(), eastl::move(key));
+ }
+
+
+ mapped_type& operator[](const key_type& key)
+ {
+ return (*base_type::DoInsertKey(true_type(), key).first).second;
+
+ // Slower reference version:
+ //const typename base_type::iterator it = base_type::find(key);
+ //if(it != base_type::end())
+ // return (*it).second;
+ //return (*base_type::insert(value_type(key, mapped_type())).first).second;
+ }
+
+ mapped_type& operator[](key_type&& key)
+ {
+ // The Standard states that this function "inserts the value value_type(std::move(key), mapped_type())"
+ return (*base_type::DoInsertKey(true_type(), eastl::move(key)).first).second;
+ }
+
+ // try_emplace API added in C++17
+ template <class... Args>
+ inline insert_return_type try_emplace(const key_type& k, Args&&... args)
+ {
+ return try_emplace_forwarding(k, eastl::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ inline insert_return_type try_emplace(key_type&& k, Args&&... args) {
+ return try_emplace_forwarding(eastl::move(k), eastl::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ inline iterator try_emplace(const_iterator, const key_type& k, Args&&... args) {
+ // Currently, the first parameter is ignored.
+ insert_return_type result = try_emplace(k, eastl::forward<Args>(args)...);
+ return base_type::DoGetResultIterator(true_type(), result);
+ }
+
+ template <class... Args>
+ inline iterator try_emplace(const_iterator, key_type&& k, Args&&... args) {
+ // Currently, the first parameter is ignored.
+ insert_return_type result = try_emplace(eastl::move(k), eastl::forward<Args>(args)...);
+ return base_type::DoGetResultIterator(true_type(), result);
+ }
+
+ private:
+ template <class K, class... Args>
+ insert_return_type try_emplace_forwarding(K&& k, Args&&... args)
+ {
+ const auto key_data = base_type::DoFindKeyData(k);
+ if (key_data.node)
+ { // Node exists, no insertion needed.
+ return eastl::pair<iterator, bool>(
+ iterator(key_data.node, base_type::mpBucketArray + key_data.bucket_index), false);
+ }
+ else
+ {
+ node_type* const pNodeNew =
+ base_type::DoAllocateNode(piecewise_construct, eastl::forward_as_tuple(eastl::forward<K>(k)),
+ forward_as_tuple(eastl::forward<Args>(args)...));
+ // the key might have been moved from above, so we can't use `k` anymore.
+ const auto& key = base_type::mExtractKey(pNodeNew->mValue);
+ return base_type::template DoInsertUniqueNode<true>(key, key_data.code, key_data.bucket_index, pNodeNew);
+ }
+ }
+ }; // hash_map
+
+ /// hash_map erase_if
+ ///
+ /// https://en.cppreference.com/w/cpp/container/unordered_map/erase_if
+ template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode, typename UserPredicate>
+ typename eastl::hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::size_type erase_if(eastl::hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& c, UserPredicate predicate)
+ {
+ auto oldSize = c.size();
+ // Erases all elements that satisfy the predicate from the container.
+ for (auto i = c.begin(), last = c.end(); i != last;)
+ {
+ if (predicate(*i))
+ {
+ i = c.erase(i);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ return oldSize - c.size();
+ }
+
+
+ /// hash_multimap
+ ///
+ /// Implements a hash_multimap, which is the same thing as a hash_map
+ /// except that contained elements need not be unique. See the
+ /// documentation for hash_set for details.
+ ///
+ template <typename Key, typename T, typename Hash = eastl::hash<Key>, typename Predicate = eastl::equal_to<Key>,
+ typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+ class hash_multimap
+ : public hashtable<Key, eastl::pair<const Key, T>, Allocator, eastl::use_first<eastl::pair<const Key, T> >, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash, prime_rehash_policy, bCacheHashCode, true, false>
+ {
+ public:
+ typedef hashtable<Key, eastl::pair<const Key, T>, Allocator,
+ eastl::use_first<eastl::pair<const Key, T> >,
+ Predicate, Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, true, false> base_type;
+ typedef hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::key_type key_type;
+ typedef T mapped_type;
+ typedef typename base_type::value_type value_type; // Note that this is pair<const key_type, mapped_type>.
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::insert_return_type insert_return_type;
+ typedef typename base_type::iterator iterator;
+
+ using base_type::insert;
+
+ private:
+ using base_type::insert_or_assign;
+
+ public:
+ /// hash_multimap
+ ///
+ /// Default constructor.
+ ///
+ explicit hash_multimap(const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR)
+ : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(),
+ Predicate(), eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_multimap
+ ///
+ /// Constructor which creates an empty container, but start with nBucketCount buckets.
+ /// We default to a small nBucketCount value, though the user really should manually
+ /// specify an appropriate value in order to prevent memory from being reallocated.
+ ///
+ explicit hash_multimap(size_type nBucketCount, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR)
+ : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ hash_multimap(const this_type& x)
+ : base_type(x)
+ {
+ }
+
+
+ hash_multimap(this_type&& x)
+ : base_type(eastl::move(x))
+ {
+ }
+
+
+ hash_multimap(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator)
+ {
+ }
+
+
+ /// hash_multimap
+ ///
+ /// initializer_list-based constructor.
+ /// Allows for initializing with brace values (e.g. hash_multimap<int, char*> hm = { {3,"c"}, {3,"C"}, {4,"d"} }; )
+ ///
+ hash_multimap(std::initializer_list<value_type> ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR)
+ : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_multimap
+ ///
+ /// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+ /// elements in the input range.
+ ///
+ template <typename ForwardIterator>
+ hash_multimap(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR)
+ : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ this_type& operator=(const this_type& x)
+ {
+ return static_cast<this_type&>(base_type::operator=(x));
+ }
+
+
+ this_type& operator=(std::initializer_list<value_type> ilist)
+ {
+ return static_cast<this_type&>(base_type::operator=(ilist));
+ }
+
+
+ this_type& operator=(this_type&& x)
+ {
+ return static_cast<this_type&>(base_type::operator=(eastl::move(x)));
+ }
+
+
+ /// insert
+ ///
+ /// This is an extension to the C++ standard. We insert a default-constructed
+ /// element with the given key. The reason for this is that we can avoid the
+ /// potentially expensive operation of creating and/or copying a mapped_type
+ /// object on the stack.
+ insert_return_type insert(const key_type& key)
+ {
+ return base_type::DoInsertKey(false_type(), key);
+ }
+
+
+ insert_return_type insert(key_type&& key)
+ {
+ return base_type::DoInsertKey(false_type(), eastl::move(key));
+ }
+
+ }; // hash_multimap
+
+ /// hash_multimap erase_if
+ ///
+ /// https://en.cppreference.com/w/cpp/container/unordered_multimap/erase_if
+ template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode, typename UserPredicate>
+ typename eastl::hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::size_type erase_if(eastl::hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& c, UserPredicate predicate)
+ {
+ auto oldSize = c.size();
+ // Erases all elements that satisfy the predicate from the container.
+ for (auto i = c.begin(), last = c.end(); i != last;)
+ {
+ if (predicate(*i))
+ {
+ i = c.erase(i);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ return oldSize - c.size();
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+ inline bool operator==(const hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& a,
+ const hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& b)
+ {
+ typedef typename hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::const_iterator const_iterator;
+
+ // We implement branching with the assumption that the return value is usually false.
+ if(a.size() != b.size())
+ return false;
+
+ // For map (with its unique keys), we need only test that each element in a can be found in b,
+ // as there can be only one such pairing per element. multimap needs to do a something more elaborate.
+ for(const_iterator ai = a.begin(), aiEnd = a.end(), biEnd = b.end(); ai != aiEnd; ++ai)
+ {
+ const_iterator bi = b.find(ai->first);
+
+ if((bi == biEnd) || !(*ai == *bi)) // We have to compare the values, because lookups are done by keys alone but the full value_type of a map is a key/value pair.
+ return false; // It's possible that two elements in the two containers have identical keys but different values.
+ }
+
+ return true;
+ }
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+ inline bool operator!=(const hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& a,
+ const hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& b)
+ {
+ return !(a == b);
+ }
+#endif
+
+ template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+ inline bool operator==(const hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& a,
+ const hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& b)
+ {
+ typedef typename hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::const_iterator const_iterator;
+ typedef typename eastl::iterator_traits<const_iterator>::difference_type difference_type;
+
+ // We implement branching with the assumption that the return value is usually false.
+ if(a.size() != b.size())
+ return false;
+
+ // We can't simply search for each element of a in b, as it may be that the bucket for
+ // two elements in a has those same two elements in b but in different order (which should
+ // still result in equality). Also it's possible that one bucket in a has two elements which
+ // both match a solitary element in the equivalent bucket in b (which shouldn't result in equality).
+ eastl::pair<const_iterator, const_iterator> aRange;
+ eastl::pair<const_iterator, const_iterator> bRange;
+
+ for(const_iterator ai = a.begin(), aiEnd = a.end(); ai != aiEnd; ai = aRange.second) // For each element in a...
+ {
+ aRange = a.equal_range(ai->first); // Get the range of elements in a that are equal to ai.
+ bRange = b.equal_range(ai->first); // Get the range of elements in b that are equal to ai.
+
+ // We need to verify that aRange == bRange. First make sure the range sizes are equivalent...
+ const difference_type aDistance = eastl::distance(aRange.first, aRange.second);
+ const difference_type bDistance = eastl::distance(bRange.first, bRange.second);
+
+ if(aDistance != bDistance)
+ return false;
+
+ // At this point, aDistance > 0 and aDistance == bDistance.
+ // Implement a fast pathway for the case that there's just a single element.
+ if(aDistance == 1)
+ {
+ if(!(*aRange.first == *bRange.first)) // We have to compare the values, because lookups are done by keys alone but the full value_type of a map is a key/value pair.
+ return false; // It's possible that two elements in the two containers have identical keys but different values. Ditto for the permutation case below.
+ }
+ else
+ {
+ // Check to see if these aRange and bRange are any permutation of each other.
+ // This check gets slower as there are more elements in the range.
+ if(!eastl::is_permutation(aRange.first, aRange.second, bRange.first))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+ inline bool operator!=(const hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& a,
+ const hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& b)
+ {
+ return !(a == b);
+ }
+#endif
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/hash_set.h b/EASTL/include/EASTL/hash_set.h
new file mode 100644
index 0000000..3215d36
--- /dev/null
+++ b/EASTL/include/EASTL/hash_set.h
@@ -0,0 +1,486 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file is based on the TR1 (technical report 1) reference implementation
+// of the unordered_set/unordered_map C++ classes as of about 4/2005. Most likely
+// many or all C++ library vendors' implementations of this classes will be
+// based off of the reference version and so will look pretty similar to this
+// file as well as other vendors' versions.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_HASH_SET_H
+#define EASTL_HASH_SET_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/hashtable.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_HASH_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASH_SET_DEFAULT_NAME
+ #define EASTL_HASH_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_set" // Unless the user overrides something, this is "EASTL hash_set".
+ #endif
+
+
+ /// EASTL_HASH_MULTISET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASH_MULTISET_DEFAULT_NAME
+ #define EASTL_HASH_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_multiset" // Unless the user overrides something, this is "EASTL hash_multiset".
+ #endif
+
+
+ /// EASTL_HASH_SET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASH_SET_DEFAULT_ALLOCATOR
+ #define EASTL_HASH_SET_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_SET_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR
+ #define EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MULTISET_DEFAULT_NAME)
+ #endif
+
+
+
+ /// hash_set
+ ///
+ /// Implements a hash_set, which is a hashed unique-item container.
+ /// Lookups are O(1) (that is, they are fast) but the container is
+ /// not sorted. Note that lookups are only O(1) if the hash table
+ /// is well-distributed (non-colliding). The lookup approaches
+ /// O(n) behavior as the table becomes increasingly poorly distributed.
+ ///
+ /// set_max_load_factor
+ /// If you want to make a hashtable never increase its bucket usage,
+ /// call set_max_load_factor with a very high value such as 100000.f.
+ ///
+ /// bCacheHashCode
+ /// We provide the boolean bCacheHashCode template parameter in order
+ /// to allow the storing of the hash code of the key within the map.
+ /// When this option is disabled, the rehashing of the table will
+ /// call the hash function on the key. Setting bCacheHashCode to true
+ /// is useful for cases whereby the calculation of the hash value for
+ /// a contained object is very expensive.
+ ///
+ /// find_as
+ /// In order to support the ability to have a hashtable of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the hashtable key type.
+ ///
+ /// Example find_as usage:
+ /// hash_set<string> hashSet;
+ /// i = hashSet.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example find_as usage (namespaces omitted for brevity):
+ /// hash_set<string> hashSet;
+ /// i = hashSet.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename Value, typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>,
+ typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+ class hash_set
+ : public hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, false, true>
+ {
+ public:
+ typedef hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, false, true> base_type;
+ typedef hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+
+ public:
+ /// hash_set
+ ///
+ /// Default constructor.
+ ///
+ hash_set()
+ : this_type(EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+ {
+ // Empty
+ }
+
+
+ /// hash_set
+ ///
+ /// Constructor which creates an empty container with allocator.
+ ///
+ explicit hash_set(const allocator_type& allocator)
+ : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_set
+ ///
+ /// Constructor which creates an empty container, but start with nBucketCount buckets.
+ /// We default to a small nBucketCount value, though the user really should manually
+ /// specify an appropriate value in order to prevent memory from being reallocated.
+ ///
+ explicit hash_set(size_type nBucketCount, const Hash& hashFunction = Hash(), const Predicate& predicate = Predicate(),
+ const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+ : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ hash_set(const this_type& x)
+ : base_type(x)
+ {
+ }
+
+
+ hash_set(this_type&& x)
+ : base_type(eastl::move(x))
+ {
+ }
+
+
+ hash_set(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator)
+ {
+ }
+
+
+ /// hash_set
+ ///
+ /// initializer_list-based constructor.
+ /// Allows for initializing with brace values (e.g. hash_set<int> hs = { 3, 4, 5, }; )
+ ///
+ hash_set(std::initializer_list<value_type> ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+ : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_set
+ ///
+ /// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+ /// elements in the input range.
+ ///
+ template <typename FowardIterator>
+ hash_set(FowardIterator first, FowardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+ : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ this_type& operator=(const this_type& x)
+ {
+ return static_cast<this_type&>(base_type::operator=(x));
+ }
+
+
+ this_type& operator=(std::initializer_list<value_type> ilist)
+ {
+ return static_cast<this_type&>(base_type::operator=(ilist));
+ }
+
+
+ this_type& operator=(this_type&& x)
+ {
+ return static_cast<this_type&>(base_type::operator=(eastl::move(x)));
+ }
+
+ }; // hash_set
+
+ /// hash_set erase_if
+ ///
+ /// https://en.cppreference.com/w/cpp/container/unordered_set/erase_if
+ template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode, typename UserPredicate>
+ typename eastl::hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>::size_type erase_if(eastl::hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& c, UserPredicate predicate)
+ {
+ auto oldSize = c.size();
+ // Erases all elements that satisfy the predicate pred from the container.
+ for (auto i = c.begin(), last = c.end(); i != last;)
+ {
+ if (predicate(*i))
+ {
+ i = c.erase(i);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ return oldSize - c.size();
+ }
+
+
+ /// hash_multiset
+ ///
+ /// Implements a hash_multiset, which is the same thing as a hash_set
+ /// except that contained elements need not be unique. See the documentation
+ /// for hash_set for details.
+ ///
+ template <typename Value, typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>,
+ typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+ class hash_multiset
+ : public hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, false, false>
+ {
+ public:
+ typedef hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, false, false> base_type;
+ typedef hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+
+ public:
+ /// hash_multiset
+ ///
+ /// Default constructor.
+ ///
+ explicit hash_multiset(const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR)
+ : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_multiset
+ ///
+ /// Constructor which creates an empty container, but start with nBucketCount buckets.
+ /// We default to a small nBucketCount value, though the user really should manually
+ /// specify an appropriate value in order to prevent memory from being reallocated.
+ ///
+ explicit hash_multiset(size_type nBucketCount, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR)
+ : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ hash_multiset(const this_type& x)
+ : base_type(x)
+ {
+ }
+
+
+ hash_multiset(this_type&& x)
+ : base_type(eastl::move(x))
+ {
+ }
+
+
+ hash_multiset(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator)
+ {
+ }
+
+
+ /// hash_multiset
+ ///
+ /// initializer_list-based constructor.
+ /// Allows for initializing with brace values (e.g. hash_set<int> hs = { 3, 3, 4, }; )
+ ///
+ hash_multiset(std::initializer_list<value_type> ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR)
+ : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_multiset
+ ///
+ /// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+ /// elements in the input range.
+ ///
+ template <typename FowardIterator>
+ hash_multiset(FowardIterator first, FowardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR)
+ : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ this_type& operator=(const this_type& x)
+ {
+ return static_cast<this_type&>(base_type::operator=(x));
+ }
+
+
+ this_type& operator=(std::initializer_list<value_type> ilist)
+ {
+ return static_cast<this_type&>(base_type::operator=(ilist));
+ }
+
+
+ this_type& operator=(this_type&& x)
+ {
+ return static_cast<this_type&>(base_type::operator=(eastl::move(x)));
+ }
+
+ }; // hash_multiset
+
+ /// hash_multiset erase_if
+ ///
+ /// https://en.cppreference.com/w/cpp/container/unordered_multiset/erase_if
+ template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode, typename UserPredicate>
+ typename eastl::hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>::size_type erase_if(eastl::hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& c, UserPredicate predicate)
+ {
+ auto oldSize = c.size();
+ // Erases all elements that satisfy the predicate pred from the container.
+ for (auto i = c.begin(), last = c.end(); i != last;)
+ {
+ if (predicate(*i))
+ {
+ i = c.erase(i);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ return oldSize - c.size();
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+ inline bool operator==(const hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& a,
+ const hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& b)
+ {
+ typedef typename hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>::const_iterator const_iterator;
+
+ // We implement branching with the assumption that the return value is usually false.
+ if(a.size() != b.size())
+ return false;
+
+ // For set (with its unique keys), we need only test that each element in a can be found in b,
+ // as there can be only one such pairing per element. multiset needs to do a something more elaborate.
+ for(const_iterator ai = a.begin(), aiEnd = a.end(), biEnd = b.end(); ai != aiEnd; ++ai)
+ {
+ const_iterator bi = b.find(*ai);
+
+ if((bi == biEnd) || !(*ai == *bi)) // We have to compare values in addition to making sure the lookups succeeded. This is because the lookup is done via the user-supplised Predicate
+ return false; // which isn't strictly required to be identical to the Value operator==, though 99% of the time it will be so.
+ }
+
+ return true;
+ }
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+ inline bool operator!=(const hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& a,
+ const hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& b)
+ {
+ return !(a == b);
+ }
+#endif
+
+ template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+ inline bool operator==(const hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& a,
+ const hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& b)
+ {
+ typedef typename hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>::const_iterator const_iterator;
+ typedef typename eastl::iterator_traits<const_iterator>::difference_type difference_type;
+
+ // We implement branching with the assumption that the return value is usually false.
+ if(a.size() != b.size())
+ return false;
+
+ // We can't simply search for each element of a in b, as it may be that the bucket for
+ // two elements in a has those same two elements in b but in different order (which should
+ // still result in equality). Also it's possible that one bucket in a has two elements which
+ // both match a solitary element in the equivalent bucket in b (which shouldn't result in equality).
+ eastl::pair<const_iterator, const_iterator> aRange;
+ eastl::pair<const_iterator, const_iterator> bRange;
+
+ for(const_iterator ai = a.begin(), aiEnd = a.end(); ai != aiEnd; ai = aRange.second) // For each element in a...
+ {
+ aRange = a.equal_range(*ai); // Get the range of elements in a that are equal to ai.
+ bRange = b.equal_range(*ai); // Get the range of elements in b that are equal to ai.
+
+ // We need to verify that aRange == bRange. First make sure the range sizes are equivalent...
+ const difference_type aDistance = eastl::distance(aRange.first, aRange.second);
+ const difference_type bDistance = eastl::distance(bRange.first, bRange.second);
+
+ if(aDistance != bDistance)
+ return false;
+
+ // At this point, aDistance > 0 and aDistance == bDistance.
+ // Implement a fast pathway for the case that there's just a single element.
+ if(aDistance == 1)
+ {
+ if(!(*aRange.first == *bRange.first)) // We have to compare values in addition to making sure the distance (element count) was equal. This is because the lookup is done via the user-supplised Predicate
+ return false; // which isn't strictly required to be identical to the Value operator==, though 99% of the time it will be so. Ditto for the is_permutation usage below.
+ }
+ else
+ {
+ // Check to see if these aRange and bRange are any permutation of each other.
+ // This check gets slower as there are more elements in the range.
+ if(!eastl::is_permutation(aRange.first, aRange.second, bRange.first))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+ inline bool operator!=(const hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& a,
+ const hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& b)
+ {
+ return !(a == b);
+ }
+#endif
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/heap.h b/EASTL/include/EASTL/heap.h
new file mode 100644
index 0000000..a8e4260
--- /dev/null
+++ b/EASTL/include/EASTL/heap.h
@@ -0,0 +1,685 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements heap functionality much like the std C++ heap algorithms.
+// Such heaps are not the same thing as memory heaps or pools, but rather are
+// semi-sorted random access containers which have the primary purpose of
+// supporting the implementation of priority_queue and similar data structures.
+//
+// The primary distinctions between this heap functionality and std::heap are:
+// - This heap exposes some extra functionality such as is_heap and change_heap.
+// - This heap is more efficient than versions found in typical STL
+// implementations such as STLPort, Microsoft, and Metrowerks. This comes
+// about due to better use of array dereferencing and branch prediction.
+// You should expect of 5-30%, depending on the usage and platform.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// The publicly usable functions we define are:
+// push_heap -- Adds an entry to a heap. Same as C++ std::push_heap.
+// pop_heap -- Removes the top entry from a heap. Same as C++ std::pop_heap.
+// make_heap -- Converts an array to a heap. Same as C++ std::make_heap.
+// sort_heap -- Sorts a heap in place. Same as C++ std::sort_heap.
+// remove_heap -- Removes an arbitrary entry from a heap.
+// change_heap -- Changes the priority of an entry in the heap.
+// is_heap -- Returns true if an array appears is in heap format. Same as C++11 std::is_heap.
+// is_heap_until -- Returns largest part of the range which is a heap. Same as C++11 std::is_heap_until.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_HEAP_H
+#define EASTL_HEAP_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // promote_heap (internal function)
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename RandomAccessIterator, typename Distance, typename T, typename ValueType>
+ inline void promote_heap_impl(RandomAccessIterator first, Distance topPosition, Distance position, T value)
+ {
+ for(Distance parentPosition = (position - 1) >> 1; // This formula assumes that (position > 0). // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+ (position > topPosition) && eastl::less<ValueType>()(*(first + parentPosition), value);
+ parentPosition = (position - 1) >> 1)
+ {
+ *(first + position) = eastl::forward<ValueType>(*(first + parentPosition)); // Swap the node with its parent.
+ position = parentPosition;
+ }
+
+ *(first + position) = eastl::forward<ValueType>(value);
+ }
+
+ /// promote_heap
+ ///
+ /// Moves a value in the heap from a given position upward until
+ /// it is sorted correctly. It's kind of like bubble-sort, except that
+ /// instead of moving linearly from the back of a list to the front,
+ /// it moves from the bottom of the tree up the branches towards the
+ /// top. But otherwise is just like bubble-sort.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T>
+ inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, const T& value)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+ promote_heap_impl<RandomAccessIterator, Distance, const T&, const value_type>(first, topPosition, position, value);
+ }
+
+
+ /// promote_heap
+ ///
+ /// Moves a value in the heap from a given position upward until
+ /// it is sorted correctly. It's kind of like bubble-sort, except that
+ /// instead of moving linearly from the back of a list to the front,
+ /// it moves from the bottom of the tree up the branches towards the
+ /// top. But otherwise is just like bubble-sort.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T>
+ inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, T&& value)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+ promote_heap_impl<RandomAccessIterator, Distance, T&&, value_type>(first, topPosition, position, eastl::forward<T>(value));
+ }
+
+
+ template <typename RandomAccessIterator, typename Distance, typename T, typename Compare, typename ValueType>
+ inline void promote_heap_impl(RandomAccessIterator first, Distance topPosition, Distance position, T value, Compare compare)
+ {
+ for(Distance parentPosition = (position - 1) >> 1; // This formula assumes that (position > 0). // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+ (position > topPosition) && compare(*(first + parentPosition), value);
+ parentPosition = (position - 1) >> 1)
+ {
+ *(first + position) = eastl::forward<ValueType>(*(first + parentPosition)); // Swap the node with its parent.
+ position = parentPosition;
+ }
+
+ *(first + position) = eastl::forward<ValueType>(value);
+ }
+
+
+ /// promote_heap
+ ///
+ /// Takes a Compare(a, b) function (or function object) which returns true if a < b.
+ /// For example, you could use the standard 'less' comparison object.
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T, typename Compare>
+ inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, const T& value, Compare compare)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+ promote_heap_impl<RandomAccessIterator, Distance, const T&, Compare, const value_type>(first, topPosition, position, value, compare);
+ }
+
+
+ /// promote_heap
+ ///
+ /// Takes a Compare(a, b) function (or function object) which returns true if a < b.
+ /// For example, you could use the standard 'less' comparison object.
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T, typename Compare>
+ inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, T&& value, Compare compare)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+ promote_heap_impl<RandomAccessIterator, Distance, T&&, Compare, value_type>(first, topPosition, position, eastl::forward<T>(value), compare);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // adjust_heap (internal function)
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename RandomAccessIterator, typename Distance, typename T, typename ValueType>
+ void adjust_heap_impl(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T value)
+ {
+ // We do the conventional approach of moving the position down to the
+ // bottom then inserting the value at the back and moving it up.
+ Distance childPosition = (2 * position) + 2;
+
+ for(; childPosition < heapSize; childPosition = (2 * childPosition) + 2)
+ {
+ if(eastl::less<ValueType>()(*(first + childPosition), *(first + (childPosition - 1)))) // Choose the larger of the two children.
+ --childPosition;
+ *(first + position) = eastl::forward<ValueType>(*(first + childPosition)); // Swap positions with this child.
+ position = childPosition;
+ }
+
+ if(childPosition == heapSize) // If we are at the very last index of the bottom...
+ {
+ *(first + position) = eastl::forward<ValueType>(*(first + (childPosition - 1)));
+ position = childPosition - 1;
+ }
+
+ eastl::promote_heap<RandomAccessIterator, Distance, T>(first, topPosition, position, eastl::forward<ValueType>(value));
+ }
+
+ /// adjust_heap
+ ///
+ /// Given a position that has just been vacated, this function moves
+ /// new values into that vacated position appropriately. The value
+ /// argument is an entry which will be inserted into the heap after
+ /// we move nodes into the positions that were vacated.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T>
+ void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, const T& value)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+ adjust_heap_impl<RandomAccessIterator, Distance, const T&, const value_type>(first, topPosition, heapSize, position, eastl::forward<const T&>(value));
+ }
+
+
+ /// adjust_heap
+ ///
+ /// Given a position that has just been vacated, this function moves
+ /// new values into that vacated position appropriately. The value
+ /// argument is an entry which will be inserted into the heap after
+ /// we move nodes into the positions that were vacated.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T>
+ void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T&& value)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+ adjust_heap_impl<RandomAccessIterator, Distance, T&&, value_type>(first, topPosition, heapSize, position, eastl::forward<T>(value));
+ }
+
+
+ template <typename RandomAccessIterator, typename Distance, typename T, typename Compare, typename ValueType>
+ void adjust_heap_impl(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T value, Compare compare)
+ {
+ // We do the conventional approach of moving the position down to the
+ // bottom then inserting the value at the back and moving it up.
+ Distance childPosition = (2 * position) + 2;
+
+ for(; childPosition < heapSize; childPosition = (2 * childPosition) + 2)
+ {
+ if(compare(*(first + childPosition), *(first + (childPosition - 1)))) // Choose the larger of the two children.
+ --childPosition;
+ *(first + position) = eastl::forward<ValueType>(*(first + childPosition)); // Swap positions with this child.
+ position = childPosition;
+ }
+
+ if(childPosition == heapSize) // If we are at the bottom...
+ {
+ *(first + position) = eastl::forward<ValueType>(*(first + (childPosition - 1)));
+ position = childPosition - 1;
+ }
+
+ eastl::promote_heap<RandomAccessIterator, Distance, T, Compare>(first, topPosition, position, eastl::forward<ValueType>(value), compare);
+ }
+
+ /// adjust_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T, typename Compare>
+ void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, const T& value, Compare compare)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+ adjust_heap_impl<RandomAccessIterator, Distance, const T&, Compare, const value_type>(first, topPosition, heapSize, position, eastl::forward<const T&>(value), compare);
+ }
+
+
+ /// adjust_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T, typename Compare>
+ void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T&& value, Compare compare)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+ adjust_heap_impl<RandomAccessIterator, Distance, T&&, Compare, value_type>(first, topPosition, heapSize, position, eastl::forward<T>(value), compare);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // push_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// push_heap
+ ///
+ /// Adds an item to a heap (which is an array). The item necessarily
+ /// comes from the back of the heap (array). Thus, the insertion of a
+ /// new item in a heap is a two step process: push_back and push_heap.
+ ///
+ /// Example usage:
+ /// vector<int> heap;
+ ///
+ /// heap.push_back(3);
+ /// push_heap(heap.begin(), heap.end()); // Places '3' appropriately.
+ ///
+ template <typename RandomAccessIterator>
+ inline void push_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(eastl::forward<value_type>(*(last - 1)));
+
+ eastl::promote_heap<RandomAccessIterator, difference_type, value_type>
+ (first, (difference_type)0, (difference_type)(last - first - 1), eastl::forward<const value_type>(tempBottom));
+ }
+
+
+ /// push_heap
+ ///
+ /// This version is useful for cases where your object comparison is unusual
+ /// or where you want to have the heap store pointers to objects instead of
+ /// storing the objects themselves (often in order to improve cache coherency
+ /// while doing sorting).
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline void push_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(last - 1));
+
+ eastl::promote_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, (difference_type)0, (difference_type)(last - first - 1), tempBottom, compare);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pop_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// pop_heap
+ ///
+ /// Removes the first item from the heap (which is an array), and adjusts
+ /// the heap so that the highest priority item becomes the new first item.
+ ///
+ /// Example usage:
+ /// vector<int> heap;
+ ///
+ /// heap.push_back(2);
+ /// heap.push_back(3);
+ /// heap.push_back(1);
+ /// <use heap[0], which is the highest priority item in the heap>
+ /// pop_heap(heap.begin(), heap.end()); // Moves heap[0] to the back of the heap and adjusts the heap.
+ /// heap.pop_back(); // Remove value that was just at the top of the heap
+ ///
+ template <typename RandomAccessIterator>
+ inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ value_type tempBottom(eastl::forward<value_type>(*(last - 1)));
+ *(last - 1) = eastl::forward<value_type>(*first);
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+ (first, (difference_type)0, (difference_type)(last - first - 1), 0, eastl::forward<value_type>(tempBottom));
+ }
+
+
+
+ /// pop_heap
+ ///
+ /// This version is useful for cases where your object comparison is unusual
+ /// or where you want to have the heap store pointers to objects instead of
+ /// storing the objects themselves (often in order to improve cache coherency
+ /// while doing sorting).
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ value_type tempBottom(eastl::forward<value_type>(*(last - 1)));
+ *(last - 1) = eastl::forward<value_type>(*first);
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, (difference_type)0, (difference_type)(last - first - 1), 0, eastl::forward<value_type>(tempBottom), compare);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // make_heap
+ ///////////////////////////////////////////////////////////////////////
+
+
+ /// make_heap
+ ///
+ /// Given an array, this function converts it into heap format.
+ /// The complexity is O(n), where n is count of the range.
+ /// The input range is not required to be in any order.
+ ///
+ template <typename RandomAccessIterator>
+ void make_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ // We do bottom-up heap construction as per Sedgewick. Such construction is O(n).
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const difference_type heapSize = last - first;
+
+ if(heapSize >= 2) // If there is anything to do... (we need this check because otherwise the math fails below).
+ {
+ difference_type parentPosition = ((heapSize - 2) >> 1) + 1; // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+
+ do{
+ --parentPosition;
+ value_type temp(eastl::forward<value_type>(*(first + parentPosition)));
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+ (first, parentPosition, heapSize, parentPosition, eastl::forward<value_type>(temp));
+ } while(parentPosition != 0);
+ }
+ }
+
+
+ template <typename RandomAccessIterator, typename Compare>
+ void make_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const difference_type heapSize = last - first;
+
+ if(heapSize >= 2) // If there is anything to do... (we need this check because otherwise the math fails below).
+ {
+ difference_type parentPosition = ((heapSize - 2) >> 1) + 1; // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+
+ do{
+ --parentPosition;
+ value_type temp(eastl::forward<value_type>(*(first + parentPosition)));
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, parentPosition, heapSize, parentPosition, eastl::forward<value_type>(temp), compare);
+ } while(parentPosition != 0);
+ }
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // sort_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// sort_heap
+ ///
+ /// After the application if this algorithm, the range it was applied to
+ /// is no longer a heap, though it will be a reverse heap (smallest first).
+ /// The item with the lowest priority will be first, and the highest last.
+ /// This is not a stable sort because the relative order of equivalent
+ /// elements is not necessarily preserved.
+ /// The range referenced must be valid; all pointers must be dereferenceable
+ /// and within the sequence the last position is reachable from the first
+ /// by incrementation.
+ /// The complexity is at most O(n * log(n)), where n is count of the range.
+ ///
+ template <typename RandomAccessIterator>
+ inline void sort_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ for(; (last - first) > 1; --last) // We simply use the heap to sort itself.
+ eastl::pop_heap<RandomAccessIterator>(first, last);
+ }
+
+
+ /// sort_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline void sort_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ for(; (last - first) > 1; --last) // We simply use the heap to sort itself.
+ eastl::pop_heap<RandomAccessIterator, Compare>(first, last, compare);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// remove_heap
+ ///
+ /// Removes an arbitrary entry from the heap and adjusts the heap appropriately.
+ /// This function is unlike pop_heap in that pop_heap moves the top item
+ /// to the back of the heap, whereas remove_heap moves an arbitrary item to
+ /// the back of the heap.
+ ///
+ /// Note: Since this function moves the element to the back of the heap and
+ /// doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container.
+ ///
+ template <typename RandomAccessIterator, typename Distance>
+ inline void remove_heap(RandomAccessIterator first, Distance heapSize, Distance position)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(first + heapSize - 1));
+ *(first + heapSize - 1) = *(first + position);
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+ (first, (difference_type)0, (difference_type)(heapSize - 1), (difference_type)position, tempBottom);
+ }
+
+
+ /// remove_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ /// Note: Since this function moves the element to the back of the heap and
+ /// doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename Compare>
+ inline void remove_heap(RandomAccessIterator first, Distance heapSize, Distance position, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(first + heapSize - 1));
+ *(first + heapSize - 1) = *(first + position);
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, (difference_type)0, (difference_type)(heapSize - 1), (difference_type)position, tempBottom, compare);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // change_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// change_heap
+ ///
+ /// Given a value in the heap that has changed in priority, this function
+ /// adjusts the heap appropriately. The heap size remains unchanged after
+ /// this operation.
+ ///
+ template <typename RandomAccessIterator, typename Distance>
+ inline void change_heap(RandomAccessIterator first, Distance heapSize, Distance position)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ eastl::remove_heap<RandomAccessIterator, Distance>(first, heapSize, position);
+
+ value_type tempBottom(*(first + heapSize - 1));
+
+ eastl::promote_heap<RandomAccessIterator, difference_type, value_type>
+ (first, (difference_type)0, (difference_type)(heapSize - 1), tempBottom);
+ }
+
+
+ /// change_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename Compare>
+ inline void change_heap(RandomAccessIterator first, Distance heapSize, Distance position, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ eastl::remove_heap<RandomAccessIterator, Distance, Compare>(first, heapSize, position, compare);
+
+ value_type tempBottom(*(first + heapSize - 1));
+
+ eastl::promote_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, (difference_type)0, (difference_type)(heapSize - 1), tempBottom, compare);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_heap_until
+ ///////////////////////////////////////////////////////////////////////
+
+ /// is_heap_until
+ ///
+ template <typename RandomAccessIterator>
+ inline RandomAccessIterator is_heap_until(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ int counter = 0;
+
+ for(RandomAccessIterator child = first + 1; child < last; ++child, counter ^= 1)
+ {
+ if(*first < *child) // We must use operator <, and are not allowed to use > or >= here.
+ return child;
+ first += counter; // counter switches between 0 and 1 every time through.
+ }
+
+ return last;
+ }
+
+
+ /// is_heap_until
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline RandomAccessIterator is_heap_until(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ int counter = 0;
+
+ for(RandomAccessIterator child = first + 1; child < last; ++child, counter ^= 1)
+ {
+ if(compare(*first, *child))
+ return child;
+ first += counter; // counter switches between 0 and 1 every time through.
+ }
+
+ return last;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// is_heap
+ ///
+ /// This is a useful debugging algorithm for verifying that a random
+ /// access container is in heap format.
+ ///
+ template <typename RandomAccessIterator>
+ inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ return (eastl::is_heap_until(first, last) == last);
+ }
+
+
+ /// is_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ return (eastl::is_heap_until(first, last, compare) == last);
+ }
+
+
+ // To consider: The following may be a faster implementation for most cases.
+ //
+ // template <typename RandomAccessIterator>
+ // inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last)
+ // {
+ // if(((uintptr_t)(last - first) & 1) == 0) // If the range has an even number of elements...
+ // --last;
+ //
+ // RandomAccessIterator parent = first, child = (first + 1);
+ //
+ // for(; child < last; child += 2, ++parent)
+ // {
+ // if((*parent < *child) || (*parent < *(child + 1)))
+ // return false;
+ // }
+ //
+ // if((((uintptr_t)(last - first) & 1) == 0) && (*parent < *child))
+ // return false;
+ //
+ // return true;
+ // }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/EASTL/include/EASTL/initializer_list.h b/EASTL/include/EASTL/initializer_list.h
new file mode 100644
index 0000000..028fb4f
--- /dev/null
+++ b/EASTL/include/EASTL/initializer_list.h
@@ -0,0 +1,96 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+//
+// This file #includes <initializer_list> if it's available, else it defines
+// its own version of std::initializer_list. It does not define eastl::initializer_list
+// because that would not provide any use, due to how the C++11 Standard works.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INITIALIZER_LIST_H
+#define EASTL_INITIALIZER_LIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EABase/eahave.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+#if defined(EA_HAVE_CPP11_INITIALIZER_LIST) // If the compiler can generate calls to std::initializer_list...
+
+ // The initializer_list type must be declared in the std namespace, as that's the
+ // namespace the compiler uses when generating code to use it.
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <initializer_list>
+ EA_RESTORE_ALL_VC_WARNINGS()
+
+#else
+
+ // If you get an error here about initializer_list being already defined, then the EA_HAVE_CPP11_INITIALIZER_LIST define from <EABase/eahave.h> needs to be updated.
+ namespace std
+ {
+ // See the C++11 Standard, section 18.9.
+ template<class E>
+ class initializer_list
+ {
+ public:
+ typedef E value_type;
+ typedef const E& reference;
+ typedef const E& const_reference;
+ typedef size_t size_type;
+ typedef const E* iterator; // Must be const, as initializer_list (and its mpArray) is an immutable temp object.
+ typedef const E* const_iterator;
+
+ private:
+ iterator mpArray;
+ size_type mArraySize;
+
+ // This constructor is private, but the C++ compiler has the ability to call it, as per the C++11 Standard.
+ initializer_list(const_iterator pArray, size_type arraySize)
+ : mpArray(pArray), mArraySize(arraySize) { }
+
+ public:
+ initializer_list() EA_NOEXCEPT // EA_NOEXCEPT requires a recent version of EABase.
+ : mpArray(NULL), mArraySize(0) { }
+
+ size_type size() const EA_NOEXCEPT { return mArraySize; }
+ const_iterator begin() const EA_NOEXCEPT { return mpArray; } // Must be const_iterator, as initializer_list (and its mpArray) is an immutable temp object.
+ const_iterator end() const EA_NOEXCEPT { return mpArray + mArraySize; }
+ };
+
+
+ template<class T>
+ const T* begin(std::initializer_list<T> ilist) EA_NOEXCEPT
+ {
+ return ilist.begin();
+ }
+
+ template<class T>
+ const T* end(std::initializer_list<T> ilist) EA_NOEXCEPT
+ {
+ return ilist.end();
+ }
+ }
+
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch.h b/EASTL/include/EASTL/internal/atomic/arch/arch.h
new file mode 100644
index 0000000..4924a59
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch.h
@@ -0,0 +1,65 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Include the architecture specific implementations
+//
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+
+ #include "x86/arch_x86.h"
+
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+
+ #include "arm/arch_arm.h"
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_fetch_add.h"
+#include "arch_fetch_sub.h"
+
+#include "arch_fetch_and.h"
+#include "arch_fetch_xor.h"
+#include "arch_fetch_or.h"
+
+#include "arch_add_fetch.h"
+#include "arch_sub_fetch.h"
+
+#include "arch_and_fetch.h"
+#include "arch_xor_fetch.h"
+#include "arch_or_fetch.h"
+
+#include "arch_exchange.h"
+
+#include "arch_cmpxchg_weak.h"
+#include "arch_cmpxchg_strong.h"
+
+#include "arch_load.h"
+#include "arch_store.h"
+
+#include "arch_compiler_barrier.h"
+
+#include "arch_cpu_pause.h"
+
+#include "arch_memory_barrier.h"
+
+#include "arch_signal_fence.h"
+
+#include "arch_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_add_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/arch_add_fetch.h
new file mode 100644
index 0000000..65771f8
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_add_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_and_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/arch_and_fetch.h
new file mode 100644
index 0000000..df7ba35
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_and_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h b/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h
new file mode 100644
index 0000000..1005dc3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h
@@ -0,0 +1,430 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h b/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h
new file mode 100644
index 0000000..5ce2638
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h
@@ -0,0 +1,430 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h b/EASTL/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h
new file mode 100644
index 0000000..0652469
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h
@@ -0,0 +1,19 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_ARCH_ATOMIC_COMPILER_BARRIER_AVAILABLE 0
+
+#define EASTL_ARCH_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 0
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_cpu_pause.h b/EASTL/include/EASTL/internal/atomic/arch/arch_cpu_pause.h
new file mode 100644
index 0000000..e8c2d1d
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_cpu_pause.h
@@ -0,0 +1,25 @@
+/////////////////////////////////////////////////////////////////////////////////
+// copyright (c) electronic arts inc. all rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_PAUSE()
+//
+#if defined(EASTL_ARCH_ATOMIC_CPU_PAUSE)
+ #define EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_exchange.h b/EASTL/include/EASTL/internal/atomic/arch/arch_exchange.h
new file mode 100644
index 0000000..7600318
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_exchange.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_add.h b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_add.h
new file mode 100644
index 0000000..71907f7
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_add.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_and.h b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_and.h
new file mode 100644
index 0000000..f2b39a4
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_and.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_or.h b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_or.h
new file mode 100644
index 0000000..dd6dd0d
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_or.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_sub.h b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_sub.h
new file mode 100644
index 0000000..ea63db7
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_sub.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_xor.h b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_xor.h
new file mode 100644
index 0000000..b41ad2d
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_xor.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_load.h b/EASTL/include/EASTL/internal/atomic/arch/arch_load.h
new file mode 100644
index 0000000..eea7cf4
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_load.h
@@ -0,0 +1,125 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32)
+ #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64)
+ #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_memory_barrier.h b/EASTL/include/EASTL/internal/atomic/arch/arch_memory_barrier.h
new file mode 100644
index 0000000..c6cc6bf
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_memory_barrier.h
@@ -0,0 +1,47 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_MB()
+//
+#if defined(EASTL_ARCH_ATOMIC_CPU_MB)
+ #define EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_WMB()
+//
+#if defined(EASTL_ARCH_ATOMIC_CPU_WMB)
+ #define EASTL_ARCH_ATOMIC_CPU_WMB_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CPU_WMB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_RMB()
+//
+#if defined(EASTL_ARCH_ATOMIC_CPU_RMB)
+ #define EASTL_ARCH_ATOMIC_CPU_RMB_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CPU_RMB_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_or_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/arch_or_fetch.h
new file mode 100644
index 0000000..110326b
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_or_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_signal_fence.h b/EASTL/include/EASTL/internal/atomic/arch/arch_signal_fence.h
new file mode 100644
index 0000000..65b64fc
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_signal_fence.h
@@ -0,0 +1,21 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 0
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 0
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 0
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 0
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 0
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_store.h b/EASTL/include/EASTL/internal/atomic/arch/arch_store.h
new file mode 100644
index 0000000..9a4112c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_store.h
@@ -0,0 +1,113 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_STORE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_STORE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_sub_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/arch_sub_fetch.h
new file mode 100644
index 0000000..20241b1
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_sub_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_thread_fence.h b/EASTL/include/EASTL/internal/atomic/arch/arch_thread_fence.h
new file mode 100644
index 0000000..676fbf1
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_thread_fence.h
@@ -0,0 +1,49 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*()
+//
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arch_xor_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/arch_xor_fetch.h
new file mode 100644
index 0000000..63548c2
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arch_xor_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm.h b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm.h
new file mode 100644
index 0000000..cc2ce52
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm.h
@@ -0,0 +1,89 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/**
+ * NOTE: We use this mapping
+ *
+ * ARMv7 Mapping 'trailing sync;':
+ *
+ * Load Relaxed : ldr
+ * Load Acquire : ldr; dmb ish
+ * Load Seq_Cst : ldr; dmb ish
+ *
+ * Store Relaxed : str
+ * Store Release : dmb ish; str
+ * Store Seq_Cst : dmb ish; str; dmb ish
+ *
+ * Relaxed Fence :
+ * Acquire Fence : dmb ish
+ * Release Fence : dmb ish
+ * Acq_Rel Fence : dmb ish
+ * Seq_Cst Fence : dmb ish
+ */
+
+/**
+ * ARMv7 Mapping 'leading sync;';
+ *
+ * Load Relaxed : ldr
+ * Load Acquire : ldr; dmb ish
+ * Load Seq_Cst : dmb ish; ldr; dmb ish
+ *
+ * Store Relaxed : str
+ * Store Release : dmb ish; str
+ * Store Seq_Cst : dmb ish: str
+ *
+ * Relaxed Fence :
+ * Acquire Fence : dmb ish
+ * Release Fence : dmb ish
+ * Acq_Rel Fence : dmb ish
+ * Seq_Cst Fence : dmb ish
+ */
+
+/**
+ * NOTE:
+ *
+ * On ARM32/64, we use the 'trailing sync;' convention with the stricter load acquire that uses
+ * a dmb instead of a control dependency + isb to ensure the IRIW litmus test is satisfied
+ * as one reason. See EASTL/atomic.h for futher explanation and deep-dive.
+ *
+ * For ARMv8 we could move to use the new proper store release and load acquire, RCsc variant.
+ * All ARMv7 approaches work on ARMv8 and this code path is only used on msvc which isn't used
+ * heavily. Most of the ARM code will end up going thru clang or gcc since microsoft arm devices
+ * aren't that abundant.
+ */
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EA_COMPILER_MSVC)
+
+ #if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+ #endif
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_arm_load.h"
+#include "arch_arm_store.h"
+
+#include "arch_arm_memory_barrier.h"
+
+#include "arch_arm_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
new file mode 100644
index 0000000..e3b79b8
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
@@ -0,0 +1,156 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+ /**
+ * NOTE:
+ *
+ * Even 8-byte aligned 64-bit memory accesses on ARM32 are not
+ * guaranteed to be atomic on all ARM32 cpus. Only guaranteed on
+ * cpus with the LPAE extension. We need to use a
+ * ldrexd instruction in order to ensure no shearing is observed
+ * for all ARM32 processors.
+ */
+ #if defined(EA_PROCESSOR_ARM32)
+
+ #define EASTL_ARCH_ATOMIC_ARM32_LDREXD(ret, ptr) \
+ ret = __ldrexd((ptr))
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_N(integralType, bits, type, ret, ptr) \
+ { \
+ integralType retIntegral; \
+ retIntegral = EA_PREPROCESSOR_JOIN(__iso_volatile_load, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int8, 8, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int16, 16, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int32, 32, type, ret, ptr)
+
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_64(type, ret, ptr) \
+ { \
+ __int64 loadRet64; \
+ EASTL_ARCH_ATOMIC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadRet64); \
+ }
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int64, 64, type, ret, ptr)
+
+ #endif
+
+
+ /**
+ * NOTE:
+ *
+ * The ARM documentation states the following:
+ * A 64-bit pair requires the address to be quadword aligned and is single-copy atomic for each doubleword at doubleword granularity
+ *
+ * Thus we must ensure the store succeeds inorder for the load to be observed as atomic.
+ * Thus we must use the full cmpxchg in order to do a proper atomic load.
+ */
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, MemoryOrder) \
+ { \
+ bool cmpxchgRetBool; \
+ ret = *(ptr); \
+ do \
+ { \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, \
+ ptr, &(ret), ret); \
+ } while (!cmpxchgRetBool); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, RELAXED)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, ACQUIRE)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, SEQ_CST)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h
new file mode 100644
index 0000000..44dc991
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h
@@ -0,0 +1,97 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_COMPILER_MSVC) && !defined(EA_COMPILER_CLANG_CL)
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+ #define EASTL_ARM_DMB_ISH _ARM_BARRIER_ISH
+
+ #define EASTL_ARM_DMB_ISHST _ARM_BARRIER_ISHST
+
+ #define EASTL_ARM_DMB_ISHLD _ARM_BARRIER_ISH
+
+ #elif defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_ARM_DMB_ISH _ARM64_BARRIER_ISH
+
+ #define EASTL_ARM_DMB_ISHST _ARM64_BARRIER_ISHST
+
+ #define EASTL_ARM_DMB_ISHLD _ARM64_BARRIER_ISHLD
+
+ #endif
+
+
+ /**
+ * NOTE:
+ *
+ * While it makes no sense for a hardware memory barrier to not imply a compiler barrier.
+ * MSVC docs do not explicitly state that, so better to be safe than sorry chasing down
+ * hard to find bugs due to the compiler deciding to reorder things.
+ */
+
+ #define EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(option) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ __dmb(option); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#elif defined(EA_COMPILER_GNUC) || defined(__clang__)
+
+ #define EASTL_ARM_DMB_ISH ish
+
+ #define EASTL_ARM_DMB_ISHST ishst
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+ #define EASTL_ARM_DMB_ISHLD ish
+
+ #elif defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_ARM_DMB_ISHLD ishld
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(option) \
+ __asm__ __volatile__ ("dmb " EA_STRINGIFY(option) ::: "memory")
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_MB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_MB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISH)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_WMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_WMB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISHST)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_RMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_RMB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISHLD)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h
new file mode 100644
index 0000000..ab53b9d
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h
@@ -0,0 +1,142 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_N(integralType, bits, type, ptr, val) \
+ EA_PREPROCESSOR_JOIN(__iso_volatile_store, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)))
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_N(__int8, 8, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_N(__int16, 16, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_N(__int32, 32, type, ptr, val)
+
+
+ #if defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_N(__int64, 64, type, ptr, val)
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, MemoryOrder) \
+ { \
+ type exchange128; EA_UNUSED(exchange128); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, RELAXED)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, RELEASE)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) ; \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, SEQ_CST)
+
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ { \
+ type retExchange64; EA_UNUSED(retExchange64); \
+ EASTL_ATOMIC_EXCHANGE_RELAXED_64(type, retExchange64, ptr, val); \
+ }
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ { \
+ type retExchange64; EA_UNUSED(retExchange64); \
+ EASTL_ATOMIC_EXCHANGE_RELEASE_64(type, retExchange64, ptr, val); \
+ }
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ { \
+ type retExchange64; EA_UNUSED(retExchange64); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, retExchange64, ptr, val); \
+ }
+
+
+ #elif defined(EA_PROCESSOR_ARM64)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+
+ #endif
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h
new file mode 100644
index 0000000..391c64e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h
@@ -0,0 +1,37 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*()
+//
+#if defined(EA_COMPILER_MSVC)
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE() \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_CPU_MB()
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86.h
new file mode 100644
index 0000000..77c383a
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86.h
@@ -0,0 +1,158 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/**
+ * x86 && x64 Mappings
+ *
+ * Load Relaxed : MOV
+ * Load Acquire : MOV; COMPILER_BARRIER;
+ * Load Seq_Cst : MOV; COMPILER_BARRIER;
+ *
+ * Store Relaxed : MOV
+ * Store Release : COMPILER_BARRIER; MOV;
+ * Store Seq_Cst : LOCK XCHG : MOV; MFENCE;
+ *
+ * Relaxed Fence :
+ * Acquire Fence : COMPILER_BARRIER
+ * Release Fence : COMPILER_BARRIER
+ * Acq_Rel Fence : COMPILER_BARRIER
+ * Seq_Cst FENCE : MFENCE
+ */
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+#if (defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+#elif defined(EA_COMPILER_MSVC)
+ #if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+ #endif
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ *
+ * On 32-bit x86 CPUs Intel Pentium and newer, AMD K5 and newer
+ * and any i586 class of x86 CPUs support only 64-bit cmpxchg
+ * known as cmpxchg8b.
+ *
+ * On these class of cpus we can guarantee that 64-bit loads/stores are
+ * also atomic by using the SSE2 movq, SSE1 movlps, or x87 fild/fstp instructions.
+ *
+ * We support all other atomic operations
+ * on compilers that only provide this 64-bit cmpxchg instruction
+ * by wrapping them around the 64-bit cmpxchg8b instruction.
+ */
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implmented!");
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+ { \
+ EASTL_ATOMIC_DEFAULT_INIT(bool, cmpxchgRet); \
+ EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr); \
+ do \
+ { \
+ type computedDesired; \
+ PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _64)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
+ } while (!cmpxchgRet); \
+ POST_COMPUTE_RET(ret, ret, (val)); \
+ }
+
+
+#endif
+
+
+/**
+ * NOTE:
+ *
+ * 64-bit x64 CPUs support only 128-bit cmpxchg known as cmpxchg16b.
+ *
+ * We support all other atomic operations by wrapping them around
+ * the 128-bit cmpxchg16b instruction.
+ *
+ * 128-bit loads are only atomic by using the cmpxchg16b instruction.
+ * SSE 128-bit loads are not guaranteed to be atomic even though some CPUs
+ * make them atomic such as AMD Ryzen or Intel SandyBridge.
+ */
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implmented!");
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+ { \
+ EASTL_ATOMIC_DEFAULT_INIT(bool, cmpxchgRet); \
+ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+ /* Either we do not observe *(ptr) but then the cmpxchg will fail and the observed */ \
+ /* atomic load will be returned. Or the non-atomic load got lucky and the cmpxchg succeeds */ \
+ /* because the observed value equals the value in *(ptr) thus we optimistically do a non-atomic load. */ \
+ ret = *(ptr); \
+ do \
+ { \
+ type computedDesired; \
+ PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
+ } while (!cmpxchgRet); \
+ POST_COMPUTE_RET(ret, ret, (val)); \
+ }
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_x86_fetch_add.h"
+#include "arch_x86_fetch_sub.h"
+
+#include "arch_x86_fetch_and.h"
+#include "arch_x86_fetch_xor.h"
+#include "arch_x86_fetch_or.h"
+
+#include "arch_x86_add_fetch.h"
+#include "arch_x86_sub_fetch.h"
+
+#include "arch_x86_and_fetch.h"
+#include "arch_x86_xor_fetch.h"
+#include "arch_x86_or_fetch.h"
+
+#include "arch_x86_exchange.h"
+
+#include "arch_x86_cmpxchg_weak.h"
+#include "arch_x86_cmpxchg_strong.h"
+
+#include "arch_x86_memory_barrier.h"
+
+#include "arch_x86_thread_fence.h"
+
+#include "arch_x86_load.h"
+#include "arch_x86_store.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
new file mode 100644
index 0000000..7b77528
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) + (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) + (val))
+
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) + (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) + (val))
+
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h
new file mode 100644
index 0000000..0583163
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) & (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) & (val))
+
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) & (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) & (val))
+
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h
new file mode 100644
index 0000000..1968e9a
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h
@@ -0,0 +1,69 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \
+ { \
+ /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \
+ __asm__ __volatile__ ("lock; cmpxchg16b %2\n" /* cmpxchg16b sets/clears ZF */ \
+ "sete %3" /* If ZF == 1, set the return value to 1 */ \
+ /* Output Operands */ \
+ : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]), \
+ "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))), \
+ "=rm"((ret)) \
+ /* Input Operands */ \
+ : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[1]), \
+ "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]) \
+ /* Clobbers */ \
+ : "memory", "cc"); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h
new file mode 100644
index 0000000..61a126c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h
@@ -0,0 +1,52 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h
new file mode 100644
index 0000000..b1de7d8
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h
@@ -0,0 +1,91 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = (val)
+
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \
+ { \
+ EASTL_ATOMIC_DEFAULT_INIT(bool, cmpxchgRet); \
+ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+ /* Either we do not observe *(ptr) but then the cmpxchg will fail and the observed */ \
+ /* atomic load will be returned. Or the non-atomic load got lucky and the cmpxchg succeeds */ \
+ /* because the observed value equals the value in *(ptr) thus we optimistically do a non-atomic load. */ \
+ ret = *(ptr); \
+ do \
+ { \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \
+ } while (!cmpxchgRet); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELAXED)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQUIRE)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELEASE)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQ_REL)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, SEQ_CST)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h
new file mode 100644
index 0000000..e816af9
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) + (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) + (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h
new file mode 100644
index 0000000..ff27b1a
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) & (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) & (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h
new file mode 100644
index 0000000..8627d3a
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) | (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) | (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h
new file mode 100644
index 0000000..14b43f9
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) - (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) - (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h
new file mode 100644
index 0000000..666df8b
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) ^ (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) ^ (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h
new file mode 100644
index 0000000..644a2a1
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h
@@ -0,0 +1,164 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ /**
+ * NOTE:
+ *
+ * Since the cmpxchg 128-bit inline assembly does a sete in the asm to set the return boolean,
+ * it doesn't get dead-store removed even though we don't care about the success of the
+ * cmpxchg since the compiler cannot reason about what is inside asm blocks.
+ * Thus this variant just does the minimum required to do an atomic load.
+ */
+#define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \
+ { \
+ EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected = 0; \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \
+ \
+ /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \
+ __asm__ __volatile__ ("lock; cmpxchg16b %2" /* cmpxchg16b sets/clears ZF */ \
+ /* Output Operands */ \
+ : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \
+ "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))) \
+ /* Input Operands */ \
+ : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \
+ "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]) \
+ /* Clobbers */ \
+ : "memory", "cc"); \
+ }
+
+
+#define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED)
+
+#define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE)
+
+#define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST)
+
+#elif defined(EA_COMPILER_MSVC)
+
+
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \
+ { \
+ integralType retIntegral; \
+ retIntegral = EA_PREPROCESSOR_JOIN(__iso_volatile_load, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \
+ { \
+ integralType retIntegral; \
+ retIntegral = (*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)))); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \
+ { \
+ EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected{0, 0}; \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \
+ \
+ bool cmpxchgRetBool; EA_UNUSED(cmpxchgRetBool); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, ptr, &(ret), ret); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_N(__int8, 8, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_N(__int16, 16, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_N(__int32, 32, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_N(__int64, 64, type, ret, ptr)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST)
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h
new file mode 100644
index 0000000..7bad141
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h
@@ -0,0 +1,104 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_MB()
+//
+#if defined(EA_COMPILER_MSVC)
+
+ /**
+ * NOTE:
+ * While it makes no sense for a hardware memory barrier to not imply a compiler barrier.
+ * MSVC docs do not explicitly state that, so better to be safe than sorry chasing down
+ * hard to find bugs due to the compiler deciding to reorder things.
+ */
+
+ #if 1
+
+ // 4459 : declaration of 'identifier' hides global declaration
+ // 4456 : declaration of 'identifier' hides previous local declaration
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ { \
+ EA_DISABLE_VC_WARNING(4459 4456); \
+ volatile long _; \
+ _InterlockedExchangeAdd(&_, 0); \
+ EA_RESTORE_VC_WARNING(); \
+ }
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ _mm_mfence(); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #endif
+
+#elif defined(__clang__) || defined(EA_COMPILER_GNUC)
+
+ /**
+ * NOTE:
+ *
+ * mfence orders all loads/stores to/from all memory types.
+ * We only care about ordinary cacheable memory so lighter weight locked instruction
+ * is far faster than a mfence to get a full memory barrier.
+ * lock; addl against the top of the stack is good because:
+ * distinct for every thread so prevents false sharing
+ * that cacheline is most likely cache hot
+ *
+ * We intentionally do it below the stack pointer to avoid false RAW register dependencies,
+ * in cases where the compiler reads from the stack pointer after the lock; addl instruction
+ *
+ * Accounting for Red Zones or Cachelines doesn't provide extra benefit.
+ */
+
+ #if defined(EA_PROCESSOR_X86)
+
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ __asm__ __volatile__ ("lock; addl $0, -4(%%esp)" ::: "memory", "cc")
+
+ #elif defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ __asm__ __volatile__ ("lock; addl $0, -8(%%rsp)" ::: "memory", "cc")
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ __asm__ __volatile__ ("mfence" ::: "memory")
+
+ #endif
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_WMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_WMB() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_RMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_RMB() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h
new file mode 100644
index 0000000..42f7d61
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) | (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) | (val))
+
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) | (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) | (val))
+
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h
new file mode 100644
index 0000000..31655c3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h
@@ -0,0 +1,171 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_N(integralType, bits, type, ptr, val) \
+ EA_PREPROCESSOR_JOIN(__iso_volatile_store, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)))
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_N(integralType, bits, type, ptr, val) \
+ { \
+ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \
+ \
+ (*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)))) = valIntegral; \
+ }
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \
+ { \
+ type exchange128; EA_UNUSED(exchange128); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_N(__int8, 8, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_N(__int16, 16, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_N(__int32, 32, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_N(__int64, 64, type, ptr, val)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELAXED)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELEASE)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \
+ { \
+ type exchange8; EA_UNUSED(exchange8); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_8(type, exchange8, ptr, val); \
+ }
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \
+ { \
+ type exchange16; EA_UNUSED(exchange16); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_16(type, exchange16, ptr, val); \
+ }
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \
+ { \
+ type exchange32; EA_UNUSED(exchange32); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(type, exchange32, ptr, val); \
+ }
+
+
+ /**
+ * NOTE:
+ *
+ * Since 64-bit exchange is wrapped around a cmpxchg8b on 32-bit x86, it is
+ * faster to just do a mov; mfence.
+ */
+ #if defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+
+ #elif defined(EA_PROCESSOR_X86_64)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ { \
+ type exchange64; EA_UNUSED(exchange64); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, exchange64, ptr, val); \
+ }
+
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, SEQ_CST)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \
+ { \
+ type exchange128; EA_UNUSED(exchange128); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELAXED)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELEASE)
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, SEQ_CST)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h
new file mode 100644
index 0000000..a1d0932
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) - (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) - (val))
+
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) - (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) - (val))
+
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h
new file mode 100644
index 0000000..183c7f3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h
@@ -0,0 +1,42 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*()
+//
+#if defined(EA_COMPILER_MSVC)
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#endif
+
+
+#if defined(EA_COMPILER_MSVC) || defined(__clang__) || defined(EA_COMPILER_GNUC)
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_CPU_MB()
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h
new file mode 100644
index 0000000..a5b62c3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) ^ (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) ^ (val))
+
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) ^ (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) ^ (val))
+
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic.h b/EASTL/include/EASTL/internal/atomic/atomic.h
new file mode 100644
index 0000000..eb27d2d
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic.h
@@ -0,0 +1,252 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_H
+#define EASTL_ATOMIC_INTERNAL_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/type_traits.h>
+
+#include "atomic_macros.h"
+#include "atomic_casts.h"
+
+#include "atomic_memory_order.h"
+#include "atomic_asserts.h"
+
+#include "atomic_size_aligned.h"
+#include "atomic_base_width.h"
+
+#include "atomic_integral.h"
+
+#include "atomic_pointer.h"
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ *
+ * All of the actual implementation is done via the ATOMIC_MACROS in the compiler or arch sub folders.
+ * The C++ code is merely boilerplate around these macros that actually implement the atomic operations.
+ * The C++ boilerplate is also hidden behind macros.
+ * This may seem more complicated but this is all meant to reduce copy-pasting and to ensure all operations
+ * all end up going down to one macro that does the actual implementation.
+ * The reduced code duplication makes it easier to verify the implementation and reason about it.
+ * Ensures we do not have to re-implement the same code for compilers that do not support generic builtins such as MSVC.
+ * Ensures for compilers that have separate intrinsics for different widths, that C++ boilerplate isn't copy-pasted leading to programmer errors.
+ * Ensures if we ever have to implement a new platform, only the low-level leaf macros have to be implemented, everything else will be generated for you.
+ */
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+ template <typename T>
+ struct is_atomic_lockfree_size
+ {
+ static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool value = false ||
+ #if defined(EASTL_ATOMIC_HAS_8BIT)
+ sizeof(T) == 1 ||
+ #endif
+ #if defined(EASTL_ATOMIC_HAS_16BIT)
+ sizeof(T) == 2 ||
+ #endif
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ sizeof(T) == 4 ||
+ #endif
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ sizeof(T) == 8 ||
+ #endif
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ sizeof(T) == 16 ||
+ #endif
+ false;
+ };
+
+
+ template <typename T>
+ struct is_user_type_suitable_for_primary_template
+ {
+ static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool value = eastl::internal::is_atomic_lockfree_size<T>::value;
+ };
+
+
+ template <typename T>
+ using select_atomic_inherit_0 = typename eastl::conditional<eastl::is_same_v<bool, T> || eastl::internal::is_user_type_suitable_for_primary_template<T>::value,
+ eastl::internal::atomic_base_width<T>, /* True */
+ eastl::internal::atomic_invalid_type<T> /* False */
+ >::type;
+
+ template <typename T>
+ using select_atomic_inherit = select_atomic_inherit_0<T>;
+
+
+} // namespace internal
+
+
+#define EASTL_ATOMIC_CLASS_IMPL(type, base, valueType, differenceType) \
+ private: \
+ \
+ EASTL_ATOMIC_STATIC_ASSERT_TYPE(type); \
+ \
+ using Base = base; \
+ \
+ public: \
+ \
+ typedef valueType value_type; \
+ typedef differenceType difference_type; \
+ \
+ public: \
+ \
+ static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool is_always_lock_free = eastl::internal::is_atomic_lockfree_size<type>::value; \
+ \
+ public: /* deleted ctors && assignment operators */ \
+ \
+ atomic(const atomic&) EA_NOEXCEPT = delete; \
+ \
+ atomic& operator=(const atomic&) EA_NOEXCEPT = delete; \
+ atomic& operator=(const atomic&) volatile EA_NOEXCEPT = delete; \
+ \
+ public: /* ctors */ \
+ \
+ EA_CONSTEXPR atomic(type desired) EA_NOEXCEPT \
+ : Base{ desired } \
+ { \
+ } \
+ \
+ EA_CONSTEXPR atomic() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v<type>) = default; \
+ \
+ public: \
+ \
+ bool is_lock_free() const EA_NOEXCEPT \
+ { \
+ return eastl::internal::is_atomic_lockfree_size<type>::value; \
+ } \
+ \
+ bool is_lock_free() const volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(type); \
+ return false; \
+ }
+
+
+#define EASTL_ATOMIC_USING_ATOMIC_BASE(type) \
+ public: \
+ \
+ using Base::operator=; \
+ using Base::store; \
+ using Base::load; \
+ using Base::exchange; \
+ using Base::compare_exchange_weak; \
+ using Base::compare_exchange_strong; \
+ \
+ public: \
+ \
+ operator type() const volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ operator type() const EA_NOEXCEPT \
+ { \
+ return load(eastl::memory_order_seq_cst); \
+ }
+
+
+#define EASTL_ATOMIC_USING_ATOMIC_INTEGRAL() \
+ public: \
+ \
+ using Base::fetch_add; \
+ using Base::add_fetch; \
+ \
+ using Base::fetch_sub; \
+ using Base::sub_fetch; \
+ \
+ using Base::fetch_and; \
+ using Base::and_fetch; \
+ \
+ using Base::fetch_or; \
+ using Base::or_fetch; \
+ \
+ using Base::fetch_xor; \
+ using Base::xor_fetch; \
+ \
+ using Base::operator++; \
+ using Base::operator--; \
+ using Base::operator+=; \
+ using Base::operator-=; \
+ using Base::operator&=; \
+ using Base::operator|=; \
+ using Base::operator^=;
+
+
+#define EASTL_ATOMIC_USING_ATOMIC_POINTER() \
+ public: \
+ \
+ using Base::fetch_add; \
+ using Base::add_fetch; \
+ using Base::fetch_sub; \
+ using Base::sub_fetch; \
+ \
+ using Base::operator++; \
+ using Base::operator--; \
+ using Base::operator+=; \
+ using Base::operator-=;
+
+
+template <typename T, typename = void>
+struct atomic : protected eastl::internal::select_atomic_inherit<T>
+{
+ EASTL_ATOMIC_CLASS_IMPL(T, eastl::internal::select_atomic_inherit<T>, T, T)
+
+ EASTL_ATOMIC_USING_ATOMIC_BASE(T)
+};
+
+
+template <typename T>
+struct atomic<T, eastl::enable_if_t<eastl::is_integral_v<T> && !eastl::is_same_v<bool, T>>> : protected eastl::internal::atomic_integral_width<T>
+{
+ EASTL_ATOMIC_CLASS_IMPL(T, eastl::internal::atomic_integral_width<T>, T, T)
+
+ EASTL_ATOMIC_USING_ATOMIC_BASE(T)
+
+ EASTL_ATOMIC_USING_ATOMIC_INTEGRAL()
+};
+
+
+template <typename T>
+struct atomic<T*> : protected eastl::internal::atomic_pointer_width<T*>
+{
+ EASTL_ATOMIC_CLASS_IMPL(T*, eastl::internal::atomic_pointer_width<T*>, T*, ptrdiff_t)
+
+ EASTL_ATOMIC_USING_ATOMIC_BASE(T*)
+
+ EASTL_ATOMIC_USING_ATOMIC_POINTER()
+};
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_asserts.h b/EASTL/include/EASTL/internal/atomic/atomic_asserts.h
new file mode 100644
index 0000000..9324a47
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_asserts.h
@@ -0,0 +1,75 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H
+#define EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(type) \
+ static_assert(!eastl::is_same<type, type>::value, "eastl::atomic<T> : volatile eastl::atomic<T> is not what you expect! Read the docs in EASTL/atomic.h! Use the memory orders to access the atomic object!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(type) \
+ static_assert(!eastl::is_same<type, type>::value, "eastl::atomic<T> : invalid memory order for the given operation!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_TYPE(type) \
+ /* User Provided T must not be cv qualified */ \
+ static_assert(!eastl::is_const<type>::value, "eastl::atomic<T> : Template Typename T cannot be const!"); \
+ static_assert(!eastl::is_volatile<type>::value, "eastl::atomic<T> : Template Typename T cannot be volatile! Use the memory orders to access the underlying type for the guarantees you need."); \
+ /* T must satisfy StandardLayoutType */ \
+ static_assert(eastl::is_standard_layout<type>::value, "eastl::atomic<T> : Must have standard layout!"); \
+ /* T must be TriviallyCopyable but it does not have to be TriviallyConstructible */ \
+ static_assert(eastl::is_trivially_copyable<type>::value, "eastl::atomci<T> : Template Typename T must be trivially copyable!"); \
+ static_assert(eastl::is_copy_constructible<type>::value, "eastl::atomic<T> : Template Typename T must be copy constructible!"); \
+ static_assert(eastl::is_move_constructible<type>::value, "eastl::atomic<T> : Template Typename T must be move constructible!"); \
+ static_assert(eastl::is_copy_assignable<type>::value, "eastl::atomic<T> : Template Typename T must be copy assignable!"); \
+ static_assert(eastl::is_move_assignable<type>::value, "eastl::atomic<T> : Template Typename T must be move assignable!"); \
+ static_assert(eastl::is_trivially_destructible<type>::value, "eastl::atomic<T> : Must be trivially destructible!"); \
+ static_assert(eastl::internal::is_atomic_lockfree_size<type>::value, "eastl::atomic<T> : Template Typename T must be a lockfree size!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(type) \
+ static_assert(eastl::is_object<type>::value, "eastl::atomic<T> : Template Typename T must be an object type!");
+
+#define EASTL_ATOMIC_ASSERT_ALIGNED(alignment) \
+ EASTL_ASSERT((alignment & (alignment - 1)) == 0); \
+ EASTL_ASSERT((reinterpret_cast<uintptr_t>(this) & (alignment - 1)) == 0)
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+ template <typename T>
+ struct atomic_invalid_type
+ {
+ /**
+ * class Test { int i; int j; int k; }; sizeof(Test) == 96 bits
+ *
+ * std::atomic allows non-primitive types to be used for the template type.
+ * This causes the api to degrade to locking for types that cannot fit into the lockfree size
+ * of the target platform such as std::atomic<Test> leading to performance traps.
+ *
+ * If this static_assert() fired, this means your template type T is larger than any atomic instruction
+ * supported on the given platform.
+ */
+
+ static_assert(!eastl::is_same<T, T>::value, "eastl::atomic<T> : invalid template type T!");
+ };
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_base_width.h b/EASTL/include/EASTL/internal/atomic/atomic_base_width.h
new file mode 100644
index 0000000..ac76097
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_base_width.h
@@ -0,0 +1,346 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H
+#define EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_base_width;
+
+ /**
+ * NOTE:
+ *
+ * T does not have to be trivially default constructible but it still
+ * has to be a trivially copyable type for the primary atomic template.
+ * Thus we must type pun into whatever storage type of the given fixed width
+ * the platform designates. This ensures T does not have to be trivially constructible.
+ */
+
+#define EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) \
+ EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_FIXED_WIDTH_TYPE_, bits)
+
+
+#define EASTL_ATOMIC_STORE_FUNC_IMPL(op, bits) \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \
+ EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \
+ fixedWidthDesired)
+
+
+#define EASTL_ATOMIC_LOAD_FUNC_IMPL(op, bits) \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) retVal; \
+ EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \
+ retVal, \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress())); \
+ return EASTL_ATOMIC_TYPE_PUN_CAST(T, retVal);
+
+
+#define EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(op, bits) \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) retVal; \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \
+ EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \
+ retVal, \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \
+ fixedWidthDesired); \
+ return EASTL_ATOMIC_TYPE_PUN_CAST(T, retVal);
+
+
+#define EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(op, bits) \
+ EASTL_ATOMIC_DEFAULT_INIT(bool, retVal); \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \
+ EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \
+ retVal, \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), &expected), \
+ fixedWidthDesired); \
+ return retVal;
+
+
+#define EASTL_ATOMIC_BASE_OP_JOIN(op, Order) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, op), Order)
+
+
+#define EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(funcName, cmpxchgOp, bits) \
+ using Base::funcName; \
+ \
+ bool funcName(T& expected, T desired) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_release_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELEASE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_relaxed_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELAXED_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acquire_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acquire_s, \
+ eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_ACQUIRE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_release_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELEASE_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acq_rel_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acq_rel_s, \
+ eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_ACQUIRE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_seq_cst_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_seq_cst_s, \
+ eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_ACQUIRE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_seq_cst_s, \
+ eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_SEQ_CST_), bits); \
+ }
+
+#define EASTL_ATOMIC_BASE_CMPXCHG_WEAK_FUNCS_IMPL(bits) \
+ EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(compare_exchange_weak, CMPXCHG_WEAK, bits)
+
+#define EASTL_ATOMIC_BASE_CMPXCHG_STRONG_FUNCS_IMPL(bits) \
+ EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(compare_exchange_strong, CMPXCHG_STRONG, bits)
+
+
+#define EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(bytes, bits) \
+ template <typename T> \
+ struct atomic_base_width<T, bytes> : public atomic_size_aligned<T> \
+ { \
+ private: \
+ \
+ static_assert(EA_ALIGN_OF(atomic_size_aligned<T>) == bytes, "eastl::atomic<T> must be sizeof(T) aligned!"); \
+ static_assert(EA_ALIGN_OF(atomic_size_aligned<T>) == sizeof(T), "eastl::atomic<T> must be sizeof(T) aligned!"); \
+ using Base = atomic_size_aligned<T>; \
+ \
+ public: /* ctors */ \
+ \
+ EA_CONSTEXPR atomic_base_width(T desired) EA_NOEXCEPT \
+ : Base{ desired } \
+ { \
+ } \
+ \
+ EA_CONSTEXPR atomic_base_width() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v<T>) = default; \
+ \
+ atomic_base_width(const atomic_base_width&) EA_NOEXCEPT = delete; \
+ \
+ public: /* store */ \
+ \
+ using Base::store; \
+ \
+ void store(T desired) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_SEQ_CST_, bits); \
+ } \
+ \
+ void store(T desired, eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_RELAXED_, bits); \
+ } \
+ \
+ void store(T desired, eastl::internal::memory_order_release_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_RELEASE_, bits); \
+ } \
+ \
+ void store(T desired, eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_SEQ_CST_, bits); \
+ } \
+ \
+ public: /* load */ \
+ \
+ using Base::load; \
+ \
+ T load() const EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_SEQ_CST_, bits); \
+ } \
+ \
+ T load(eastl::internal::memory_order_relaxed_s) const EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_RELAXED_, bits); \
+ } \
+ \
+ T load(eastl::internal::memory_order_acquire_s) const EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_ACQUIRE_, bits); \
+ } \
+ \
+ T load(eastl::internal::memory_order_seq_cst_s) const EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_SEQ_CST_, bits); \
+ } \
+ \
+ public: /* exchange */ \
+ \
+ using Base::exchange; \
+ \
+ T exchange(T desired) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_SEQ_CST_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_RELAXED_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_ACQUIRE_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_release_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_RELEASE_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_ACQ_REL_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_SEQ_CST_, bits); \
+ } \
+ \
+ public: /* compare_exchange_weak */ \
+ \
+ EASTL_ATOMIC_BASE_CMPXCHG_WEAK_FUNCS_IMPL(bits) \
+ \
+ public: /* compare_exchange_strong */ \
+ \
+ EASTL_ATOMIC_BASE_CMPXCHG_STRONG_FUNCS_IMPL(bits) \
+ \
+ public: /* assignment operator */ \
+ \
+ using Base::operator=; \
+ \
+ T operator=(T desired) EA_NOEXCEPT \
+ { \
+ store(desired, eastl::memory_order_seq_cst); \
+ return desired; \
+ } \
+ \
+ atomic_base_width& operator=(const atomic_base_width&) EA_NOEXCEPT = delete; \
+ atomic_base_width& operator=(const atomic_base_width&) volatile EA_NOEXCEPT = delete; \
+ \
+ };
+
+
+#if defined(EASTL_ATOMIC_HAS_8BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(1, 8)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_16BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(2, 16)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(4, 32)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(8, 64)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(16, 128)
+#endif
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_casts.h b/EASTL/include/EASTL/internal/atomic/atomic_casts.h
new file mode 100644
index 0000000..54b9ed2
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_casts.h
@@ -0,0 +1,190 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_CASTS_H
+#define EASTL_ATOMIC_INTERNAL_CASTS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include <EASTL/internal/type_transformations.h>
+
+
+#include <string.h>
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+template <typename T>
+EASTL_FORCE_INLINE volatile T* AtomicVolatileCast(T* ptr) EA_NOEXCEPT
+{
+ static_assert(!eastl::is_volatile<volatile T*>::value, "eastl::atomic<T> : pointer must not be volatile, the pointed to type must be volatile!");
+ static_assert(eastl::is_volatile<volatile T>::value, "eastl::atomic<T> : the pointed to type must be volatile!");
+
+ return reinterpret_cast<volatile T*>(ptr);
+}
+
+
+/**
+ * NOTE:
+ *
+ * Some compiler intrinsics do not operate on pointer types thus
+ * doing atomic operations on pointers must be casted to the suitable
+ * sized unsigned integral type.
+ *
+ * Some compiler intrinsics aren't generics and thus structs must also
+ * be casted to the appropriate sized unsigned integral type.
+ *
+ * Atomic operations on an int* might have to be casted to a uint64_t on
+ * a platform with 8-byte pointers as an example.
+ *
+ * Also doing an atomic operation on a struct, we must ensure that we observe
+ * the whole struct as one atomic unit with no shearing between the members.
+ * A load of a struct with two uint32_t members must be one uint64_t load,
+ * not two separate uint32_t loads, thus casted to the suitable sized
+ * unsigned integral type.
+ */
+template <typename Integral, typename T>
+EASTL_FORCE_INLINE volatile Integral* AtomicVolatileIntegralCast(T* ptr) EA_NOEXCEPT
+{
+ static_assert(!eastl::is_volatile<volatile Integral*>::value, "eastl::atomic<T> : pointer must not be volatile, the pointed to type must be volatile!");
+ static_assert(eastl::is_volatile<volatile Integral>::value, "eastl::atomic<T> : the pointed to type must be volatile!");
+ static_assert(eastl::is_integral<Integral>::value, "eastl::atomic<T> : Integral cast must cast to an Integral type!");
+ static_assert(sizeof(Integral) == sizeof(T), "eastl::atomic<T> : Integral and T must be same size for casting!");
+
+ return reinterpret_cast<volatile Integral*>(ptr);
+}
+
+template <typename Integral, typename T>
+EASTL_FORCE_INLINE Integral* AtomicIntegralCast(T* ptr) EA_NOEXCEPT
+{
+ static_assert(eastl::is_integral<Integral>::value, "eastl::atomic<T> : Integral cast must cast to an Integral type!");
+ static_assert(sizeof(Integral) == sizeof(T), "eastl::atomic<T> : Integral and T must be same size for casting!");
+
+ return reinterpret_cast<Integral*>(ptr);
+}
+
+
+/**
+ * NOTE:
+ *
+ * These casts are meant to be used with unions or structs of larger types that must be casted
+ * down to the smaller integral types. Like with 128-bit atomics and msvc intrinsics.
+ *
+ * struct Foo128 { __int64 array[2]; }; can be casted to a __int64*
+ * since a poiter to Foo128 is a pointer to the first member.
+ */
+template <typename ToType, typename FromType>
+EASTL_FORCE_INLINE volatile ToType* AtomicVolatileTypeCast(FromType* ptr) EA_NOEXCEPT
+{
+ static_assert(!eastl::is_volatile<volatile ToType*>::value, "eastl::atomic<T> : pointer must not be volatile, the pointed to type must be volatile!");
+ static_assert(eastl::is_volatile<volatile ToType>::value, "eastl::atomic<T> : the pointed to type must be volatile!");
+
+ return reinterpret_cast<volatile ToType*>(ptr);
+}
+
+template <typename ToType, typename FromType>
+EASTL_FORCE_INLINE ToType* AtomicTypeCast(FromType* ptr) EA_NOEXCEPT
+{
+ return reinterpret_cast<ToType*>(ptr);
+}
+
+
+/**
+ * NOTE:
+ *
+ * This is a compiler guaranteed safe type punning.
+ * This is useful when dealing with user defined structs.
+ * struct Test { uint32_t; unint32_t; };
+ *
+ * Example:
+ * uint64_t atomicLoad = *((volatile uint64_t*)&Test);
+ * Test load = AtomicTypePunCast<Test, uint64_t>(atomicLoad);
+ *
+ * uint64_t comparand = AtomicTypePunCast<uint64_t, Test>(Test);
+ * cmpxchg(&Test, comparand, desired);
+ *
+ * This can be implemented in many different ways depending on the compiler such
+ * as thru a union, memcpy, reinterpret_cast<Test&>(atomicLoad), etc.
+ */
+template <typename Pun, typename T, eastl::enable_if_t<!eastl::is_same_v<Pun, T>, int> = 0>
+EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT
+{
+ static_assert(sizeof(Pun) == sizeof(T), "eastl::atomic<T> : Pun and T must be the same size for type punning!");
+
+ /**
+ * aligned_storage ensures we can TypePun objects that aren't trivially default constructible
+ * but still trivially copyable.
+ */
+ typename eastl::aligned_storage<sizeof(Pun), alignof(Pun)>::type ret;
+ memcpy(eastl::addressof(ret), eastl::addressof(fromType), sizeof(Pun));
+ return reinterpret_cast<Pun&>(ret);
+}
+
+template <typename Pun, typename T, eastl::enable_if_t<eastl::is_same_v<Pun, T>, int> = 0>
+EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT
+{
+ return fromType;
+}
+
+
+template <typename T>
+EASTL_FORCE_INLINE T AtomicNegateOperand(T val) EA_NOEXCEPT
+{
+ static_assert(eastl::is_integral<T>::value, "eastl::atomic<T> : Integral Negation must be an Integral type!");
+ static_assert(!eastl::is_volatile<T>::value, "eastl::atomic<T> : T must not be volatile!");
+
+ return static_cast<T>(0U - static_cast<eastl::make_unsigned_t<T>>(val));
+}
+
+EASTL_FORCE_INLINE ptrdiff_t AtomicNegateOperand(ptrdiff_t val) EA_NOEXCEPT
+{
+ return -val;
+}
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+/**
+ * NOTE:
+ *
+ * These macros are meant to prevent inclusion hell.
+ * Also so that it fits with the style of the rest of the atomic macro implementation.
+ */
+#define EASTL_ATOMIC_VOLATILE_CAST(ptr) \
+ eastl::internal::AtomicVolatileCast((ptr))
+
+#define EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(IntegralType, ptr) \
+ eastl::internal::AtomicVolatileIntegralCast<IntegralType>((ptr))
+
+#define EASTL_ATOMIC_INTEGRAL_CAST(IntegralType, ptr) \
+ eastl::internal::AtomicIntegralCast<IntegralType>((ptr))
+
+#define EASTL_ATOMIC_VOLATILE_TYPE_CAST(ToType, ptr) \
+ eastl::internal::AtomicVolatileTypeCast<ToType>((ptr))
+
+#define EASTL_ATOMIC_TYPE_CAST(ToType, ptr) \
+ eastl::internal::AtomicTypeCast<ToType>((ptr))
+
+#define EASTL_ATOMIC_TYPE_PUN_CAST(PunType, fromType) \
+ eastl::internal::AtomicTypePunCast<PunType>((fromType))
+
+#define EASTL_ATOMIC_NEGATE_OPERAND(val) \
+ eastl::internal::AtomicNegateOperand((val))
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_CASTS_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_flag.h b/EASTL/include/EASTL/internal/atomic/atomic_flag.h
new file mode 100644
index 0000000..eed448a
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_flag.h
@@ -0,0 +1,170 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H
+#define EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+class atomic_flag
+{
+public: /* ctors */
+
+ EA_CONSTEXPR atomic_flag(bool desired) EA_NOEXCEPT
+ : mFlag{ desired }
+ {
+ }
+
+ EA_CONSTEXPR atomic_flag() EA_NOEXCEPT
+ : mFlag{ false }
+ {
+ }
+
+public: /* deleted ctors && assignment operators */
+
+ atomic_flag(const atomic_flag&) EA_NOEXCEPT = delete;
+
+ atomic_flag& operator=(const atomic_flag&) EA_NOEXCEPT = delete;
+ atomic_flag& operator=(const atomic_flag&) volatile EA_NOEXCEPT = delete;
+
+public: /* clear */
+
+ template <typename Order>
+ void clear(Order /*order*/) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
+ }
+
+ template <typename Order>
+ void clear(Order /*order*/) EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+ }
+
+ void clear(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+ {
+ mFlag.store(false, eastl::memory_order_relaxed);
+ }
+
+ void clear(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+ {
+ mFlag.store(false, eastl::memory_order_release);
+ }
+
+ void clear(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+ {
+ mFlag.store(false, eastl::memory_order_seq_cst);
+ }
+
+ void clear() EA_NOEXCEPT
+ {
+ mFlag.store(false, eastl::memory_order_seq_cst);
+ }
+
+public: /* test_and_set */
+
+ template <typename Order>
+ bool test_and_set(Order /*order*/) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
+ return false;
+ }
+
+ template <typename Order>
+ bool test_and_set(Order /*order*/) EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+ return false;
+ }
+
+ bool test_and_set(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_relaxed);
+ }
+
+ bool test_and_set(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_acquire);
+ }
+
+ bool test_and_set(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_release);
+ }
+
+ bool test_and_set(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_acq_rel);
+ }
+
+ bool test_and_set(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_seq_cst);
+ }
+
+ bool test_and_set() EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_seq_cst);
+ }
+
+public: /* test */
+
+ template <typename Order>
+ bool test(Order /*order*/) const volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
+ return false;
+ }
+
+ template <typename Order>
+ bool test(Order /*order*/) const EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+ return false;
+ }
+
+ bool test(eastl::internal::memory_order_relaxed_s) const EA_NOEXCEPT
+ {
+ return mFlag.load(eastl::memory_order_relaxed);
+ }
+
+ bool test(eastl::internal::memory_order_acquire_s) const EA_NOEXCEPT
+ {
+ return mFlag.load(eastl::memory_order_acquire);
+ }
+
+ bool test(eastl::internal::memory_order_seq_cst_s) const EA_NOEXCEPT
+ {
+ return mFlag.load(eastl::memory_order_seq_cst);
+ }
+
+ bool test() const EA_NOEXCEPT
+ {
+ return mFlag.load(eastl::memory_order_seq_cst);
+ }
+
+private:
+
+ eastl::atomic<bool> mFlag;
+};
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_flag_standalone.h b/EASTL/include/EASTL/internal/atomic/atomic_flag_standalone.h
new file mode 100644
index 0000000..b5284be
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_flag_standalone.h
@@ -0,0 +1,69 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H
+#define EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_flag_test_and_set(eastl::atomic<T>*)
+//
+EASTL_FORCE_INLINE bool atomic_flag_test_and_set(eastl::atomic_flag* atomicObj) EA_NOEXCEPT
+{
+ return atomicObj->test_and_set();
+}
+
+template <typename Order>
+EASTL_FORCE_INLINE bool atomic_flag_test_and_set_explicit(eastl::atomic_flag* atomicObj, Order order)
+{
+ return atomicObj->test_and_set(order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_flag_clear(eastl::atomic<T>*)
+//
+EASTL_FORCE_INLINE void atomic_flag_clear(eastl::atomic_flag* atomicObj)
+{
+ atomicObj->clear();
+}
+
+template <typename Order>
+EASTL_FORCE_INLINE void atomic_flag_clear_explicit(eastl::atomic_flag* atomicObj, Order order)
+{
+ atomicObj->clear(order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_flag_test(eastl::atomic<T>*)
+//
+EASTL_FORCE_INLINE bool atomic_flag_test(eastl::atomic_flag* atomicObj)
+{
+ return atomicObj->test();
+}
+
+template <typename Order>
+EASTL_FORCE_INLINE bool atomic_flag_test_explicit(eastl::atomic_flag* atomicObj, Order order)
+{
+ return atomicObj->test(order);
+}
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_integral.h b/EASTL/include/EASTL/internal/atomic/atomic_integral.h
new file mode 100644
index 0000000..a9c96c7
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_integral.h
@@ -0,0 +1,343 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_INTEGRAL_H
+#define EASTL_ATOMIC_INTERNAL_INTEGRAL_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(funcName) \
+ template <typename Order> \
+ T funcName(T /*arg*/, Order /*order*/) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+ } \
+ \
+ template <typename Order> \
+ T funcName(T /*arg*/, Order /*order*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ T funcName(T /*arg*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \
+ T operator operatorOp() volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ T operator operatorOp(int) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
+ T operator operatorOp(T /*arg*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_integral_base : public atomic_base_width<T, width>
+ {
+ private:
+
+ using Base = atomic_base_width<T, width>;
+
+ public: /* ctors */
+
+ EA_CONSTEXPR atomic_integral_base(T desired) EA_NOEXCEPT
+ : Base{ desired }
+ {
+ }
+
+ EA_CONSTEXPR atomic_integral_base() EA_NOEXCEPT = default;
+
+ atomic_integral_base(const atomic_integral_base&) EA_NOEXCEPT = delete;
+
+ public: /* assignment operator */
+
+ using Base::operator=;
+
+ atomic_integral_base& operator=(const atomic_integral_base&) EA_NOEXCEPT = delete;
+ atomic_integral_base& operator=(const atomic_integral_base&) volatile EA_NOEXCEPT = delete;
+
+ public: /* fetch_add */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_add)
+
+ public: /* add_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(add_fetch)
+
+ public: /* fetch_sub */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_sub)
+
+ public: /* sub_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(sub_fetch)
+
+ public: /* fetch_and */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_and)
+
+ public: /* and_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(and_fetch)
+
+ public: /* fetch_or */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_or)
+
+ public: /* or_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(or_fetch)
+
+ public: /* fetch_xor */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_xor)
+
+ public: /* xor_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(xor_fetch)
+
+ public: /* operator++ && operator-- */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++)
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--)
+
+ public: /* operator+= && operator-= */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=)
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=)
+
+ public: /* operator&= */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(&=)
+
+ public: /* operator|= */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(|=)
+
+ public: /* operator^= */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(^=)
+
+ };
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_integral_width;
+
+#define EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits) \
+ EASTL_ATOMIC_DEFAULT_INIT(T, retVal); \
+ EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), arg); \
+ return retVal;
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, op, bits) \
+ T funcName(T arg) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \
+ }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \
+ T funcName(T arg, orderType) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \
+ }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, Order) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order)
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \
+ using Base::funcName; \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits)
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \
+ using Base::operator operatorOp; \
+ \
+ T operator operatorOp() EA_NOEXCEPT \
+ { \
+ return preFuncName(1, eastl::memory_order_seq_cst); \
+ } \
+ \
+ T operator operatorOp(int) EA_NOEXCEPT \
+ { \
+ return postFuncName(1, eastl::memory_order_seq_cst); \
+ }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \
+ using Base::operator operatorOp; \
+ \
+ T operator operatorOp(T arg) EA_NOEXCEPT \
+ { \
+ return funcName(arg, eastl::memory_order_seq_cst); \
+ }
+
+
+#define EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(bytes, bits) \
+ template <typename T> \
+ struct atomic_integral_width<T, bytes> : public atomic_integral_base<T, bytes> \
+ { \
+ private: \
+ \
+ using Base = atomic_integral_base<T, bytes>; \
+ \
+ public: /* ctors */ \
+ \
+ EA_CONSTEXPR atomic_integral_width(T desired) EA_NOEXCEPT \
+ : Base{ desired } \
+ { \
+ } \
+ \
+ EA_CONSTEXPR atomic_integral_width() EA_NOEXCEPT = default; \
+ \
+ atomic_integral_width(const atomic_integral_width&) EA_NOEXCEPT = delete; \
+ \
+ public: /* assignment operator */ \
+ \
+ using Base::operator=; \
+ \
+ atomic_integral_width& operator=(const atomic_integral_width&) EA_NOEXCEPT = delete; \
+ atomic_integral_width& operator=(const atomic_integral_width&) volatile EA_NOEXCEPT = delete; \
+ \
+ public: /* fetch_add */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \
+ \
+ public: /* add_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \
+ \
+ public: /* fetch_sub */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \
+ \
+ public: /* sub_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \
+ \
+ public: /* fetch_and */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_and, FETCH_AND, bits) \
+ \
+ public: /* and_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(and_fetch, AND_FETCH, bits) \
+ \
+ public: /* fetch_or */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_or, FETCH_OR, bits) \
+ \
+ public: /* or_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(or_fetch, OR_FETCH, bits) \
+ \
+ public: /* fetch_xor */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_xor, FETCH_XOR, bits) \
+ \
+ public: /* xor_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(xor_fetch, XOR_FETCH, bits) \
+ \
+ public: /* operator++ && operator-- */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \
+ \
+ public: /* operator+= && operator-= */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \
+ \
+ public: /* operator&= */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(&=, and_fetch) \
+ \
+ public: /* operator|= */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(|=, or_fetch) \
+ \
+ public: /* operator^= */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(^=, xor_fetch) \
+ \
+ };
+
+
+#if defined(EASTL_ATOMIC_HAS_8BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(1, 8)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_16BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(2, 16)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(4, 32)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(8, 64)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(16, 128)
+#endif
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_INTEGRAL_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros.h b/EASTL/include/EASTL/internal/atomic/atomic_macros.h
new file mode 100644
index 0000000..756a4b4
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros.h
@@ -0,0 +1,67 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// The reason for the implementation separating out into a compiler and architecture
+// folder is as follows.
+//
+// The compiler directory is meant to implement atomics using the compiler provided
+// intrinsics. This also implies that usually the same compiler instrinsic implementation
+// can be used for any architecture the compiler supports. If a compiler provides intrinsics
+// to support barriers or atomic operations, then that implementation should be in the
+// compiler directory.
+//
+// The arch directory is meant to manually implement atomics for a specific architecture
+// such as power or x86. There may be some compiler specific code in this directory because
+// GCC inline assembly syntax may be different than another compiler as an example.
+//
+// The arch directory can also be used to implement some atomic operations ourselves
+// if we deem the compiler provided implementation to be inefficient for the given
+// architecture or we need to do some things manually for a given compiler.
+//
+// The atomic_macros directory implements the macros that the rest of the atomic
+// library uses. These macros will expand to either the compiler or arch implemented
+// macro. The arch implemented macro is given priority over the compiler implemented
+// macro if both are implemented otherwise whichever is implemented is chosen or
+// an error is emitted if none are implemented.
+//
+// The implementation being all macros has a couple nice side effects as well.
+//
+// 1. All the implementation ends up funneling into one low level macro implementation
+// which makes it easy to verify correctness, reduce copy-paste errors and differences
+// in various platform implementations.
+//
+// 2. Allows for the implementation to be implemented efficiently on compilers that do not
+// directly implement the C++ memory model in their intrinsics such as msvc.
+//
+// 3. Allows for the implementation of atomics that may not be supported on the given platform,
+// such as 128-bit atomics on 32-bit platforms since the macros will only ever be expanded
+// on platforms that support said features. This makes implementing said features pretty easy
+// since we do not have to worry about complicated feature detection in the low level implementations.
+//
+// The macro implementation may asume that all passed in types are trivially constructible thus it is
+// free to create local variables of the passed in types as it may please.
+// It may also assume that all passed in types are trivially copyable as well.
+// It cannot assume any passed in type is any given type thus is a specific type if needed, it must do an
+// EASTL_ATOMIC_TYPE_PUN_CAST() to the required type.
+//
+
+
+#include "compiler/compiler.h"
+#include "arch/arch.h"
+
+#include "atomic_macros/atomic_macros.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h
new file mode 100644
index 0000000..437b221
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h
@@ -0,0 +1,156 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H
+#define EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EABase/eabase.h>
+
+#include "atomic_macros_base.h"
+
+#include "atomic_macros_fetch_add.h"
+#include "atomic_macros_fetch_sub.h"
+
+#include "atomic_macros_fetch_and.h"
+#include "atomic_macros_fetch_xor.h"
+#include "atomic_macros_fetch_or.h"
+
+#include "atomic_macros_add_fetch.h"
+#include "atomic_macros_sub_fetch.h"
+
+#include "atomic_macros_and_fetch.h"
+#include "atomic_macros_xor_fetch.h"
+#include "atomic_macros_or_fetch.h"
+
+#include "atomic_macros_exchange.h"
+
+#include "atomic_macros_cmpxchg_weak.h"
+#include "atomic_macros_cmpxchg_strong.h"
+
+#include "atomic_macros_load.h"
+#include "atomic_macros_store.h"
+
+#include "atomic_macros_compiler_barrier.h"
+
+#include "atomic_macros_cpu_pause.h"
+
+#include "atomic_macros_memory_barrier.h"
+
+#include "atomic_macros_signal_fence.h"
+
+#include "atomic_macros_thread_fence.h"
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_8BIT) || defined(EASTL_ARCH_ATOMIC_HAS_8BIT)
+
+ #define EASTL_ATOMIC_HAS_8BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_16BIT) || defined(EASTL_ARCH_ATOMIC_HAS_16BIT)
+
+ #define EASTL_ATOMIC_HAS_16BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_32BIT) || defined(EASTL_ARCH_ATOMIC_HAS_32BIT)
+
+ #define EASTL_ATOMIC_HAS_32BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_64BIT) || defined(EASTL_ARCH_ATOMIC_HAS_64BIT)
+
+ #define EASTL_ATOMIC_HAS_64BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_128BIT) || defined(EASTL_ARCH_ATOMIC_HAS_128BIT)
+
+ #define EASTL_ATOMIC_HAS_128BIT
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_8)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_8 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_8
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_8 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_16)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_16 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_16
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_16 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_32)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_32 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_32
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_32 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_64)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_64 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_64
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_64 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_128)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_128
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128
+
+#endif
+
+// We write some of our variables in inline assembly, which MSAN
+// doesn't understand. This macro forces initialization of those
+// variables when MSAN is enabled and doesn't pay the initialization
+// cost when it's not enabled.
+#if EA_MSAN_ENABLED
+ #define EASTL_ATOMIC_DEFAULT_INIT(type, var) type var{}
+#else
+ #define EASTL_ATOMIC_DEFAULT_INIT(type, var) type var
+#endif // EA_MSAN_ENABLED
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h
new file mode 100644
index 0000000..f551a07
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h
new file mode 100644
index 0000000..6912722
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h
new file mode 100644
index 0000000..486e137
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h
@@ -0,0 +1,70 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_BASE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_BASE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_ATOMIC_INTERNAL_COMPILER_AVAILABLE(op) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_COMPILER_, op), _AVAILABLE)
+
+#define EASTL_ATOMIC_INTERNAL_ARCH_AVAILABLE(op) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op), _AVAILABLE)
+
+
+// We can't just use static_assert(false, ...) here, since on MSVC 17.10
+// the /Zc:static_assert flag makes non-dependent static_asserts in the body of a template
+// be evaluated at template-parse time, rather than at template instantion time.
+// So instead we just make the assert dependent on the type.
+#define EASTL_ATOMIC_INTERNAL_NOT_IMPLEMENTED_ERROR(...) \
+ static_assert(!eastl::is_same_v<T,T>, "eastl::atomic<T> atomic macro not implemented!")
+
+
+/* Compiler && Arch Not Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_00(op) \
+ EASTL_ATOMIC_INTERNAL_NOT_IMPLEMENTED_ERROR
+
+/* Arch Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_01(op) \
+ EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op)
+
+/* Compiler Implmented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_10(op) \
+ EA_PREPROCESSOR_JOIN(EASTL_COMPILER_, op)
+
+/* Compiler && Arch Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_11(op) \
+ EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op)
+
+
+/* This macro creates the pattern macros above for the 2x2 True-False truth table */
+#define EASTL_ATOMIC_INTERNAL_OP_HELPER1(compiler, arch, op) \
+ EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_INTERNAL_OP_PATTERN_, EA_PREPROCESSOR_JOIN(compiler, arch))(op)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// EASTL_ATOMIC_CHOOSE_OP_IMPL
+//
+// This macro chooses between the compiler or architecture implementation for a
+// given atomic operation.
+//
+// USAGE:
+//
+// EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)(ret, ptr, val)
+//
+#define EASTL_ATOMIC_CHOOSE_OP_IMPL(op) \
+ EASTL_ATOMIC_INTERNAL_OP_HELPER1( \
+ EASTL_ATOMIC_INTERNAL_COMPILER_AVAILABLE(op), \
+ EASTL_ATOMIC_INTERNAL_ARCH_AVAILABLE(op), \
+ op \
+ )
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_BASE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h
new file mode 100644
index 0000000..3cff493
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h
@@ -0,0 +1,245 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_STRONG_*(bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h
new file mode 100644
index 0000000..60ea8b0
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h
@@ -0,0 +1,245 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_WEAK_*(bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h
new file mode 100644
index 0000000..96ea6d0
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h
@@ -0,0 +1,30 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_COMPILER_BARRIER()
+//
+#define EASTL_ATOMIC_COMPILER_BARRIER() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_COMPILER_BARRIER)()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#define EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY)(val, type)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h
new file mode 100644
index 0000000..e027b57
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h
@@ -0,0 +1,22 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CPU_PAUSE()
+//
+#define EASTL_ATOMIC_CPU_PAUSE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_PAUSE)()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h
new file mode 100644
index 0000000..0681318
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h
new file mode 100644
index 0000000..701fdf3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h
new file mode 100644
index 0000000..831f1bf
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h
new file mode 100644
index 0000000..b132297
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h
new file mode 100644
index 0000000..0098064
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h
new file mode 100644
index 0000000..2887ea5
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h
new file mode 100644
index 0000000..7658059
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h
@@ -0,0 +1,75 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#define EASTL_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_8)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_8)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_8)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_16)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_16)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_16)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_32)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_32)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_32)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_64)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_64)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_64)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_128)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_128)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_128)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_READ_DEPENDS_32)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_READ_DEPENDS_64)(type, ret, ptr)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h
new file mode 100644
index 0000000..14f7be9
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h
@@ -0,0 +1,38 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CPU_MB()
+//
+#define EASTL_ATOMIC_CPU_MB() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_MB)()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CPU_WMB()
+//
+#define EASTL_ATOMIC_CPU_WMB() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_WMB)()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CPU_RMB()
+//
+#define EASTL_ATOMIC_CPU_RMB() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_RMB)()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h
new file mode 100644
index 0000000..c9ebd6e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h
new file mode 100644
index 0000000..dd16b10
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h
@@ -0,0 +1,34 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_SIGNAL_FENCE_*()
+//
+#define EASTL_ATOMIC_SIGNAL_FENCE_RELAXED() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_RELAXED)()
+
+#define EASTL_ATOMIC_SIGNAL_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_ACQUIRE)()
+
+#define EASTL_ATOMIC_SIGNAL_FENCE_RELEASE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_RELEASE)()
+
+#define EASTL_ATOMIC_SIGNAL_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_ACQ_REL)()
+
+#define EASTL_ATOMIC_SIGNAL_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_SEQ_CST)()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h
new file mode 100644
index 0000000..64b662e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h
@@ -0,0 +1,68 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_STORE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#define EASTL_ATOMIC_STORE_RELAXED_8(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_8)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_8(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_8)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_8)(type, ptr, val)
+
+
+#define EASTL_ATOMIC_STORE_RELAXED_16(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_16)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_16(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_16)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_16)(type, ptr, val)
+
+
+#define EASTL_ATOMIC_STORE_RELAXED_32(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_32)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_32(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_32)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_32)(type, ptr, val)
+
+
+#define EASTL_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_64)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_64)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_64)(type, ptr, val)
+
+
+#define EASTL_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_128)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_128)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_128)(type, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_STORE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h
new file mode 100644
index 0000000..330f38e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h
new file mode 100644
index 0000000..26492c5
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h
@@ -0,0 +1,34 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_THREAD_FENCE_*()
+//
+#define EASTL_ATOMIC_THREAD_FENCE_RELAXED() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_RELAXED)()
+
+#define EASTL_ATOMIC_THREAD_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_ACQUIRE)()
+
+#define EASTL_ATOMIC_THREAD_FENCE_RELEASE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_RELEASE)()
+
+#define EASTL_ATOMIC_THREAD_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_ACQ_REL)()
+
+#define EASTL_ATOMIC_THREAD_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_SEQ_CST)()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h
new file mode 100644
index 0000000..4227647
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_memory_order.h b/EASTL/include/EASTL/internal/atomic/atomic_memory_order.h
new file mode 100644
index 0000000..1564d87
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_memory_order.h
@@ -0,0 +1,44 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H
+#define EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+struct memory_order_relaxed_s {};
+struct memory_order_read_depends_s {};
+struct memory_order_acquire_s {};
+struct memory_order_release_s {};
+struct memory_order_acq_rel_s {};
+struct memory_order_seq_cst_s {};
+
+
+} // namespace internal
+
+
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_relaxed = internal::memory_order_relaxed_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_read_depends = internal::memory_order_read_depends_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_acquire = internal::memory_order_acquire_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_release = internal::memory_order_release_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_acq_rel = internal::memory_order_acq_rel_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_seq_cst = internal::memory_order_seq_cst_s{};
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_pointer.h b/EASTL/include/EASTL/internal/atomic/atomic_pointer.h
new file mode 100644
index 0000000..c0b19e6
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_pointer.h
@@ -0,0 +1,281 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_POINTER_H
+#define EASTL_ATOMIC_INTERNAL_POINTER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_pointer_base;
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(funcName) \
+ template <typename Order> \
+ T* funcName(ptrdiff_t /*arg*/, Order /*order*/) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+ } \
+ \
+ template <typename Order> \
+ T* funcName(ptrdiff_t /*arg*/, Order /*order*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ T* funcName(ptrdiff_t /*arg*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \
+ T* operator operatorOp() volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ T* operator operatorOp(int) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
+ T* operator operatorOp(ptrdiff_t /*arg*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+
+ template <typename T, unsigned width>
+ struct atomic_pointer_base<T*, width> : public atomic_base_width<T*, width>
+ {
+ private:
+
+ using Base = atomic_base_width<T*, width>;
+
+ public: /* ctors */
+
+ EA_CONSTEXPR atomic_pointer_base(T* desired) EA_NOEXCEPT
+ : Base{ desired }
+ {
+ }
+
+ EA_CONSTEXPR atomic_pointer_base() EA_NOEXCEPT = default;
+
+ atomic_pointer_base(const atomic_pointer_base&) EA_NOEXCEPT = delete;
+
+ public: /* assignment operators */
+
+ using Base::operator=;
+
+ atomic_pointer_base& operator=(const atomic_pointer_base&) EA_NOEXCEPT = delete;
+ atomic_pointer_base& operator=(const atomic_pointer_base&) volatile EA_NOEXCEPT = delete;
+
+ public: /* fetch_add */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(fetch_add)
+
+ public: /* add_fetch */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(add_fetch)
+
+ public: /* fetch_sub */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(fetch_sub)
+
+ public: /* sub_fetch */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(sub_fetch)
+
+ public: /* operator++ && operator-- */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++)
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--)
+
+ public: /* operator+= && operator-= */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=)
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=)
+
+ };
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_pointer_width;
+
+#define EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits) \
+ T* retVal; \
+ { \
+ ptr_integral_type retType; \
+ ptr_integral_type addend = static_cast<ptr_integral_type>(arg) * static_cast<ptr_integral_type>(sizeof(T)); \
+ \
+ EA_PREPROCESSOR_JOIN(op, bits)(ptr_integral_type, retType, EASTL_ATOMIC_INTEGRAL_CAST(ptr_integral_type, this->GetAtomicAddress()), addend); \
+ \
+ retVal = reinterpret_cast<T*>(retType); \
+ } \
+ return retVal;
+
+#define EASTL_ATOMIC_POINTER_FETCH_IMPL(funcName, op, bits) \
+ T* funcName(ptrdiff_t arg) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(T); \
+ EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits); \
+ }
+
+#define EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \
+ T* funcName(ptrdiff_t arg, orderType) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(T); \
+ EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits); \
+ }
+
+#define EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, Order) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order)
+
+#define EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \
+ using Base::funcName; \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_IMPL(funcName, EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits)
+
+#define EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \
+ using Base::operator operatorOp; \
+ \
+ T* operator operatorOp() EA_NOEXCEPT \
+ { \
+ return preFuncName(1, eastl::memory_order_seq_cst); \
+ } \
+ \
+ T* operator operatorOp(int) EA_NOEXCEPT \
+ { \
+ return postFuncName(1, eastl::memory_order_seq_cst); \
+ }
+
+#define EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \
+ using Base::operator operatorOp; \
+ \
+ T* operator operatorOp(ptrdiff_t arg) EA_NOEXCEPT \
+ { \
+ return funcName(arg, eastl::memory_order_seq_cst); \
+ }
+
+
+#define EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(bytes, bits) \
+ template <typename T> \
+ struct atomic_pointer_width<T*, bytes> : public atomic_pointer_base<T*, bytes> \
+ { \
+ private: \
+ \
+ using Base = atomic_pointer_base<T*, bytes>; \
+ using u_ptr_integral_type = EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(uint, bits), _t); \
+ using ptr_integral_type = EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(int, bits), _t); \
+ \
+ public: /* ctors */ \
+ \
+ EA_CONSTEXPR atomic_pointer_width(T* desired) EA_NOEXCEPT \
+ : Base{ desired } \
+ { \
+ } \
+ \
+ EA_CONSTEXPR atomic_pointer_width() EA_NOEXCEPT = default; \
+ \
+ atomic_pointer_width(const atomic_pointer_width&) EA_NOEXCEPT = delete; \
+ \
+ public: /* assignment operators */ \
+ \
+ using Base::operator=; \
+ \
+ atomic_pointer_width& operator=(const atomic_pointer_width&) EA_NOEXCEPT = delete; \
+ atomic_pointer_width& operator=(const atomic_pointer_width&) volatile EA_NOEXCEPT = delete; \
+ \
+ public: /* fetch_add */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \
+ \
+ public: /* add_fetch */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \
+ \
+ public: /* fetch_sub */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \
+ \
+ public: /* sub_fetch */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \
+ \
+ public: /* operator++ && operator-- */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \
+ \
+ public: /* operator+= && operator-= */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \
+ \
+ public: \
+ \
+ using Base::load; \
+ \
+ T* load(eastl::internal::memory_order_read_depends_s) EA_NOEXCEPT \
+ { \
+ T* retPointer; \
+ EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_LOAD_READ_DEPENDS_, bits)(T*, retPointer, this->GetAtomicAddress()); \
+ return retPointer; \
+ } \
+ };
+
+
+#if defined(EASTL_ATOMIC_HAS_32BIT) && EA_PLATFORM_PTR_SIZE == 4
+ EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(4, 32)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT) && EA_PLATFORM_PTR_SIZE == 8
+ EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(8, 64)
+#endif
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_POINTER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_pop_compiler_options.h b/EASTL/include/EASTL/internal/atomic/atomic_pop_compiler_options.h
new file mode 100644
index 0000000..92f241a
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_pop_compiler_options.h
@@ -0,0 +1,11 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+/* NOTE: No Header Guard */
+
+
+EA_RESTORE_VC_WARNING();
+
+EA_RESTORE_CLANG_WARNING();
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_push_compiler_options.h b/EASTL/include/EASTL/internal/atomic/atomic_push_compiler_options.h
new file mode 100644
index 0000000..c5a5471
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_push_compiler_options.h
@@ -0,0 +1,17 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+/* NOTE: No Header Guard */
+
+
+// 'class' : multiple assignment operators specified
+EA_DISABLE_VC_WARNING(4522);
+
+// misaligned atomic operation may incur significant performance penalty
+// The above warning is emitted in earlier versions of clang incorrectly.
+// All eastl::atomic<T> objects are size aligned.
+// This is static and runtime asserted.
+// Thus we disable this warning.
+EA_DISABLE_CLANG_WARNING(-Watomic-alignment);
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_size_aligned.h b/EASTL/include/EASTL/internal/atomic/atomic_size_aligned.h
new file mode 100644
index 0000000..f503375
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_size_aligned.h
@@ -0,0 +1,197 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H
+#define EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(funcName) \
+ template <typename OrderSuccess, typename OrderFailure> \
+ bool funcName(T& /*expected*/, T /*desired*/, \
+ OrderSuccess /*orderSuccess*/, \
+ OrderFailure /*orderFailure*/) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+ return false; \
+ } \
+ \
+ template <typename OrderSuccess, typename OrderFailure> \
+ bool funcName(T& /*expected*/, T /*desired*/, \
+ OrderSuccess /*orderSuccess*/, \
+ OrderFailure /*orderFailure*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ return false; \
+ } \
+ \
+ template <typename Order> \
+ bool funcName(T& /*expected*/, T /*desired*/, \
+ Order /*order*/) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+ return false; \
+ } \
+ \
+ template <typename Order> \
+ bool funcName(T& /*expected*/, T /*desired*/, \
+ Order /*order*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ return false; \
+ } \
+ \
+ bool funcName(T& /*expected*/, T /*desired*/) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ return false; \
+ }
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_WEAK_IMPL() \
+ EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(compare_exchange_weak)
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_STRONG_IMPL() \
+ EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(compare_exchange_strong)
+
+
+ template<typename T>
+ struct atomic_size_aligned
+ {
+ public: /* ctors */
+
+ EA_CONSTEXPR atomic_size_aligned(T desired) EA_NOEXCEPT
+ : mAtomic{ desired }
+ {
+ }
+
+ EA_CONSTEXPR atomic_size_aligned() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v<T>)
+ : mAtomic{} /* Value-Initialize which will Zero-Initialize Trivial Constructible types */
+ {
+ }
+
+ atomic_size_aligned(const atomic_size_aligned&) EA_NOEXCEPT = delete;
+
+ public: /* store */
+
+ template <typename Order>
+ void store(T /*desired*/, Order /*order*/) EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+ }
+
+ template <typename Order>
+ void store(T /*desired*/, Order /*order*/) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ void store(T /*desired*/) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ public: /* load */
+
+ template <typename Order>
+ T load(Order /*order*/) const EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+ }
+
+ template <typename Order>
+ T load(Order /*order*/) const volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ T load() const volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ public: /* exchange */
+
+ template <typename Order>
+ T exchange(T /*desired*/, Order /*order*/) EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+ }
+
+ template <typename Order>
+ T exchange(T /*desired*/, Order /*order*/) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ T exchange(T /*desired*/) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ public: /* compare_exchange_weak */
+
+ EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_WEAK_IMPL()
+
+ public: /* compare_exchange_strong */
+
+ EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_STRONG_IMPL()
+
+ public: /* assignment operator */
+
+ T operator=(T /*desired*/) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ atomic_size_aligned& operator=(const atomic_size_aligned&) EA_NOEXCEPT = delete;
+ atomic_size_aligned& operator=(const atomic_size_aligned&) volatile EA_NOEXCEPT = delete;
+
+ protected: /* Accessors */
+
+ T* GetAtomicAddress() const EA_NOEXCEPT
+ {
+ return eastl::addressof(mAtomic);
+ }
+
+ private:
+
+ /**
+ * Some compilers such as MSVC will align 64-bit values on 32-bit machines on
+ * 4-byte boundaries which can ruin the atomicity guarantees.
+ *
+ * Ensure everything is size aligned.
+ *
+ * mutable is needed in cases such as when loads are only guaranteed to be atomic
+ * using a compare exchange, such as for 128-bit atomics, so we need to be able
+ * to have write access to the variable as one example.
+ */
+ EA_ALIGN(sizeof(T)) mutable T mAtomic;
+ };
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H */
diff --git a/EASTL/include/EASTL/internal/atomic/atomic_standalone.h b/EASTL/include/EASTL/internal/atomic/atomic_standalone.h
new file mode 100644
index 0000000..011d5fb
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/atomic_standalone.h
@@ -0,0 +1,470 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_STANDALONE_H
+#define EASTL_ATOMIC_INTERNAL_STANDALONE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_compare_exchange_strong(eastl::atomic<T>*, T* expected, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_strong(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type* expected,
+ typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+ return atomicObj->compare_exchange_strong(*expected, desired);
+}
+
+template <typename T, typename OrderSuccess, typename OrderFailure>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_strong_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type* expected,
+ typename eastl::atomic<T>::value_type desired,
+ OrderSuccess orderSuccess, OrderFailure orderFailure) EA_NOEXCEPT
+{
+ return atomicObj->compare_exchange_strong(*expected, desired, orderSuccess, orderFailure);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_compare_exchange_weak(eastl::atomic<T>*, T* expected, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_weak(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type* expected,
+ typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+ return atomicObj->compare_exchange_weak(*expected, desired);
+}
+
+template <typename T, typename OrderSuccess, typename OrderFailure>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_weak_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type* expected,
+ typename eastl::atomic<T>::value_type desired,
+ OrderSuccess orderSuccess, OrderFailure orderFailure) EA_NOEXCEPT
+{
+ return atomicObj->compare_exchange_weak(*expected, desired, orderSuccess, orderFailure);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_xor(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_xor(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_xor(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_xor_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_xor(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_xor_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_xor_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->xor_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_xor_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->xor_fetch(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_or(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_or(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_or(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_or_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_or(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_or_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_or_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->or_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_or_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->or_fetch(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_and(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_and(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_and(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_and_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_and(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_and_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_and_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->and_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_and_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->and_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_sub(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_sub(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_sub(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_sub_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_sub(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_sub_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_sub_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+ return atomicObj->sub_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_sub_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->sub_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_add(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_add(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_add(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_add_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_add(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_add_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_add_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+ return atomicObj->add_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_add_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->add_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_exchange(eastl::atomic<T>*, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_exchange(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+ return atomicObj->exchange(desired);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_exchange_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type desired,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->exchange(desired, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_load(const eastl::atomic<T>*)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load(const eastl::atomic<T>* atomicObj) EA_NOEXCEPT
+{
+ return atomicObj->load();
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_explicit(const eastl::atomic<T>* atomicObj, Order order) EA_NOEXCEPT
+{
+ return atomicObj->load(order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_load_cond(const eastl::atomic<T>*)
+//
+template <typename T, typename Predicate>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_cond(const eastl::atomic<T>* atomicObj, Predicate pred) EA_NOEXCEPT
+{
+ for (;;)
+ {
+ typename eastl::atomic<T>::value_type ret = atomicObj->load();
+
+ if (pred(ret))
+ {
+ return ret;
+ }
+
+ EASTL_ATOMIC_CPU_PAUSE();
+ }
+}
+
+template <typename T, typename Predicate, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_cond_explicit(const eastl::atomic<T>* atomicObj, Predicate pred, Order order) EA_NOEXCEPT
+{
+ for (;;)
+ {
+ typename eastl::atomic<T>::value_type ret = atomicObj->load(order);
+
+ if (pred(ret))
+ {
+ return ret;
+ }
+
+ EASTL_ATOMIC_CPU_PAUSE();
+ }
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void atomic_store(eastl::atomic<T>*, T)
+//
+template <typename T>
+EASTL_FORCE_INLINE void atomic_store(eastl::atomic<T>* atomicObj, typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+ atomicObj->store(desired);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE void atomic_store_explicit(eastl::atomic<T>* atomicObj, typename eastl::atomic<T>::value_type desired, Order order) EA_NOEXCEPT
+{
+ atomicObj->store(desired, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::atomic_thread_fence(Order)
+//
+template <typename Order>
+EASTL_FORCE_INLINE void atomic_thread_fence(Order) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_RELAXED();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_ACQUIRE();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_RELEASE();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_ACQ_REL();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_SEQ_CST();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::atomic_signal_fence(Order)
+//
+template <typename Order>
+EASTL_FORCE_INLINE void atomic_signal_fence(Order) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_RELAXED();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_ACQUIRE();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_RELEASE();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_ACQ_REL();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_SEQ_CST();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::compiler_barrier()
+//
+EASTL_FORCE_INLINE void compiler_barrier() EA_NOEXCEPT
+{
+ EASTL_ATOMIC_COMPILER_BARRIER();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::compiler_barrier_data_dependency(const T&)
+//
+template <typename T>
+EASTL_FORCE_INLINE void compiler_barrier_data_dependency(const T& val) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, T);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::cpu_pause()
+//
+EASTL_FORCE_INLINE void cpu_pause() EA_NOEXCEPT
+{
+ EASTL_ATOMIC_CPU_PAUSE();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// bool eastl::atomic_is_lock_free(eastl::atomic<T>*)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_is_lock_free(const eastl::atomic<T>* atomicObj) EA_NOEXCEPT
+{
+ return atomicObj->is_lock_free();
+}
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_STANDALONE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler.h
new file mode 100644
index 0000000..fc12879
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler.h
@@ -0,0 +1,120 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Include the compiler specific implementations
+//
+#if defined(EA_COMPILER_GNUC) || defined(__clang__)
+
+ #include "gcc/compiler_gcc.h"
+
+#elif defined(EA_COMPILER_MSVC)
+
+ #include "msvc/compiler_msvc.h"
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+/**
+ * NOTE:
+ *
+ * This can be used by specific compiler implementations to implement a data dependency compiler barrier.
+ * Some compiler barriers do not take in input dependencies as is possible with the gcc asm syntax.
+ * Thus we need a way to create a false dependency on the input variable so the compiler does not dead-store
+ * remove it.
+ * A volatile function pointer ensures the compiler must always load the function pointer and call thru it
+ * since the compiler cannot reason about any side effects. Thus the compiler must always assume the
+ * input variable may be accessed and thus cannot be dead-stored. This technique works even in the presence
+ * of Link-Time Optimization. A compiler barrier with a data dependency is useful in these situations.
+ *
+ * void foo()
+ * {
+ * eastl::vector<int> v;
+ * while (Benchmark.ContinueRunning())
+ * {
+ * v.push_back(0);
+ * eastl::compiler_barrier(); OR eastl::compiler_barrier_data_dependency(v);
+ * }
+ * }
+ *
+ * We are trying to benchmark the push_back function of a vector. The vector v has only local scope.
+ * The compiler is well within its writes to remove all accesses to v even with the compiler barrier
+ * because there are no observable uses of the vector v.
+ * The compiler barrier data dependency ensures there is an input dependency on the variable so that
+ * it isn't removed. This is also useful when writing test code that the compiler may remove.
+ */
+
+typedef void (*CompilerBarrierDataDependencyFuncPtr)(void*);
+
+extern EASTL_API volatile CompilerBarrierDataDependencyFuncPtr gCompilerBarrierDataDependencyFunc;
+
+
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC(ptr) \
+ eastl::internal::gCompilerBarrierDataDependencyFunc(ptr)
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "compiler_fetch_add.h"
+#include "compiler_fetch_sub.h"
+
+#include "compiler_fetch_and.h"
+#include "compiler_fetch_xor.h"
+#include "compiler_fetch_or.h"
+
+#include "compiler_add_fetch.h"
+#include "compiler_sub_fetch.h"
+
+#include "compiler_and_fetch.h"
+#include "compiler_xor_fetch.h"
+#include "compiler_or_fetch.h"
+
+#include "compiler_exchange.h"
+
+#include "compiler_cmpxchg_weak.h"
+#include "compiler_cmpxchg_strong.h"
+
+#include "compiler_load.h"
+#include "compiler_store.h"
+
+#include "compiler_barrier.h"
+
+#include "compiler_cpu_pause.h"
+
+#include "compiler_memory_barrier.h"
+
+#include "compiler_signal_fence.h"
+
+#include "compiler_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h
new file mode 100644
index 0000000..763921c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h
new file mode 100644
index 0000000..7b1e0a4
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_barrier.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_barrier.h
new file mode 100644
index 0000000..550070e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_barrier.h
@@ -0,0 +1,36 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER()
+//
+#if defined(EASTL_COMPILER_ATOMIC_COMPILER_BARRIER)
+ #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#if defined(EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY)
+ #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h
new file mode 100644
index 0000000..2ee2971
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h
@@ -0,0 +1,430 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h
new file mode 100644
index 0000000..9bc1a62
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h
@@ -0,0 +1,430 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h
new file mode 100644
index 0000000..073b3fb
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h
@@ -0,0 +1,32 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_PAUSE()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_PAUSE)
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE_AVAILABLE 1
+
+#else
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+ ((void)0)
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE_AVAILABLE 1
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_exchange.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_exchange.h
new file mode 100644
index 0000000..d82b199
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_exchange.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h
new file mode 100644
index 0000000..e6c4238
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h
new file mode 100644
index 0000000..b0976fc
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h
new file mode 100644
index 0000000..2e6cfda
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h
new file mode 100644
index 0000000..d7ed86c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h
new file mode 100644
index 0000000..10cf7d9
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_load.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_load.h
new file mode 100644
index 0000000..734dbb8
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_load.h
@@ -0,0 +1,139 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/**
+ * NOTE:
+ *
+ * These are used for data-dependent reads thru a pointer. It is safe
+ * to assume that pointer-sized reads are atomic on any given platform.
+ * This implementation assumes the hardware doesn't reorder dependent
+ * loads unlike the DEC Alpha.
+ */
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr) \
+ { \
+ static_assert(eastl::is_pointer_v<type>, "eastl::atomic<T> : Read Depends Type must be a Pointer Type!"); \
+ static_assert(eastl::is_pointer_v<eastl::remove_pointer_t<decltype(ptr)>>, "eastl::atomic<T> : Read Depends Ptr must be a Pointer to a Pointer!"); \
+ \
+ ret = (*EASTL_ATOMIC_VOLATILE_CAST(ptr)); \
+ }
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \
+ EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \
+ EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h
new file mode 100644
index 0000000..ac3923c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h
@@ -0,0 +1,47 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_MB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_MB)
+ #define EASTL_COMPILER_ATOMIC_CPU_MB_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CPU_MB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_WMB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_WMB)
+ #define EASTL_COMPILER_ATOMIC_CPU_WMB_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CPU_WMB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_RMB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_RMB)
+ #define EASTL_COMPILER_ATOMIC_CPU_RMB_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CPU_RMB_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h
new file mode 100644
index 0000000..a26a72c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h
new file mode 100644
index 0000000..25b0b74
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h
@@ -0,0 +1,49 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*()
+//
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_store.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_store.h
new file mode 100644
index 0000000..1a553e2
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_store.h
@@ -0,0 +1,113 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h
new file mode 100644
index 0000000..4b7eea9
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h
new file mode 100644
index 0000000..01d8f0f
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h
@@ -0,0 +1,49 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_THREAD_FENCE_*()
+//
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h
new file mode 100644
index 0000000..05680bd
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h
new file mode 100644
index 0000000..26a99c2
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h
@@ -0,0 +1,154 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/**
+ * NOTE:
+ *
+ * gcc __atomic builtins may defer to function calls in libatomic.so for architectures that do not
+ * support atomic instructions of a given size. These functions will be implemented with pthread_mutex_t.
+ * It also requires the explicit linking against the compiler runtime libatomic.so.
+ * On architectures that do not support atomics, like armv6 the builtins may defer to kernel helpers
+ * or on classic uniprocessor systems just disable interrupts.
+ *
+ * We do not want to have to link against libatomic.so or fall into the trap of our atomics degrading
+ * into locks. We would rather have user-code explicitly use locking primitives if their code cannot
+ * be satisfied with atomic instructions on the given platform.
+ */
+static_assert(__atomic_always_lock_free(1, 0), "eastl::atomic<T> where sizeof(T) == 1 must be lock-free!");
+static_assert(__atomic_always_lock_free(2, 0), "eastl::atomic<T> where sizeof(T) == 2 must be lock-free!");
+static_assert(__atomic_always_lock_free(4, 0), "eastl::atomic<T> where sizeof(T) == 4 must be lock-free!");
+#if EA_PLATFORM_PTR_SIZE == 8
+ static_assert(__atomic_always_lock_free(8, 0), "eastl::atomic<T> where sizeof(T) == 8 must be lock-free!");
+#endif
+
+/**
+ * NOTE:
+ *
+ * The following can fail on gcc/clang on 64-bit systems.
+ * Firstly, it depends on the -march setting on clang whether or not it calls out to libatomic for 128-bit operations.
+ * Second, gcc always calls out to libatomic for 128-bit atomics. It is unclear if it uses locks
+ * or tries to look at the cpuid and use cmpxchg16b if its available.
+ * gcc mailing lists argue that since load must be implemented with cmpxchg16b, then the __atomic bultin
+ * cannot be used in read-only memory which is why they always call out to libatomic.
+ * There is no way to tell gcc to not do that, unfortunately.
+ * We don't care about the read-only restriction because our eastl::atomic<T> object is mutable
+ * and also msvc doesn't enforce this restriction thus to be fully platform agnostic we cannot either.
+ *
+ * Therefore, the follow static_assert is commented out for the time being, as it always fails on these compilers.
+ * We still guarantee 128-bit atomics are lock-free by handrolling the inline assembly ourselves.
+ *
+ * static_assert(__atomic_always_lock_free(16, 0), "eastl::atomic<T> where sizeof(T) == 16 must be lock-free!");
+ */
+
+/**
+ * NOTE:
+ *
+ * Why do we do the cast to the unsigned fixed width types for every operation even though gcc/clang builtins are generics?
+ * Well gcc/clang correctly-incorrectly call out to libatomic and do locking on user types that may be potentially misaligned.
+ * struct UserType { uint8_t a,b; }; This given struct is 2 bytes in size but has only 1 byte alignment.
+ * gcc/clang cannot and doesn't know that we always guarantee every type T is size aligned within eastl::atomic<T>.
+ * Therefore it always emits calls into libatomic and does locking for structs like these which we do not want.
+ * Therefore you'll notice we always cast each atomic ptr type to the equivalent unsigned fixed width type when doing the atomic operations.
+ * This ensures all user types are size aligned and thus are lock free.
+ */
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_HAS_8BIT
+#define EASTL_COMPILER_ATOMIC_HAS_16BIT
+#define EASTL_COMPILER_ATOMIC_HAS_32BIT
+#define EASTL_COMPILER_ATOMIC_HAS_64BIT
+
+#if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_COMPILER_ATOMIC_HAS_128BIT
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 uint8_t
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 uint16_t
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 uint32_t
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 uint64_t
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 __uint128_t
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, gccMemoryOrder) \
+ { \
+ integralType retIntegral; \
+ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \
+ \
+ retIntegral = fetchIntrinsic(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), valIntegral, gccMemoryOrder); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, weak, successOrder, failOrder) \
+ ret = __atomic_compare_exchange(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ EASTL_ATOMIC_INTEGRAL_CAST(integralType, (expected)), \
+ EASTL_ATOMIC_INTEGRAL_CAST(integralType, &(desired)), \
+ weak, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_INTRIN_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ { \
+ integralType retIntegral; \
+ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \
+ \
+ __atomic_exchange(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ &valIntegral, &retIntegral, gccMemoryOrder); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "compiler_gcc_fetch_add.h"
+#include "compiler_gcc_fetch_sub.h"
+
+#include "compiler_gcc_fetch_and.h"
+#include "compiler_gcc_fetch_xor.h"
+#include "compiler_gcc_fetch_or.h"
+
+#include "compiler_gcc_add_fetch.h"
+#include "compiler_gcc_sub_fetch.h"
+
+#include "compiler_gcc_and_fetch.h"
+#include "compiler_gcc_xor_fetch.h"
+#include "compiler_gcc_or_fetch.h"
+
+#include "compiler_gcc_exchange.h"
+
+#include "compiler_gcc_cmpxchg_weak.h"
+#include "compiler_gcc_cmpxchg_strong.h"
+
+#include "compiler_gcc_load.h"
+#include "compiler_gcc_store.h"
+
+#include "compiler_gcc_barrier.h"
+
+#include "compiler_gcc_cpu_pause.h"
+
+#include "compiler_gcc_signal_fence.h"
+
+#include "compiler_gcc_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h
new file mode 100644
index 0000000..1d19196
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_add_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h
new file mode 100644
index 0000000..a35307f
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_and_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h
new file mode 100644
index 0000000..64e8e54
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h
@@ -0,0 +1,30 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER()
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \
+ __asm__ __volatile__ ("" ::: "memory")
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \
+ __asm__ __volatile__ ("" : /* Output Operands */ : "r"(&(val)) : "memory")
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h
new file mode 100644
index 0000000..3e47cf2
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h
@@ -0,0 +1,182 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(integralType, type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, false, successOrder, failOrder)
+
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint8_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint16_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint32_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint64_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(__uint128_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h
new file mode 100644
index 0000000..f55fe3a
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h
@@ -0,0 +1,182 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(integralType, type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, true, successOrder, failOrder)
+
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint8_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint16_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint32_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint64_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(__uint128_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type,ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h
new file mode 100644
index 0000000..9d4ac35
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h
@@ -0,0 +1,31 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_PAUSE()
+//
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+ __asm__ __volatile__ ("pause")
+
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+ __asm__ __volatile__ ("yield")
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h
new file mode 100644
index 0000000..a332554
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_INTRIN_N(integralType, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h
new file mode 100644
index 0000000..98abbb8
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_add, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h
new file mode 100644
index 0000000..0dfb81d
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_and, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h
new file mode 100644
index 0000000..ba259b7
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_or, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h
new file mode 100644
index 0000000..c8be225
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_sub, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h
new file mode 100644
index 0000000..4ec6d67
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_xor, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h
new file mode 100644
index 0000000..a4a3ebf
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_LOAD_N(integralType, type, ret, ptr, gccMemoryOrder) \
+ { \
+ integralType retIntegral; \
+ __atomic_load(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), &retIntegral, gccMemoryOrder); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+#define EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(uint8_t, type, ret, ptr, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(uint16_t, type, ret, ptr, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(uint32_t, type, ret, ptr, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(uint64_t, type, ret, ptr, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(__uint128_t, type, ret, ptr, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h
new file mode 100644
index 0000000..9e4db3e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_or_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h
new file mode 100644
index 0000000..16dff14
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h
@@ -0,0 +1,38 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_SIGNAL_FENCE(gccMemoryOrder) \
+ __atomic_signal_fence(gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*()
+//
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h
new file mode 100644
index 0000000..04a28ac
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h
@@ -0,0 +1,89 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_STORE_N(integralType, ptr, val, gccMemoryOrder) \
+ { \
+ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \
+ __atomic_store(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), &valIntegral, gccMemoryOrder); \
+ }
+
+
+#define EASTL_GCC_ATOMIC_STORE_8(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(uint8_t, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_STORE_16(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(uint16_t, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_STORE_32(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(uint32_t, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_STORE_64(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(uint64_t, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_STORE_128(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(__uint128_t, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h
new file mode 100644
index 0000000..62f8cd9
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_sub_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h
new file mode 100644
index 0000000..0dd005e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h
@@ -0,0 +1,38 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_THREAD_FENCE(gccMemoryOrder) \
+ __atomic_thread_fence(gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_THREAD_FENCE_*()
+//
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h
new file mode 100644
index 0000000..4827d79
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_xor_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h
new file mode 100644
index 0000000..90901ee
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h
@@ -0,0 +1,259 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+EA_DISABLE_ALL_VC_WARNINGS();
+#include <intrin.h>
+EA_RESTORE_ALL_VC_WARNINGS();
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_HAS_8BIT
+#define EASTL_COMPILER_ATOMIC_HAS_16BIT
+#define EASTL_COMPILER_ATOMIC_HAS_32BIT
+#define EASTL_COMPILER_ATOMIC_HAS_64BIT
+
+#if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_COMPILER_ATOMIC_HAS_128BIT
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 char
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 short
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 long
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 __int64
+
+namespace eastl
+{
+
+namespace internal
+{
+
+struct FixedWidth128
+{
+ __int64 value[2];
+};
+
+} // namespace internal
+
+} // namespace eastl
+
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 eastl::internal::FixedWidth128
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ *
+ * Unfortunately MSVC Intrinsics depend on the architecture
+ * that we are compiling for.
+ * These are some indirection macros to make our lives easier and
+ * ensure the least possible amount of copy-paste to reduce programmer errors.
+ *
+ * All compiler implementations end up deferring to the below macros.
+ */
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+
+
+ #define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
+ ret = Intrinsic(ptr, val)
+
+ #define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
+ ret = Intrinsic(ptr, val)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \
+ ret = Intrinsic(ptr, exchange, comparand)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \
+ ret = _InterlockedCompareExchange128_np(ptr, exchangeHigh, exchangeLow, comparandResult)
+
+
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+
+
+ #define EASTL_MSVC_INTRINSIC_RELAXED(Intrinsic) \
+ EA_PREPROCESSOR_JOIN(Intrinsic, _nf)
+
+ #define EASTL_MSVC_INTRINSIC_ACQUIRE(Intrinsic) \
+ EA_PREPROCESSOR_JOIN(Intrinsic, _acq)
+
+ #define EASTL_MSVC_INTRINSIC_RELEASE(Intrinsic) \
+ EA_PREPROCESSOR_JOIN(Intrinsic, _rel)
+
+ #define EASTL_MSVC_INTRINSIC_ACQ_REL(Intrinsic) \
+ Intrinsic
+
+ #define EASTL_MSVC_INTRINSIC_SEQ_CST(Intrinsic) \
+ Intrinsic
+
+
+ #define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
+ ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val)
+
+ #define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
+ ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \
+ ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, exchange, comparand)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \
+ ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(_InterlockedCompareExchange128)(ptr, exchangeHigh, exchangeLow, comparandResult)
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_MSVC_NOP_POST_INTRIN_COMPUTE(ret, lhs, rhs)
+
+#define EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE(ret, val) \
+ ret = (val)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \
+ { \
+ integralType retIntegral; \
+ type valCompute; \
+ \
+ PRE_INTRIN_COMPUTE(valCompute, (val)); \
+ const integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, valCompute); \
+ \
+ EASTL_MSVC_ATOMIC_FETCH_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ valIntegral, MemoryOrder, fetchIntrinsic); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ POST_INTRIN_COMPUTE(ret, ret, (val)); \
+ }
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ { \
+ integralType retIntegral; \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)), MemoryOrder, \
+ exchangeIntrinsic); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \
+ { \
+ integralType comparandIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, *(expected)); \
+ integralType oldIntegral; \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(oldIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ comparandIntegral, EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (desired)), \
+ MemoryOrder, cmpxchgStrongIntrinsic); \
+ \
+ if (oldIntegral == comparandIntegral) \
+ { \
+ ret = true; \
+ } \
+ else \
+ { \
+ *(expected) = EASTL_ATOMIC_TYPE_PUN_CAST(type, oldIntegral); \
+ ret = false; \
+ } \
+ }
+
+/**
+ * In my own opinion, I found the wording on Microsoft docs a little confusing.
+ * ExchangeHigh means the top 8 bytes so (ptr + 8).
+ * ExchangeLow means the low 8 butes so (ptr).
+ * Endianness does not matter since we are just loading data and comparing data.
+ * Thought of as memcpy() and memcmp() function calls whereby the layout of the
+ * data itself is irrelevant.
+ * Only after we type pun back to the original type, and load from memory does
+ * the layout of the data matter again.
+ */
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder) \
+ { \
+ union TypePun \
+ { \
+ type templateType; \
+ \
+ struct exchange128 \
+ { \
+ __int64 value[2]; \
+ }; \
+ \
+ struct exchange128 exchangePun; \
+ }; \
+ \
+ union TypePun typePun = { (desired) }; \
+ \
+ unsigned char cmpxchgRetChar; \
+ cmpxchgRetChar = EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(cmpxchgRetChar, EASTL_ATOMIC_VOLATILE_TYPE_CAST(__int64, (ptr)), \
+ EASTL_ATOMIC_TYPE_CAST(__int64, (expected)), \
+ typePun.exchangePun.value[1], typePun.exchangePun.value[0], \
+ MemoryOrder); \
+ \
+ ret = static_cast<bool>(cmpxchgRetChar); \
+ }
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE) \
+ EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, EASTL_MSVC_NOP_POST_INTRIN_COMPUTE)
+
+#define EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \
+ EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "compiler_msvc_fetch_add.h"
+#include "compiler_msvc_fetch_sub.h"
+
+#include "compiler_msvc_fetch_and.h"
+#include "compiler_msvc_fetch_xor.h"
+#include "compiler_msvc_fetch_or.h"
+
+#include "compiler_msvc_add_fetch.h"
+#include "compiler_msvc_sub_fetch.h"
+
+#include "compiler_msvc_and_fetch.h"
+#include "compiler_msvc_xor_fetch.h"
+#include "compiler_msvc_or_fetch.h"
+
+#include "compiler_msvc_exchange.h"
+
+#include "compiler_msvc_cmpxchg_weak.h"
+#include "compiler_msvc_cmpxchg_strong.h"
+
+#include "compiler_msvc_barrier.h"
+
+#include "compiler_msvc_cpu_pause.h"
+
+#include "compiler_msvc_signal_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h
new file mode 100644
index 0000000..12fc4b0
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h
@@ -0,0 +1,104 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE(ret, val, addend) \
+ ret = (val) + (addend)
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h
new file mode 100644
index 0000000..70ec577
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h
@@ -0,0 +1,121 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8 _InterlockedAnd8_np
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16 _InterlockedAnd16_np
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32 _InterlockedAnd_np
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64 _InterlockedAnd64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8 _InterlockedAnd8
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16 _InterlockedAnd16
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32 _InterlockedAnd
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64 _InterlockedAnd64
+
+#endif
+
+
+#define EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE(ret, val, andend) \
+ ret = (val) & (andend)
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_N(char, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_N(short, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_N(long, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_N(__int64, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
new file mode 100644
index 0000000..90b78a6
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
@@ -0,0 +1,33 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER()
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \
+ EA_DISABLE_CLANG_WARNING(-Wdeprecated-declarations) \
+ _ReadWriteBarrier() \
+ EA_RESTORE_CLANG_WARNING()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \
+ EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC(const_cast<type*>(eastl::addressof((val)))); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
new file mode 100644
index 0000000..8217f23
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
@@ -0,0 +1,194 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16 _InterlockedCompareExchange16_np
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32 _InterlockedCompareExchange_np
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64 _InterlockedCompareExchange64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16 _InterlockedCompareExchange16
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32 _InterlockedCompareExchange
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64 _InterlockedCompareExchange64
+
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(char, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(short, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(long, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(__int64, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h
new file mode 100644
index 0000000..8f4147a
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h
@@ -0,0 +1,162 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h
new file mode 100644
index 0000000..5f436b8
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h
@@ -0,0 +1,22 @@
+/////////////////////////////////////////////////////////////////////////////////
+// copyright (c) electronic arts inc. all rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() _mm_pause()
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() __yield()
+#else
+ #error Unsupported CPU architecture for EASTL_COMPILER_ATOMIC_CPU_PAUSE
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h
new file mode 100644
index 0000000..323f1fa
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h
@@ -0,0 +1,125 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(char, _InterlockedExchange8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(short, _InterlockedExchange16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(long, _InterlockedExchange, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(__int64, _InterlockedExchange64, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \
+ { \
+ bool cmpxchgRet; \
+ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+ /* Either we do not observe *(ptr) but then the cmpxchg will fail and the observed */ \
+ /* atomic load will be returned. Or the non-atomic load got lucky and the cmpxchg succeeds */ \
+ /* because the observed value equals the value in *(ptr) thus we optimistically do a non-atomic load. */ \
+ ret = *(ptr); \
+ do \
+ { \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \
+ } while (!cmpxchgRet); \
+ }
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h
new file mode 100644
index 0000000..a951740
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h
@@ -0,0 +1,101 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h
new file mode 100644
index 0000000..96f7894
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8 _InterlockedAnd8_np
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16 _InterlockedAnd16_np
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32 _InterlockedAnd_np
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64 _InterlockedAnd64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8 _InterlockedAnd8
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16 _InterlockedAnd16
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32 _InterlockedAnd
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64 _InterlockedAnd64
+
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_N(char, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_N(short, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_N(long, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_N(__int64, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h
new file mode 100644
index 0000000..2792fc3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8 _InterlockedOr8_np
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16 _InterlockedOr16_np
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32 _InterlockedOr_np
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64 _InterlockedOr64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8 _InterlockedOr8
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16 _InterlockedOr16
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32 _InterlockedOr
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64 _InterlockedOr64
+
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_N(char, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_N(short, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_N(long, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_N(long long, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h
new file mode 100644
index 0000000..6d5d9e3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h
@@ -0,0 +1,104 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE(ret, val) \
+ ret = EASTL_ATOMIC_NEGATE_OPERAND((val))
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h
new file mode 100644
index 0000000..371153e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8 _InterlockedXor8_np
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16 _InterlockedXor16_np
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32 _InterlockedXor_np
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64 _InterlockedXor64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8 _InterlockedXor8
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16 _InterlockedXor16
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32 _InterlockedXor
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64 _InterlockedXor64
+
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_N(char, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_N(short, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_N(long, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_N(__int64, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h
new file mode 100644
index 0000000..c5b5fac
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h
@@ -0,0 +1,121 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8 _InterlockedOr8_np
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16 _InterlockedOr16_np
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32 _InterlockedOr_np
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64 _InterlockedOr64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8 _InterlockedOr8
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16 _InterlockedOr16
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32 _InterlockedOr
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64 _InterlockedOr64
+
+#endif
+
+
+#define EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE(ret, val, orend) \
+ ret = (val) | (orend)
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_N(char, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_N(short, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_N(long, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_N(__int64, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h
new file mode 100644
index 0000000..f35f577
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h
@@ -0,0 +1,34 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*()
+//
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h
new file mode 100644
index 0000000..6fb61e2
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h
@@ -0,0 +1,107 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_SUB_FETCH_PRE_INTRIN_COMPUTE(ret, val) \
+ ret = EASTL_ATOMIC_NEGATE_OPERAND((val))
+
+#define EASTL_MSVC_SUB_FETCH_POST_INTRIN_COMPUTE(ret, val, subend) \
+ ret = (val) - (subend)
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_SUB_FETCH_PRE_INTRIN_COMPUTE, EASTL_MSVC_SUB_FETCH_POST_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h
new file mode 100644
index 0000000..44ffff9
--- /dev/null
+++ b/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h
@@ -0,0 +1,121 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8 _InterlockedXor8_np
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16 _InterlockedXor16_np
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32 _InterlockedXor_np
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64 _InterlockedXor64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8 _InterlockedXor8
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16 _InterlockedXor16
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32 _InterlockedXor
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64 _InterlockedXor64
+
+#endif
+
+
+#define EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE(ret, val, xorend) \
+ ret = (val) ^ (xorend)
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_N(char, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_N(short, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_N(long, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_N(__int64, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H */
diff --git a/EASTL/include/EASTL/internal/char_traits.h b/EASTL/include/EASTL/internal/char_traits.h
new file mode 100644
index 0000000..62fe79b
--- /dev/null
+++ b/EASTL/include/EASTL/internal/char_traits.h
@@ -0,0 +1,464 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements similar functionality to char_traits which is part of
+// the C++ standard STL library specification. This is intended for internal
+// EASTL use only. Functionality can be accessed through the eastl::string or
+// eastl::string_view types.
+//
+// http://en.cppreference.com/w/cpp/string/char_traits
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_CHAR_TRAITS_H
+#define EASTL_CHAR_TRAITS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <ctype.h> // toupper, etc.
+#include <string.h> // memset, etc.
+EA_RESTORE_ALL_VC_WARNINGS()
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////////
+ /// DecodePart
+ ///
+ /// These implement UTF8/UCS2/UCS4 encoding/decoding.
+ ///
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char*& pDest, char* pDestEnd);
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char*& pDest, char* pDestEnd);
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char*& pDest, char* pDestEnd);
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char*& pDest, char* pDestEnd);
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ #if EA_CHAR8_UNIQUE
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char*& pDest, char* pDestEnd);
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ bool DecodePart(const char*& pSrc, const char* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+ bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+ bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+ #endif
+
+ #if EA_WCHAR_UNIQUE
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char*& pDest, char* pDestEnd);
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ bool DecodePart(const char*& pSrc, const char* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+ bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+ bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+ #endif
+
+ #if EA_CHAR8_UNIQUE && EA_WCHAR_UNIQUE
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+ #endif
+
+
+ #if EA_WCHAR_UNIQUE
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char*& pDest, char* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+ #endif
+ }
+
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+ #endif
+ }
+
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+ #endif
+ }
+
+ inline bool DecodePart(const char*& pSrc, const char* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+ #endif
+ }
+
+ inline bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+ #endif
+ }
+
+ inline bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+ #endif
+ }
+ #endif
+
+ #if EA_CHAR8_UNIQUE
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char*& pDest, char* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+ }
+
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+ }
+
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+ }
+
+ inline bool DecodePart(const char*& pSrc, const char* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+
+ inline bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+
+ inline bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+ #endif
+
+ #if EA_CHAR8_UNIQUE && EA_WCHAR_UNIQUE
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+ #endif
+ }
+
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+ #endif
+ }
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 'char traits' functionality
+ //
+ inline char CharToLower(char c)
+ { return (char)tolower((uint8_t)c); }
+
+ template<typename T>
+ inline T CharToLower(T c)
+ { if((unsigned)c <= 0xff) return (T)tolower((uint8_t)c); return c; }
+
+
+ inline char CharToUpper(char c)
+ { return (char)toupper((uint8_t)c); }
+
+ template<typename T>
+ inline T CharToUpper(T c)
+ { if((unsigned)c <= 0xff) return (T)toupper((uint8_t)c); return c; }
+
+
+ template <typename T>
+ int Compare(const T* p1, const T* p2, size_t n)
+ {
+ for(; n > 0; ++p1, ++p2, --n)
+ {
+ if(*p1 != *p2)
+ return (static_cast<typename make_unsigned<T>::type>(*p1) <
+ static_cast<typename make_unsigned<T>::type>(*p2)) ? -1 : 1;
+ }
+ return 0;
+ }
+
+ inline int Compare(const char* p1, const char* p2, size_t n)
+ {
+ return memcmp(p1, p2, n);
+ }
+
+
+ template <typename T>
+ inline int CompareI(const T* p1, const T* p2, size_t n)
+ {
+ for(; n > 0; ++p1, ++p2, --n)
+ {
+ const T c1 = CharToLower(*p1);
+ const T c2 = CharToLower(*p2);
+
+ if(c1 != c2)
+ return (static_cast<typename make_unsigned<T>::type>(c1) <
+ static_cast<typename make_unsigned<T>::type>(c2)) ? -1 : 1;
+ }
+ return 0;
+ }
+
+
+ template<typename T>
+ inline const T* Find(const T* p, T c, size_t n)
+ {
+ for(; n > 0; --n, ++p)
+ {
+ if(*p == c)
+ return p;
+ }
+
+ return NULL;
+ }
+
+ inline const char* Find(const char* p, char c, size_t n)
+ {
+ return (const char*)memchr(p, c, n);
+ }
+
+
+ template<typename T>
+ inline EA_CPP14_CONSTEXPR size_t CharStrlen(const T* p)
+ {
+ const auto* pCurrent = p;
+ while(*pCurrent)
+ ++pCurrent;
+ return (size_t)(pCurrent - p);
+ }
+
+
+ template <typename T>
+ inline T* CharStringUninitializedCopy(const T* pSource, const T* pSourceEnd, T* pDestination)
+ {
+ memmove(pDestination, pSource, (size_t)(pSourceEnd - pSource) * sizeof(T));
+ return pDestination + (pSourceEnd - pSource);
+ }
+
+
+ template <typename T>
+ const T* CharTypeStringFindEnd(const T* pBegin, const T* pEnd, T c)
+ {
+ const T* pTemp = pEnd;
+ while(--pTemp >= pBegin)
+ {
+ if(*pTemp == c)
+ return pTemp;
+ }
+
+ return pEnd;
+ }
+
+
+ template <typename T>
+ const T* CharTypeStringRSearch(const T* p1Begin, const T* p1End,
+ const T* p2Begin, const T* p2End)
+ {
+ // Test for zero length strings, in which case we have a match or a failure,
+ // but the return value is the same either way.
+ if((p1Begin == p1End) || (p2Begin == p2End))
+ return p1Begin;
+
+ // Test for a pattern of length 1.
+ if((p2Begin + 1) == p2End)
+ return CharTypeStringFindEnd(p1Begin, p1End, *p2Begin);
+
+ // Test for search string length being longer than string length.
+ if((p2End - p2Begin) > (p1End - p1Begin))
+ return p1End;
+
+ // General case.
+ const T* pSearchEnd = (p1End - (p2End - p2Begin) + 1);
+ const T* pCurrent1;
+ const T* pCurrent2;
+
+ while(pSearchEnd != p1Begin)
+ {
+ // Search for the last occurrence of *p2Begin.
+ pCurrent1 = CharTypeStringFindEnd(p1Begin, pSearchEnd, *p2Begin);
+ if(pCurrent1 == pSearchEnd) // If the first char of p2 wasn't found,
+ return p1End; // then we immediately have failure.
+
+ // In this case, *pTemp == *p2Begin. So compare the rest.
+ pCurrent2 = p2Begin;
+ while(*pCurrent1++ == *pCurrent2++)
+ {
+ if(pCurrent2 == p2End)
+ return (pCurrent1 - (p2End - p2Begin));
+ }
+
+ // A smarter algorithm might know to subtract more than just one,
+ // but in most cases it won't make much difference anyway.
+ --pSearchEnd;
+ }
+
+ return p1End;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringFindFirstOf(const T* p1Begin, const T* p1End, const T* p2Begin, const T* p2End)
+ {
+ for (; p1Begin != p1End; ++p1Begin)
+ {
+ for (const T* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if (*p1Begin == *pTemp)
+ return p1Begin;
+ }
+ }
+ return p1End;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringRFindFirstNotOf(const T* p1RBegin, const T* p1REnd, const T* p2Begin, const T* p2End)
+ {
+ for (; p1RBegin != p1REnd; --p1RBegin)
+ {
+ const T* pTemp;
+ for (pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if (*(p1RBegin - 1) == *pTemp)
+ break;
+ }
+ if (pTemp == p2End)
+ return p1RBegin;
+ }
+ return p1REnd;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringFindFirstNotOf(const T* p1Begin, const T* p1End, const T* p2Begin, const T* p2End)
+ {
+ for (; p1Begin != p1End; ++p1Begin)
+ {
+ const T* pTemp;
+ for (pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if (*p1Begin == *pTemp)
+ break;
+ }
+ if (pTemp == p2End)
+ return p1Begin;
+ }
+ return p1End;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringRFindFirstOf(const T* p1RBegin, const T* p1REnd, const T* p2Begin, const T* p2End)
+ {
+ for (; p1RBegin != p1REnd; --p1RBegin)
+ {
+ for (const T* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if (*(p1RBegin - 1) == *pTemp)
+ return p1RBegin;
+ }
+ }
+ return p1REnd;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringRFind(const T* pRBegin, const T* pREnd, const T c)
+ {
+ while (pRBegin > pREnd)
+ {
+ if (*(pRBegin - 1) == c)
+ return pRBegin;
+ --pRBegin;
+ }
+ return pREnd;
+ }
+
+
+ inline char* CharStringUninitializedFillN(char* pDestination, size_t n, const char c)
+ {
+ if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0.
+ memset(pDestination, (uint8_t)c, (size_t)n);
+ return pDestination + n;
+ }
+
+ template<typename T>
+ inline T* CharStringUninitializedFillN(T* pDestination, size_t n, const T c)
+ {
+ T * pDest = pDestination;
+ const T* const pEnd = pDestination + n;
+ while(pDest < pEnd)
+ *pDest++ = c;
+ return pDestination + n;
+ }
+
+
+ inline char* CharTypeAssignN(char* pDestination, size_t n, char c)
+ {
+ if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0.
+ return (char*)memset(pDestination, c, (size_t)n);
+ return pDestination;
+ }
+
+ template<typename T>
+ inline T* CharTypeAssignN(T* pDestination, size_t n, T c)
+ {
+ T* pDest = pDestination;
+ const T* const pEnd = pDestination + n;
+ while(pDest < pEnd)
+ *pDest++ = c;
+ return pDestination;
+ }
+} // namespace eastl
+
+#endif // EASTL_CHAR_TRAITS_H
diff --git a/EASTL/include/EASTL/internal/config.h b/EASTL/include/EASTL/internal/config.h
new file mode 100644
index 0000000..0564e18
--- /dev/null
+++ b/EASTL/include/EASTL/internal/config.h
@@ -0,0 +1,1938 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_CONFIG_H
+#define EASTL_INTERNAL_CONFIG_H
+
+
+///////////////////////////////////////////////////////////////////////////////
+// ReadMe
+//
+// This is the EASTL configuration file. All configurable parameters of EASTL
+// are controlled through this file. However, all the settings here can be
+// manually overridden by the user. There are three ways for a user to override
+// the settings in this file:
+//
+// - Simply edit this file.
+// - Define EASTL_USER_CONFIG_HEADER.
+// - Predefine individual defines (e.g. EASTL_ASSERT).
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_USER_CONFIG_HEADER
+//
+// This allows the user to define a header file to be #included before the
+// EASTL config.h contents are compiled. A primary use of this is to override
+// the contents of this config.h file. Note that all the settings below in
+// this file are user-overridable.
+//
+// Example usage:
+// #define EASTL_USER_CONFIG_HEADER "MyConfigOverrides.h"
+// #include <EASTL/vector.h>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef EASTL_USER_CONFIG_HEADER
+ #include EASTL_USER_CONFIG_HEADER
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EABASE_DISABLED
+//
+// The user can disable EABase usage and manually supply the configuration
+// via defining EASTL_EABASE_DISABLED and defining the appropriate entities
+// globally or via the above EASTL_USER_CONFIG_HEADER.
+//
+// Example usage:
+// #define EASTL_EABASE_DISABLED
+// #include <EASTL/vector.h>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EABASE_DISABLED
+ #include <EABase/eabase.h>
+#endif
+#include <EABase/eahave.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VERSION
+//
+// We more or less follow the conventional EA packaging approach to versioning
+// here. A primary distinction here is that minor versions are defined as two
+// digit entities (e.g. .03") instead of minimal digit entities ".3"). The logic
+// here is that the value is a counter and not a floating point fraction.
+// Note that the major version doesn't have leading zeros.
+//
+// Example version strings:
+// "0.91.00" // Major version 0, minor version 91, patch version 0.
+// "1.00.00" // Major version 1, minor and patch version 0.
+// "3.10.02" // Major version 3, minor version 10, patch version 02.
+// "12.03.01" // Major version 12, minor version 03, patch version
+//
+// Example usage:
+// printf("EASTL version: %s", EASTL_VERSION);
+// printf("EASTL version: %d.%d.%d", EASTL_VERSION_N / 10000 % 100, EASTL_VERSION_N / 100 % 100, EASTL_VERSION_N % 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VERSION
+ #define EASTL_VERSION "3.20.02"
+ #define EASTL_VERSION_N 32002
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+//
+// Defined as 1 or undefined.
+// Implements support for the definition of EA_COMPILER_NO_STANDARD_CPP_LIBRARY for the case
+// of using EABase versions prior to the addition of its EA_COMPILER_NO_STANDARD_CPP_LIBRARY support.
+//
+#if !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ #if defined(EA_PLATFORM_ANDROID)
+ // Disabled because EA's eaconfig/android_config/android_sdk packages currently
+ // don't support linking STL libraries. Perhaps we can figure out what linker arguments
+ // are needed for an app so we can manually specify them and then re-enable this code.
+ //
+ //#include <android/api-level.h>
+ //
+ //#if (__ANDROID_API__ < 9) // Earlier versions of Android provide no std C++ STL implementation.
+ #define EA_COMPILER_NO_STANDARD_CPP_LIBRARY 1
+ //#endif
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_NOEXCEPT
+//
+// Defined as a macro. Provided here for backward compatibility with older
+// EABase versions prior to 2.00.40 that don't yet define it themselves.
+//
+#if !defined(EA_NOEXCEPT)
+ #define EA_NOEXCEPT
+ #define EA_NOEXCEPT_IF(predicate)
+ #define EA_NOEXCEPT_EXPR(expression) false
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_CPP14_CONSTEXPR
+//
+// Defined as constexpr when a C++14 compiler is present. Defines it as nothing
+// when using a C++11 compiler.
+// C++14 relaxes the specification for constexpr such that it allows more
+// kinds of expressions. Since a C++11 compiler doesn't allow this, we need
+// to make a unique define for C++14 constexpr. This macro should be used only
+// when you are using it with code that specfically requires C++14 constexpr
+// functionality beyond the regular C++11 constexpr functionality.
+// http://en.wikipedia.org/wiki/C%2B%2B14#Relaxed_constexpr_restrictions
+//
+#if !defined(EA_CPP14_CONSTEXPR)
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_CPP14_CONSTEXPR constexpr
+ #else
+ #define EA_CPP14_CONSTEXPR // not supported
+ #define EA_NO_CPP14_CONSTEXPR
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL namespace
+//
+// We define this so that users that #include this config file can reference
+// these namespaces without seeing any other files that happen to use them.
+///////////////////////////////////////////////////////////////////////////////
+
+/// EA Standard Template Library
+namespace eastl
+{
+ // Intentionally empty.
+}
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG
+//
+// Defined as an integer >= 0. Default is 1 for debug builds and 0 for
+// release builds. This define is also a master switch for the default value
+// of some other settings.
+//
+// Example usage:
+// #if EASTL_DEBUG
+// ...
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUG
+ #if defined(EA_DEBUG) || defined(_DEBUG)
+ #define EASTL_DEBUG 1
+ #else
+ #define EASTL_DEBUG 0
+ #endif
+#endif
+
+// Developer debug. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_DEBUG
+ #define EASTL_DEV_DEBUG 0
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUGPARAMS_LEVEL
+//
+// EASTL_DEBUGPARAMS_LEVEL controls what debug information is passed through to
+// the allocator by default.
+// This value may be defined by the user ... if not it will default to 1 for
+// EA_DEBUG builds, otherwise 0.
+//
+// 0 - no debug information is passed through to allocator calls.
+// 1 - 'name' is passed through to allocator calls.
+// 2 - 'name', __FILE__, and __LINE__ are passed through to allocator calls.
+//
+// This parameter mirrors the equivalent parameter in the CoreAllocator package.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUGPARAMS_LEVEL
+ #if EASTL_DEBUG
+ #define EASTL_DEBUGPARAMS_LEVEL 2
+ #else
+ #define EASTL_DEBUGPARAMS_LEVEL 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DLL
+//
+// Defined as 0 or 1. The default is dependent on the definition of EA_DLL.
+// If EA_DLL is defined, then EASTL_DLL is 1, else EASTL_DLL is 0.
+// EA_DLL is a define that controls DLL builds within the EAConfig build system.
+// EASTL_DLL controls whether EASTL is built and used as a DLL.
+// Normally you wouldn't do such a thing, but there are use cases for such
+// a thing, particularly in the case of embedding C++ into C# applications.
+//
+#ifndef EASTL_DLL
+ #if defined(EA_DLL)
+ #define EASTL_DLL 1
+ #else
+ #define EASTL_DLL 0
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_IF_NOT_DLL
+//
+// Utility to include expressions only for static builds.
+//
+#ifndef EASTL_IF_NOT_DLL
+ #if EASTL_DLL
+ #define EASTL_IF_NOT_DLL(x)
+ #else
+ #define EASTL_IF_NOT_DLL(x) x
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_API
+//
+// This is used to label functions as DLL exports under Microsoft platforms.
+// If EA_DLL is defined, then the user is building EASTL as a DLL and EASTL's
+// non-templated functions will be exported. EASTL template functions are not
+// labelled as EASTL_API (and are thus not exported in a DLL build). This is
+// because it's not possible (or at least unsafe) to implement inline templated
+// functions in a DLL.
+//
+// Example usage of EASTL_API:
+// EASTL_API int someVariable = 10; // Export someVariable in a DLL build.
+//
+// struct EASTL_API SomeClass{ // Export SomeClass and its member functions in a DLL build.
+// EASTL_LOCAL void PrivateMethod(); // Not exported.
+// };
+//
+// EASTL_API void SomeFunction(); // Export SomeFunction in a DLL build.
+//
+//
+#if defined(EA_DLL) && !defined(EASTL_DLL)
+ #define EASTL_DLL 1
+#endif
+
+#ifndef EASTL_API // If the build file hasn't already defined this to be dllexport...
+ #if EASTL_DLL
+ #if defined(_MSC_VER)
+ #define EASTL_API __declspec(dllimport)
+ #define EASTL_LOCAL
+ #elif defined(__CYGWIN__)
+ #define EASTL_API __attribute__((dllimport))
+ #define EASTL_LOCAL
+ #elif (defined(__GNUC__) && (__GNUC__ >= 4))
+ #define EASTL_API __attribute__ ((visibility("default")))
+ #define EASTL_LOCAL __attribute__ ((visibility("hidden")))
+ #else
+ #define EASTL_API
+ #define EASTL_LOCAL
+ #endif
+ #else
+ #define EASTL_API
+ #define EASTL_LOCAL
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EASTDC_API
+//
+// This is used for importing EAStdC functions into EASTL, possibly via a DLL import.
+//
+#ifndef EASTL_EASTDC_API
+ #if EASTL_DLL
+ #if defined(_MSC_VER)
+ #define EASTL_EASTDC_API __declspec(dllimport)
+ #define EASTL_EASTDC_LOCAL
+ #elif defined(__CYGWIN__)
+ #define EASTL_EASTDC_API __attribute__((dllimport))
+ #define EASTL_EASTDC_LOCAL
+ #elif (defined(__GNUC__) && (__GNUC__ >= 4))
+ #define EASTL_EASTDC_API __attribute__ ((visibility("default")))
+ #define EASTL_EASTDC_LOCAL __attribute__ ((visibility("hidden")))
+ #else
+ #define EASTL_EASTDC_API
+ #define EASTL_EASTDC_LOCAL
+ #endif
+ #else
+ #define EASTL_EASTDC_API
+ #define EASTL_EASTDC_LOCAL
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EASTDC_VSNPRINTF
+//
+// Defined as 0 or 1. By default it is 1.
+//
+// When enabled EASTL uses EAStdC's Vsnprintf function directly instead of
+// having the user provide a global Vsnprintf8/16/32 function. The benefit
+// of this is that it will allow EASTL to just link to EAStdC's Vsnprintf
+// without the user doing anything. The downside is that any users who aren't
+// already using EAStdC will either need to now depend on EAStdC or globally
+// define this property to be 0 and simply provide functions that have the same
+// names. See the usage of EASTL_EASTDC_VSNPRINTF in string.h for more info.
+//
+#if !defined(EASTL_EASTDC_VSNPRINTF)
+ #define EASTL_EASTDC_VSNPRINTF 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NAME_ENABLED / EASTL_NAME / EASTL_NAME_VAL
+//
+// Used to wrap debug string names. In a release build, the definition
+// goes away. These are present to avoid release build compiler warnings
+// and to make code simpler.
+//
+// Example usage of EASTL_NAME:
+// // pName will defined away in a release build and thus prevent compiler warnings.
+// void allocator::set_name(const char* EASTL_NAME(pName))
+// {
+// #if EASTL_NAME_ENABLED
+// mpName = pName;
+// #endif
+// }
+//
+// Example usage of EASTL_NAME_VAL:
+// // "xxx" is defined to NULL in a release build.
+// vector<T, Allocator>::vector(const allocator_type& allocator = allocator_type(EASTL_NAME_VAL("xxx")));
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NAME_ENABLED
+ #define EASTL_NAME_ENABLED EASTL_DEBUG
+#endif
+
+#ifndef EASTL_NAME
+ #if EASTL_NAME_ENABLED
+ #define EASTL_NAME(x) x
+ #define EASTL_NAME_VAL(x) x
+ #else
+ #define EASTL_NAME(x)
+ #define EASTL_NAME_VAL(x) ((const char*)NULL)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEFAULT_NAME_PREFIX
+//
+// Defined as a string literal. Defaults to "EASTL".
+// This define is used as the default name for EASTL where such a thing is
+// referenced in EASTL. For example, if the user doesn't specify an allocator
+// name for their deque, it is named "EASTL deque". However, you can override
+// this to say "SuperBaseball deque" by changing EASTL_DEFAULT_NAME_PREFIX.
+//
+// Example usage (which is simply taken from how deque.h uses this define):
+// #ifndef EASTL_DEQUE_DEFAULT_NAME
+// #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque"
+// #endif
+//
+#ifndef EASTL_DEFAULT_NAME_PREFIX
+ #define EASTL_DEFAULT_NAME_PREFIX "EASTL"
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_DEBUG.
+// If EASTL_ASSERT_ENABLED is non-zero, then asserts will be executed via
+// the assertion mechanism.
+//
+// Example usage:
+// #if EASTL_ASSERT_ENABLED
+// EASTL_ASSERT(v.size() > 17);
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT_ENABLED EASTL_DEBUG
+#endif
+
+// Developer assert. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_ASSERT_ENABLED
+ #define EASTL_DEV_ASSERT_ENABLED EASTL_DEV_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_ASSERT_ENABLED.
+// This is like EASTL_ASSERT_ENABLED, except it is for empty container
+// references. Sometime people like to be able to take a reference to
+// the front of the container, but not use it if the container is empty.
+// In practice it's often easier and more efficient to do this than to write
+// extra code to check if the container is empty.
+//
+// NOTE: If this is enabled, EASTL_ASSERT_ENABLED must also be enabled
+//
+// Example usage:
+// template <typename T, typename Allocator>
+// inline typename vector<T, Allocator>::reference
+// vector<T, Allocator>::front()
+// {
+// #if EASTL_ASSERT_ENABLED
+// EASTL_ASSERT(mpEnd > mpBegin);
+// #endif
+//
+// return *mpBegin;
+// }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ #define EASTL_EMPTY_REFERENCE_ASSERT_ENABLED EASTL_ASSERT_ENABLED
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SetAssertionFailureFunction
+//
+// Allows the user to set a custom assertion failure mechanism.
+//
+// Example usage:
+// void Assert(const char* pExpression, void* pContext);
+// SetAssertionFailureFunction(Assert, this);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERTION_FAILURE_DEFINED
+ #define EASTL_ASSERTION_FAILURE_DEFINED
+
+ namespace eastl
+ {
+ typedef void (*EASTL_AssertionFailureFunction)(const char* pExpression, void* pContext);
+ EASTL_API void SetAssertionFailureFunction(EASTL_AssertionFailureFunction pFunction, void* pContext);
+
+ // These are the internal default functions that implement asserts.
+ EASTL_API void AssertionFailure(const char* pExpression);
+ EASTL_API void AssertionFailureFunctionDefault(const char* pExpression, void* pContext);
+ }
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT
+//
+// Assertion macro. Can be overridden by user with a different value.
+//
+// Example usage:
+// EASTL_ASSERT(intVector.size() < 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT(expression) \
+ EA_DISABLE_VC_WARNING(4127) \
+ do { \
+ EA_ANALYSIS_ASSUME(expression); \
+ (void)((expression) || (eastl::AssertionFailure(#expression), 0)); \
+ } while (0) \
+ EA_RESTORE_VC_WARNING()
+ #else
+ #define EASTL_ASSERT(expression)
+ #endif
+#endif
+
+// Developer assert. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_ASSERT
+ #if EASTL_DEV_ASSERT_ENABLED
+ #define EASTL_DEV_ASSERT(expression) \
+ EA_DISABLE_VC_WARNING(4127) \
+ do { \
+ EA_ANALYSIS_ASSUME(expression); \
+ (void)((expression) || (eastl::AssertionFailure(#expression), 0)); \
+ } while(0) \
+ EA_RESTORE_VC_WARNING()
+ #else
+ #define EASTL_DEV_ASSERT(expression)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT_MSG
+//
+// Example usage:
+// EASTL_ASSERT_MSG(false, "detected error condition!");
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_ASSERT_MSG
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT_MSG(expression, message) \
+ EA_DISABLE_VC_WARNING(4127) \
+ do { \
+ EA_ANALYSIS_ASSUME(expression); \
+ (void)((expression) || (eastl::AssertionFailure(message), 0)); \
+ } while (0) \
+ EA_RESTORE_VC_WARNING()
+ #else
+ #define EASTL_ASSERT_MSG(expression, message)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FAIL_MSG
+//
+// Failure macro. Can be overridden by user with a different value.
+//
+// Example usage:
+// EASTL_FAIL("detected error condition!");
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FAIL_MSG
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_FAIL_MSG(message) (eastl::AssertionFailure(message))
+ #else
+ #define EASTL_FAIL_MSG(message)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CT_ASSERT / EASTL_CT_ASSERT_NAMED
+//
+// EASTL_CT_ASSERT is a macro for compile time assertion checks, useful for
+// validating *constant* expressions. The advantage over using EASTL_ASSERT
+// is that errors are caught at compile time instead of runtime.
+//
+// Example usage:
+// EASTL_CT_ASSERT(sizeof(uint32_t) == 4);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define EASTL_CT_ASSERT(expression) static_assert(expression, #expression)
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CT_ASSERT_MSG
+//
+// EASTL_CT_ASSERT_MSG is a macro for compile time assertion checks, useful for
+// validating *constant* expressions. The advantage over using EASTL_ASSERT
+// is that errors are caught at compile time instead of runtime.
+// The message must be a string literal.
+//
+// Example usage:
+// EASTL_CT_ASSERT_MSG(sizeof(uint32_t) == 4, "The size of uint32_t must be 4.");
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define EASTL_CT_ASSERT_MSG(expression, message) static_assert(expression, message)
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG_BREAK / EASTL_DEBUG_BREAK_OVERRIDE
+//
+// This function causes an app to immediately stop under the debugger.
+// It is implemented as a macro in order to allow stopping at the site
+// of the call.
+//
+// EASTL_DEBUG_BREAK_OVERRIDE allows one to define EASTL_DEBUG_BREAK directly.
+// This is useful in cases where you desire to disable EASTL_DEBUG_BREAK
+// but do not wish to (or cannot) define a custom void function() to replace
+// EASTL_DEBUG_BREAK callsites.
+//
+// Example usage:
+// EASTL_DEBUG_BREAK();
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUG_BREAK_OVERRIDE
+ #ifndef EASTL_DEBUG_BREAK
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300)
+ #define EASTL_DEBUG_BREAK() __debugbreak() // This is a compiler intrinsic which will map to appropriate inlined asm for the platform.
+ #elif (defined(EA_PROCESSOR_ARM) && !defined(EA_PROCESSOR_ARM64)) && defined(__APPLE__)
+ #define EASTL_DEBUG_BREAK() asm("trap")
+ #elif defined(EA_PROCESSOR_ARM64) && defined(__APPLE__)
+ #include <signal.h>
+ #include <unistd.h>
+ #define EASTL_DEBUG_BREAK() kill( getpid(), SIGINT )
+ #elif defined(EA_PROCESSOR_ARM64) && defined(__GNUC__)
+ #define EASTL_DEBUG_BREAK() asm("brk 10")
+ #elif defined(EA_PROCESSOR_ARM) && defined(__GNUC__)
+ #define EASTL_DEBUG_BREAK() asm("BKPT 10") // The 10 is arbitrary. It's just a unique id.
+ #elif defined(EA_PROCESSOR_ARM) && defined(__ARMCC_VERSION)
+ #define EASTL_DEBUG_BREAK() __breakpoint(10)
+ #elif defined(EA_PROCESSOR_POWERPC) // Generic PowerPC.
+ #define EASTL_DEBUG_BREAK() asm(".long 0") // This triggers an exception by executing opcode 0x00000000.
+ #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && defined(EA_ASM_STYLE_INTEL)
+ #define EASTL_DEBUG_BREAK() { __asm int 3 }
+ #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && (defined(EA_ASM_STYLE_ATT) || defined(__GNUC__))
+ #define EASTL_DEBUG_BREAK() asm("int3")
+ #else
+ void EASTL_DEBUG_BREAK(); // User must define this externally.
+ #endif
+ #else
+ void EASTL_DEBUG_BREAK(); // User must define this externally.
+ #endif
+#else
+ #ifndef EASTL_DEBUG_BREAK
+ #if EASTL_DEBUG_BREAK_OVERRIDE == 1
+ // define an empty callable to satisfy the call site.
+ #define EASTL_DEBUG_BREAK ([]{})
+ #else
+ #define EASTL_DEBUG_BREAK EASTL_DEBUG_BREAK_OVERRIDE
+ #endif
+ #else
+ #error EASTL_DEBUG_BREAK is already defined yet you would like to override it. Please ensure no other headers are already defining EASTL_DEBUG_BREAK before this header (config.h) is included
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CRASH
+//
+// Executes an invalid memory write, which should result in an exception
+// on most platforms.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define EASTL_CRASH() *((volatile int*)0) = 0xDEADC0DE;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 0 (disabled) until some future date.
+// If enabled (1) then container operator= copies the allocator from the
+// source container. It ideally should be set to enabled but for backwards
+// compatibility with older versions of EASTL it is currently set to 0.
+// Regardless of whether this value is 0 or 1, this container copy constructs
+// or copy assigns allocators.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_COPY_ENABLED
+ #define EASTL_ALLOCATOR_COPY_ENABLED 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FIXED_SIZE_TRACKING_ENABLED
+//
+// Defined as an integer >= 0. Default is same as EASTL_DEBUG.
+// If EASTL_FIXED_SIZE_TRACKING_ENABLED is enabled, then fixed
+// containers in debug builds track the max count of objects
+// that have been in the container. This allows for the tuning
+// of fixed container sizes to their minimum required size.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXED_SIZE_TRACKING_ENABLED
+ #define EASTL_FIXED_SIZE_TRACKING_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_RTTI_ENABLED
+//
+// Defined as 0 or 1. Default is 1 if RTTI is supported by the compiler.
+// This define exists so that we can use some dynamic_cast operations in the
+// code without warning. dynamic_cast is only used if the specifically refers
+// to it; EASTL won't do dynamic_cast behind your back.
+//
+// Example usage:
+// #if EASTL_RTTI_ENABLED
+// pChildClass = dynamic_cast<ChildClass*>(pParentClass);
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_RTTI_ENABLED
+ // The VC++ default Standard Library (Dinkumware) disables major parts of RTTI
+ // (e.g. type_info) if exceptions are disabled, even if RTTI itself is enabled.
+ // _HAS_EXCEPTIONS is defined by Dinkumware to 0 or 1 (disabled or enabled).
+ #if defined(EA_COMPILER_NO_RTTI) || (defined(_MSC_VER) && defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && !(defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS))
+ #define EASTL_RTTI_ENABLED 0
+ #else
+ #define EASTL_RTTI_ENABLED 1
+ #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EXCEPTIONS_ENABLED
+//
+// Defined as 0 or 1. Default is to follow what the compiler settings are.
+// The user can predefine EASTL_EXCEPTIONS_ENABLED to 0 or 1; however, if the
+// compiler is set to disable exceptions then EASTL_EXCEPTIONS_ENABLED is
+// forced to a value of 0 regardless of the user predefine.
+//
+// Note that we do not enable EASTL exceptions by default if the compiler
+// has exceptions enabled. To enable EASTL_EXCEPTIONS_ENABLED you need to
+// manually set it to 1.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#if !defined(EASTL_EXCEPTIONS_ENABLED) || ((EASTL_EXCEPTIONS_ENABLED == 1) && defined(EA_COMPILER_NO_EXCEPTIONS))
+ #define EASTL_EXCEPTIONS_ENABLED 0
+#endif
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STRING_OPT_XXXX
+//
+// Enables some options / optimizations options that cause the string class
+// to behave slightly different from the C++ standard basic_string. These are
+// options whereby you can improve performance by avoiding operations that
+// in practice may never occur for you.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STRING_OPT_EXPLICIT_CTORS
+ // Defined as 0 or 1. Default is 0.
+ // Defines if we should implement explicity in constructors where the C++
+ // standard string does not. The advantage of enabling explicit constructors
+ // is that you can do this: string s = "hello"; in addition to string s("hello");
+ // The disadvantage of enabling explicity constructors is that there can be
+ // silent conversions done which impede performance if the user isn't paying
+ // attention.
+ // C++ standard string ctors are not explicit.
+ #define EASTL_STRING_OPT_EXPLICIT_CTORS 0
+#endif
+
+#ifndef EASTL_STRING_OPT_LENGTH_ERRORS
+ // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+ // Defines if we check for string values going beyond kMaxSize
+ // (a very large value) and throw exections if so.
+ // C++ standard strings are expected to do such checks.
+ #define EASTL_STRING_OPT_LENGTH_ERRORS EASTL_EXCEPTIONS_ENABLED
+#endif
+
+#ifndef EASTL_STRING_OPT_RANGE_ERRORS
+ // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+ // Defines if we check for out-of-bounds references to string
+ // positions and throw exceptions if so. Well-behaved code shouldn't
+ // refence out-of-bounds positions and so shouldn't need these checks.
+ // C++ standard strings are expected to do such range checks.
+ #define EASTL_STRING_OPT_RANGE_ERRORS EASTL_EXCEPTIONS_ENABLED
+#endif
+
+#ifndef EASTL_STRING_OPT_ARGUMENT_ERRORS
+ // Defined as 0 or 1. Default is 0.
+ // Defines if we check for NULL ptr arguments passed to string
+ // functions by the user and throw exceptions if so. Well-behaved code
+ // shouldn't pass bad arguments and so shouldn't need these checks.
+ // Also, some users believe that strings should check for NULL pointers
+ // in all their arguments and do no-ops if so. This is very debatable.
+ // C++ standard strings are not required to check for such argument errors.
+ #define EASTL_STRING_OPT_ARGUMENT_ERRORS 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_BITSET_SIZE_T
+//
+// Defined as 0 or 1. Default is 1.
+// Controls whether bitset uses size_t or eastl_size_t.
+//
+#ifndef EASTL_BITSET_SIZE_T
+ #define EASTL_BITSET_SIZE_T 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_INT128_SUPPORTED
+//
+// Defined as 0 or 1.
+//
+#ifndef EASTL_INT128_SUPPORTED
+ #if defined(EA_COMPILER_INTMAX_SIZE) && (EA_COMPILER_INTMAX_SIZE >= 16)
+ #define EASTL_INT128_SUPPORTED 1
+ #else
+ #define EASTL_INT128_SUPPORTED 0
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_GCC_STYLE_INT128_SUPPORTED
+//
+// Defined as 0 or 1.
+// Specifies whether __int128_t/__uint128_t are defined.
+//
+#ifndef EASTL_GCC_STYLE_INT128_SUPPORTED
+#if EASTL_INT128_SUPPORTED && (defined(EA_COMPILER_GNUC) || defined(__clang__))
+#define EASTL_GCC_STYLE_INT128_SUPPORTED 1
+#else
+#define EASTL_GCC_STYLE_INT128_SUPPORTED 0
+#endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED
+//
+// Defined as 0 or 1.
+// Tells if you can use the default EASTL allocator to do aligned allocations,
+// which for most uses tells if you can store aligned objects in containers
+// that use default allocators. It turns out that when built as a DLL for
+// some platforms, EASTL doesn't have a way to do aligned allocations, as it
+// doesn't have a heap that supports it. There is a way to work around this
+// with dynamically defined allocators, but that's currently a to-do.
+//
+#ifndef EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED
+ #if EASTL_DLL
+ #define EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED 0
+ #else
+ #define EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED 1
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_INT128_DEFINED
+//
+// Defined as 0 or 1.
+// Specifies whether eastl_int128_t/eastl_uint128_t have been typedef'd yet.
+// NB: these types are not considered fundamental, arithmetic or integral when using the EAStdC implementation.
+// this changes the compiler type traits defined in type_traits.h.
+// eg. is_signed<eastl_int128_t>::value may be false, because it is not arithmetic.
+//
+#ifndef EASTL_INT128_DEFINED
+ #if EASTL_INT128_SUPPORTED
+ #define EASTL_INT128_DEFINED 1
+
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ typedef __int128_t eastl_int128_t;
+ typedef __uint128_t eastl_uint128_t;
+ #else
+ typedef int128_t eastl_int128_t; // The EAStdC package defines an EA::StdC::int128_t and uint128_t type,
+ typedef uint128_t eastl_uint128_t; // though they are currently within the EA::StdC namespace.
+ #endif
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_BITSET_WORD_TYPE_DEFAULT / EASTL_BITSET_WORD_SIZE_DEFAULT
+//
+// Defined as an integral power of two type, usually uint32_t or uint64_t.
+// Specifies the word type that bitset should use internally to implement
+// storage. By default this is the platform register word size, but there
+// may be reasons to use a different value.
+//
+// Defines the integral data type used by bitset by default.
+// You can override this default on a bitset-by-bitset case by supplying a
+// custom bitset WordType template parameter.
+//
+// The C++ standard specifies that the std::bitset word type be unsigned long,
+// but that isn't necessarily the most efficient data type for the given platform.
+// We can follow the standard and be potentially less efficient or we can do what
+// is more efficient but less like the C++ std::bitset.
+//
+#if !defined(EASTL_BITSET_WORD_TYPE_DEFAULT)
+ #if defined(EASTL_BITSET_WORD_SIZE) // EASTL_BITSET_WORD_SIZE is deprecated, but we temporarily support the ability for the user to specify it. Use EASTL_BITSET_WORD_TYPE_DEFAULT instead.
+ #if (EASTL_BITSET_WORD_SIZE == 4)
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint32_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 4
+ #else
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint64_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 8
+ #endif
+ #elif (EA_PLATFORM_WORD_SIZE == 16) // EA_PLATFORM_WORD_SIZE is defined in EABase.
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint128_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 16
+ #elif (EA_PLATFORM_WORD_SIZE == 8)
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint64_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 8
+ #elif (EA_PLATFORM_WORD_SIZE == 4)
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint32_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 4
+ #else
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint16_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 2
+ #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_SIZE_CACHE
+//
+// Defined as 0 or 1. Default is 1. Changed from 0 in version 1.16.01.
+// If defined as 1, the list and slist containers (and possibly any additional
+// containers as well) keep a member mSize (or similar) variable which allows
+// the size() member function to execute in constant time (a.k.a. O(1)).
+// There are debates on both sides as to whether it is better to have this
+// cached value or not, as having it entails some cost (memory and code).
+// To consider: Make list size caching an optional template parameter.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIST_SIZE_CACHE
+ #define EASTL_LIST_SIZE_CACHE 1
+#endif
+
+#ifndef EASTL_SLIST_SIZE_CACHE
+ #define EASTL_SLIST_SIZE_CACHE 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAX_STACK_USAGE
+//
+// Defined as an integer greater than zero. Default is 4000.
+// There are some places in EASTL where temporary objects are put on the
+// stack. A common example of this is in the implementation of container
+// swap functions whereby a temporary copy of the container is made.
+// There is a problem, however, if the size of the item created on the stack
+// is very large. This can happen with fixed-size containers, for example.
+// The EASTL_MAX_STACK_USAGE define specifies the maximum amount of memory
+// (in bytes) that the given platform/compiler will safely allow on the stack.
+// Platforms such as Windows will generally allow larger values than embedded
+// systems or console machines, but it is usually a good idea to stick with
+// a max usage value that is portable across all platforms, lest the user be
+// surprised when something breaks as it is ported to another platform.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_MAX_STACK_USAGE
+ #define EASTL_MAX_STACK_USAGE 4000
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VA_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 1 for compilers that need it, 0 for others.
+// Some compilers on some platforms implement va_list whereby its contents
+// are destroyed upon usage, even if passed by value to another function.
+// With these compilers you can use va_copy to save and restore a va_list.
+// Known compiler/platforms that destroy va_list contents upon usage include:
+// CodeWarrior on PowerPC
+// GCC on x86-64
+// However, va_copy is part of the C99 standard and not part of earlier C and
+// C++ standards. So not all compilers support it. VC++ doesn't support va_copy,
+// but it turns out that VC++ doesn't usually need it on the platforms it supports,
+// and va_copy can usually be implemented via memcpy(va_list, va_list) with VC++.
+//
+// Example usage:
+// void Function(va_list arguments)
+// {
+// #if EASTL_VA_COPY_ENABLED
+// va_list argumentsCopy;
+// va_copy(argumentsCopy, arguments);
+// #endif
+// <use arguments or argumentsCopy>
+// #if EASTL_VA_COPY_ENABLED
+// va_end(argumentsCopy);
+// #endif
+// }
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VA_COPY_ENABLED
+ #if ((defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)) && (!defined(__i386__) || defined(__x86_64__)) && !defined(__ppc__) && !defined(__PPC__) && !defined(__PPC64__)
+ #define EASTL_VA_COPY_ENABLED 1
+ #else
+ #define EASTL_VA_COPY_ENABLED 0
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_OPERATOR_EQUALS_OTHER_ENABLED
+//
+// Defined as 0 or 1. Default is 0 until such day that it's deemed safe.
+// When enabled, enables operator= for other char types, e.g. for code
+// like this:
+// eastl::string8 s8;
+// eastl::string16 s16;
+// s8 = s16;
+// This option is considered experimental, and may exist as such for an
+// indefinite amount of time.
+//
+#if !defined(EASTL_OPERATOR_EQUALS_OTHER_ENABLED)
+ #define EASTL_OPERATOR_EQUALS_OTHER_ENABLED 0
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_PROXY_ENABLED
+//
+#if !defined(EASTL_LIST_PROXY_ENABLED)
+ // GCC with -fstrict-aliasing has bugs (or undocumented functionality in their
+ // __may_alias__ implementation. The compiler gets confused about function signatures.
+ // VC8 (1400) doesn't need the proxy because it has built-in smart debugging capabilities.
+ #if defined(EASTL_DEBUG) && !defined(__GNUC__) && (!defined(_MSC_VER) || (_MSC_VER < 1400))
+ #define EASTL_LIST_PROXY_ENABLED 1
+ #define EASTL_LIST_PROXY_MAY_ALIAS EASTL_MAY_ALIAS
+ #else
+ #define EASTL_LIST_PROXY_ENABLED 0
+ #define EASTL_LIST_PROXY_MAY_ALIAS
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_ITERATOR_CATEGORY_ENABLED
+//
+// Defined as 0 or 1. Default is 0.
+// If defined as non-zero, EASTL iterator categories (iterator.h's input_iterator_tag,
+// forward_iterator_tag, etc.) are defined to be those from std C++ in the std
+// namespace. The reason for wanting to enable such a feature is that it allows
+// EASTL containers and algorithms to work with std STL containes and algorithms.
+// The default value was changed from 1 to 0 in EASL 1.13.03, January 11, 2012.
+// The reason for the change was that almost nobody was taking advantage of it and
+// it was slowing down compile times for some compilers quite a bit due to them
+// having a lot of headers behind <iterator>.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STD_ITERATOR_CATEGORY_ENABLED
+ #define EASTL_STD_ITERATOR_CATEGORY_ENABLED 0
+#endif
+
+#if EASTL_STD_ITERATOR_CATEGORY_ENABLED
+ #define EASTL_ITC_NS std
+#else
+ #define EASTL_ITC_NS eastl
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATION_ENABLED
+//
+// Defined as an integer >= 0. Default is to be equal to EASTL_DEBUG.
+// If nonzero, then a certain amount of automatic runtime validation is done.
+// Runtime validation is not considered the same thing as asserting that user
+// input values are valid. Validation refers to internal consistency checking
+// of the validity of containers and their iterators. Validation checking is
+// something that often involves significantly more than basic assertion
+// checking, and it may sometimes be desirable to disable it.
+// This macro would generally be used internally by EASTL.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATION_ENABLED
+ #define EASTL_VALIDATION_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_COMPARE
+//
+// Defined as EASTL_ASSERT or defined away. Default is EASTL_ASSERT if EASTL_VALIDATION_ENABLED is enabled.
+// This is used to validate user-supplied comparison functions, particularly for sorting purposes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_COMPARE_ENABLED
+ #define EASTL_VALIDATE_COMPARE_ENABLED EASTL_VALIDATION_ENABLED
+#endif
+
+#if EASTL_VALIDATE_COMPARE_ENABLED
+ #define EASTL_VALIDATE_COMPARE EASTL_ASSERT
+#else
+ #define EASTL_VALIDATE_COMPARE(expression)
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_INTRUSIVE_LIST
+//
+// Defined as an integral value >= 0. Controls the amount of automatic validation
+// done by intrusive_list. A value of 0 means no automatic validation is done.
+// As of this writing, EASTL_VALIDATE_INTRUSIVE_LIST defaults to 0, as it makes
+// the intrusive_list_node become a non-POD, which may be an issue for some code.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_INTRUSIVE_LIST
+ #define EASTL_VALIDATE_INTRUSIVE_LIST 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FORCE_INLINE
+//
+// Defined as a "force inline" expression or defined away.
+// You generally don't need to use forced inlining with the Microsoft and
+// Metrowerks compilers, but you may need it with the GCC compiler (any version).
+//
+// Example usage:
+// template <typename T, typename Allocator>
+// EASTL_FORCE_INLINE typename vector<T, Allocator>::size_type
+// vector<T, Allocator>::size() const
+// { return mpEnd - mpBegin; }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FORCE_INLINE
+ #define EASTL_FORCE_INLINE EA_FORCE_INLINE
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAY_ALIAS
+//
+// Defined as a macro that wraps the GCC may_alias attribute. This attribute
+// has no significance for VC++ because VC++ doesn't support the concept of
+// strict aliasing. Users should avoid writing code that breaks strict
+// aliasing rules; EASTL_MAY_ALIAS is for cases with no alternative.
+//
+// Example usage:
+// uint32_t value EASTL_MAY_ALIAS;
+//
+// Example usage:
+// typedef uint32_t EASTL_MAY_ALIAS value_type;
+// value_type value;
+//
+#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303) && !defined(EA_COMPILER_RVCT)
+ #define EASTL_MAY_ALIAS __attribute__((__may_alias__))
+#else
+ #define EASTL_MAY_ALIAS
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIKELY / EASTL_UNLIKELY
+//
+// Defined as a macro which gives a hint to the compiler for branch
+// prediction. GCC gives you the ability to manually give a hint to
+// the compiler about the result of a comparison, though it's often
+// best to compile shipping code with profiling feedback under both
+// GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there
+// are times when you feel very sure that a boolean expression will
+// usually evaluate to either true or false and can help the compiler
+// by using an explicity directive...
+//
+// Example usage:
+// if(EASTL_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0.
+// { ... }
+//
+// Example usage:
+// if(EASTL_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0.
+// { ... }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIKELY
+ #if defined(__GNUC__) && (__GNUC__ >= 3)
+ #define EASTL_LIKELY(x) __builtin_expect(!!(x), true)
+ #define EASTL_UNLIKELY(x) __builtin_expect(!!(x), false)
+ #else
+ #define EASTL_LIKELY(x) (x)
+ #define EASTL_UNLIKELY(x) (x)
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_TYPE_TRAITS_AVAILABLE
+//
+// Defined as 0 or 1; default is based on auto-detection.
+// Specifies whether Standard C++11 <type_traits> support exists.
+// Sometimes the auto-detection below fails to work properly and the
+// user needs to override it. Does not define whether the compiler provides
+// built-in compiler type trait support (e.g. __is_abstract()), as some
+// compilers will EASTL_STD_TYPE_TRAITS_AVAILABLE = 0, but have built
+// in type trait support.
+//
+#ifndef EASTL_STD_TYPE_TRAITS_AVAILABLE
+ /* Disabled because we don't currently need it.
+ #if defined(_MSC_VER) && (_MSC_VER >= 1500) // VS2008 or later
+ #pragma warning(push, 0)
+ #include <yvals.h>
+ #pragma warning(pop)
+ #if ((defined(_HAS_TR1) && _HAS_TR1) || _MSC_VER >= 1700) // VS2012 (1700) and later has built-in type traits support.
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1
+ #include <type_traits>
+ #else
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0
+ #endif
+
+ #elif defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) && !defined(__GCCXML__)) && !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ #include <cstddef> // This will define __GLIBCXX__ if using GNU's libstdc++ and _LIBCPP_VERSION if using clang's libc++.
+
+ #if defined(EA_COMPILER_CLANG) && !defined(EA_PLATFORM_APPLE) // As of v3.0.0, Apple's clang doesn't support type traits.
+ // http://clang.llvm.org/docs/LanguageExtensions.html#checking_type_traits
+ // Clang has some built-in compiler trait support. This support doesn't currently
+ // directly cover all our type_traits, though the C++ Standard Library that's used
+ // with clang could fill that in.
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1
+ #endif
+
+ #if !defined(EASTL_STD_TYPE_TRAITS_AVAILABLE)
+ #if defined(_LIBCPP_VERSION) // This is defined by clang's libc++.
+ #include <type_traits>
+
+ #elif defined(__GLIBCXX__) && (__GLIBCXX__ >= 20090124) // It's not clear if this is the oldest version that has type traits; probably it isn't.
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1
+
+ #if defined(__GXX_EXPERIMENTAL_CXX0X__) // To do: Update this test to include conforming C++11 implementations.
+ #include <type_traits>
+ #else
+ #include <tr1/type_traits>
+ #endif
+ #else
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0
+ #endif
+ #endif
+
+ #elif defined(__MSL_CPP__) && (__MSL_CPP__ >= 0x8000) // CodeWarrior compiler.
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0
+ // To do: Implement support for this (via modifying the EASTL type
+ // traits headers, as CodeWarrior provides this.
+ #else
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0
+ #endif
+ */
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE
+//
+// Defined as 0 or 1; default is based on auto-detection.
+// Specifies whether the compiler provides built-in compiler type trait support
+// (e.g. __is_abstract()). Does not specify any details about which traits
+// are available or what their standards-compliance is. Nevertheless this is a
+// useful macro identifier for our type traits implementation.
+//
+#ifndef EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE
+ #if defined(_MSC_VER) && (_MSC_VER >= 1500) && !defined(EA_COMPILER_CLANG_CL) // VS2008 or later
+ #pragma warning(push, 0)
+ #include <yvals.h>
+ #pragma warning(pop)
+ #if ((defined(_HAS_TR1) && _HAS_TR1) || _MSC_VER >= 1700) // VS2012 (1700) and later has built-in type traits support.
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+ #else
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+ #endif
+ #elif defined(__clang__) && defined(__APPLE__) && defined(_CXXCONFIG) // Apple clang but with GCC's libstdc++.
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+ #elif defined(__clang__)
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) && !defined(__GCCXML__)
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+ #elif defined(__MSL_CPP__) && (__MSL_CPP__ >= 0x8000) // CodeWarrior compiler.
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+ #else
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_RESET_ENABLED
+//
+// Defined as 0 or 1; default is 1 for the time being.
+// The reset_lose_memory function works the same as reset, as described below.
+//
+// Specifies whether the container reset functionality is enabled. If enabled
+// then <container>::reset forgets its memory, otherwise it acts as the clear
+// function. The reset function is potentially dangerous, as it (by design)
+// causes containers to not free their memory.
+// This option has no applicability to the bitset::reset function, as bitset
+// isn't really a container. Also it has no applicability to the smart pointer
+// wrappers (e.g. intrusive_ptr).
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_RESET_ENABLED
+ #define EASTL_RESET_ENABLED 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MINMAX_ENABLED
+//
+// Defined as 0 or 1; default is 1.
+// Specifies whether the min and max algorithms are available.
+// It may be useful to disable the min and max algorithms because sometimes
+// #defines for min and max exist which would collide with EASTL min and max.
+// Note that there are already alternative versions of min and max in EASTL
+// with the min_alt and max_alt functions. You can use these without colliding
+// with min/max macros that may exist.
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_MINMAX_ENABLED
+ #define EASTL_MINMAX_ENABLED 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NOMINMAX
+//
+// Defined as 0 or 1; default is 1.
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. If EASTL_NOMINMAX is defined as 1, then we undefine min and
+// max if they are #defined by an external library. This allows our min and
+// max definitions in algorithm.h to work as expected. An alternative to
+// the enabling of EASTL_NOMINMAX is to #define NOMINMAX in your project
+// settings if you are compiling for Windows.
+// Note that this does not control the availability of the EASTL min and max
+// algorithms; the EASTL_MINMAX_ENABLED configuration parameter does that.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NOMINMAX
+ #define EASTL_NOMINMAX 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_CPP_ONLY
+//
+// Defined as 0 or 1; default is 0.
+// Disables the use of compiler language extensions. We use compiler language
+// extensions only in the case that they provide some benefit that can't be
+// had any other practical way. But sometimes the compiler is set to disable
+// language extensions or sometimes one compiler's preprocesor is used to generate
+// code for another compiler, and so it's necessary to disable language extension usage.
+//
+// Example usage:
+// #if defined(_MSC_VER) && !EASTL_STD_CPP_ONLY
+// enum : size_type { npos = container_type::npos }; // Microsoft extension which results in significantly smaller debug symbols.
+// #else
+// static const size_type npos = container_type::npos;
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STD_CPP_ONLY
+ #define EASTL_STD_CPP_ONLY 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NO_RVALUE_REFERENCES
+//
+// Defined as 0 or 1.
+// This is the same as EABase EA_COMPILER_NO_RVALUE_REFERENCES except that it
+// follows the convention of being always defined, as 0 or 1.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_NO_RVALUE_REFERENCES)
+ #if defined(EA_COMPILER_NO_RVALUE_REFERENCES)
+ #define EASTL_NO_RVALUE_REFERENCES 1
+ #else
+ #define EASTL_NO_RVALUE_REFERENCES 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MOVE_SEMANTICS_ENABLED
+//
+// Defined as 0 or 1.
+// If enabled then C++11-like functionality with rvalue references and move
+// operations is enabled.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_MOVE_SEMANTICS_ENABLED)
+ #if EASTL_NO_RVALUE_REFERENCES // If the compiler doesn't support rvalue references or EASTL is configured to disable them...
+ #define EASTL_MOVE_SEMANTICS_ENABLED 0
+ #else
+ #define EASTL_MOVE_SEMANTICS_ENABLED 1
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VARIADIC_TEMPLATES_ENABLED
+//
+// Defined as 0 or 1.
+// If enabled then C++11-like functionality with variadic templates is enabled.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_VARIADIC_TEMPLATES_ENABLED)
+ #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES) // If the compiler doesn't support variadic templates
+ #define EASTL_VARIADIC_TEMPLATES_ENABLED 0
+ #else
+ #define EASTL_VARIADIC_TEMPLATES_ENABLED 1
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VARIABLE_TEMPLATES_ENABLED
+//
+// Defined as 0 or 1.
+// If enabled then C++11-like functionality with variable templates is enabled.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_VARIABLE_TEMPLATES_ENABLED)
+ #if((EABASE_VERSION_N < 20605) || defined(EA_COMPILER_NO_VARIABLE_TEMPLATES))
+ #define EASTL_VARIABLE_TEMPLATES_ENABLED 0
+ #else
+ #define EASTL_VARIABLE_TEMPLATES_ENABLED 1
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_INLINE_VARIABLE_ENABLED
+//
+// Defined as 0 or 1.
+// If enabled then C++17-like functionality with inline variable is enabled.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_INLINE_VARIABLE_ENABLED)
+ #if((EABASE_VERSION_N < 20707) || defined(EA_COMPILER_NO_INLINE_VARIABLES))
+ #define EASTL_INLINE_VARIABLE_ENABLED 0
+ #else
+ #define EASTL_INLINE_VARIABLE_ENABLED 1
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CPP17_INLINE_VARIABLE
+//
+// Used to prefix a variable as inline when C++17 inline variables are available
+// Usage: EASTL_CPP17_INLINE_VARIABLE constexpr bool type_trait_v = type_trait::value
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_CPP17_INLINE_VARIABLE)
+ #if EASTL_INLINE_VARIABLE_ENABLED
+ #define EASTL_CPP17_INLINE_VARIABLE inline
+ #else
+ #define EASTL_CPP17_INLINE_VARIABLE
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_HAVE_CPP11_TYPE_TRAITS
+//
+// Defined as 0 or 1.
+// This is the same as EABase EA_HAVE_CPP11_TYPE_TRAITS except that it
+// follows the convention of being always defined, as 0 or 1. Note that this
+// identifies if the Standard Library has C++11 type traits and not if EASTL
+// has its equivalents to C++11 type traits.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_HAVE_CPP11_TYPE_TRAITS)
+ // To do: Change this to use the EABase implementation once we have a few months of testing
+ // of this and we are sure it works right. Do this at some point after ~January 2014.
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) // Prior versions of libstdc++ have incomplete support for C++11 type traits.
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 1
+ #else
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS undef
+//
+// We need revise this macro to be undefined in some cases, in case the user
+// isn't using an updated EABase.
+///////////////////////////////////////////////////////////////////////////////
+#if defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // It may in fact be supported by 4.01 or 4.02 but we don't have compilers to test with.
+ #if defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS)
+ #undef EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NO_RANGE_BASED_FOR_LOOP
+//
+// Defined as 0 or 1.
+// This is the same as EABase EA_COMPILER_NO_RANGE_BASED_FOR_LOOP except that it
+// follows the convention of being always defined, as 0 or 1.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_NO_RANGE_BASED_FOR_LOOP)
+ #if defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP)
+ #define EASTL_NO_RANGE_BASED_FOR_LOOP 1
+ #else
+ #define EASTL_NO_RANGE_BASED_FOR_LOOP 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALIGN_OF
+//
+// Determines the alignment of a type.
+//
+// Example usage:
+// size_t alignment = EASTL_ALIGN_OF(int);
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_ALIGN_OF
+ #define EASTL_ALIGN_OF alignof
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// eastl_size_t
+//
+// Defined as an unsigned integer type, usually either size_t or uint32_t.
+// Defaults to size_t to match std STL unless the user specifies to use
+// uint32_t explicitly via the EASTL_SIZE_T_32BIT define
+//
+// Example usage:
+// eastl_size_t n = intVector.size();
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_SIZE_T_32BIT // Defines whether EASTL_SIZE_T uses uint32_t/int32_t as opposed to size_t/ssize_t.
+ #define EASTL_SIZE_T_32BIT 0 // This makes a difference on 64 bit platforms because they use a 64 bit size_t.
+#endif // By default we do the same thing as std STL and use size_t.
+
+#ifndef EASTL_SIZE_T
+ #if (EASTL_SIZE_T_32BIT == 0) || (EA_PLATFORM_WORD_SIZE == 4)
+ #include <stddef.h>
+ #define EASTL_SIZE_T size_t
+ #define EASTL_SSIZE_T intptr_t
+ #else
+ #define EASTL_SIZE_T uint32_t
+ #define EASTL_SSIZE_T int32_t
+ #endif
+#endif
+
+typedef EASTL_SIZE_T eastl_size_t; // Same concept as std::size_t.
+typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept is similar to Posix's ssize_t.
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// AddRef / Release
+//
+// AddRef and Release are used for "intrusive" reference counting. By the term
+// "intrusive", we mean that the reference count is maintained by the object
+// and not by the user of the object. Given that an object implements referencing
+// counting, the user of the object needs to be able to increment and decrement
+// that reference count. We do that via the venerable AddRef and Release functions
+// which the object must supply. These defines here allow us to specify the name
+// of the functions. They could just as well be defined to addref and delref or
+// IncRef and DecRef.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTLAddRef
+ #define EASTLAddRef AddRef
+#endif
+
+#ifndef EASTLRelease
+ #define EASTLRelease Release
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_EXPLICIT_ENABLED
+//
+// Defined as 0 or 1. Default is 0 for now but ideally would be changed to
+// 1 some day. It's 0 because setting it to 1 breaks some existing code.
+// This option enables the allocator ctor to be explicit, which avoids
+// some undesirable silent conversions, especially with the string class.
+//
+// Example usage:
+// class allocator
+// {
+// public:
+// EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName);
+// };
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_EXPLICIT_ENABLED
+ #define EASTL_ALLOCATOR_EXPLICIT_ENABLED 0
+#endif
+
+#if EASTL_ALLOCATOR_EXPLICIT_ENABLED
+ #define EASTL_ALLOCATOR_EXPLICIT explicit
+#else
+ #define EASTL_ALLOCATOR_EXPLICIT
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_MIN_ALIGNMENT
+//
+// Defined as an integral power-of-2 that's >= 1.
+// Identifies the minimum alignment that EASTL should assume its allocators
+// use. There is code within EASTL that decides whether to do a Malloc or
+// MallocAligned call and it's typically better if it can use the Malloc call.
+// But this requires knowing what the minimum possible alignment is.
+#if !defined(EASTL_ALLOCATOR_MIN_ALIGNMENT)
+ #define EASTL_ALLOCATOR_MIN_ALIGNMENT EA_PLATFORM_MIN_MALLOC_ALIGNMENT
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT
+//
+// Identifies the minimum alignment that EASTL should assume system allocations
+// from malloc and new will have.
+#if !defined(EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT)
+ #if defined(EA_PLATFORM_MICROSOFT) || defined(EA_PLATFORM_APPLE)
+ #define EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT 16
+ #else
+ #define EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT (EA_PLATFORM_PTR_SIZE * 2)
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL allocator
+//
+// The EASTL allocator system allows you to redefine how memory is allocated
+// via some defines that are set up here. In the container code, memory is
+// allocated via macros which expand to whatever the user has them set to
+// expand to. Given that there are multiple allocator systems available,
+// this system allows you to configure it to use whatever system you want,
+// provided your system meets the requirements of this library.
+// The requirements are:
+//
+// - Must be constructable via a const char* (name) parameter.
+// Some uses of allocators won't require this, however.
+// - Allocate a block of memory of size n and debug name string.
+// - Allocate a block of memory of size n, debug name string,
+// alignment a, and offset o.
+// - Free memory allocated via either of the allocation functions above.
+// - Provide a default allocator instance which can be used if the user
+// doesn't provide a specific one.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// namespace eastl
+// {
+// class allocator
+// {
+// allocator(const char* pName = NULL);
+//
+// void* allocate(size_t n, int flags = 0);
+// void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+// void deallocate(void* p, size_t n);
+//
+// const char* get_name() const;
+// void set_name(const char* pName);
+// };
+//
+// allocator* GetDefaultAllocator(); // This is used for anonymous allocations.
+// }
+
+#ifndef EASTLAlloc // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does.
+ #define EASTLAlloc(allocator, n) (allocator).allocate(n);
+#endif
+
+#ifndef EASTLAllocFlags // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does.
+ #define EASTLAllocFlags(allocator, n, flags) (allocator).allocate(n, flags);
+#endif
+
+#ifndef EASTLAllocAligned
+ #define EASTLAllocAligned(allocator, n, alignment, offset) (allocator).allocate((n), (alignment), (offset))
+#endif
+
+#ifndef EASTLAllocAlignedFlags
+ #define EASTLAllocAlignedFlags(allocator, n, alignment, offset, flags) (allocator).allocate((n), (alignment), (offset), (flags))
+#endif
+
+#ifndef EASTLFree
+ #define EASTLFree(allocator, p, size) (allocator).deallocate((void*)(p), (size)) // Important to cast to void* as p may be non-const.
+#endif
+
+#ifndef EASTLAllocatorType
+ #define EASTLAllocatorType eastl::allocator
+#endif
+
+#ifndef EASTLDummyAllocatorType
+ #define EASTLDummyAllocatorType eastl::dummy_allocator
+#endif
+
+#ifndef EASTLAllocatorDefault
+ // EASTLAllocatorDefault returns the default allocator instance. This is not a global
+ // allocator which implements all container allocations but is the allocator that is
+ // used when EASTL needs to allocate memory internally. There are very few cases where
+ // EASTL allocates memory internally, and in each of these it is for a sensible reason
+ // that is documented to behave as such.
+ #define EASTLAllocatorDefault eastl::GetDefaultAllocator
+#endif
+
+
+/// EASTL_ALLOCATOR_DEFAULT_NAME
+///
+/// Defines a default allocator name in the absence of a user-provided name.
+///
+#ifndef EASTL_ALLOCATOR_DEFAULT_NAME
+ #define EASTL_ALLOCATOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX // Unless the user overrides something, this is "EASTL".
+#endif
+
+/// EASTL_USE_FORWARD_WORKAROUND
+///
+/// This is to workaround a compiler bug that we found in VS2013. Update 1 did not fix it.
+/// This should be fixed in a future release of VS2013 http://accentuable4.rssing.com/browser.php?indx=3511740&item=15696
+///
+#ifndef EASTL_USE_FORWARD_WORKAROUND
+ #if defined(_MSC_FULL_VER) && _MSC_FULL_VER == 180021005 || (defined(__EDG_VERSION__) && (__EDG_VERSION__ < 405))// VS2013 initial release
+ #define EASTL_USE_FORWARD_WORKAROUND 1
+ #else
+ #define EASTL_USE_FORWARD_WORKAROUND 0
+ #endif
+#endif
+
+
+/// EASTL_TUPLE_ENABLED
+/// EASTL tuple implementation depends on variadic template support
+#if EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_TUPLE_ENABLED 1
+#else
+ #define EASTL_TUPLE_ENABLED 0
+#endif
+
+
+/// EASTL_FUNCTION_ENABLED
+///
+#ifndef EASTL_FUNCTION_ENABLED
+ #define EASTL_FUNCTION_ENABLED 1
+#endif
+
+
+/// EASTL_USER_LITERALS_ENABLED
+#ifndef EASTL_USER_LITERALS_ENABLED
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EASTL_USER_LITERALS_ENABLED 1
+ #else
+ #define EASTL_USER_LITERALS_ENABLED 0
+ #endif
+#endif
+
+
+/// EASTL_INLINE_NAMESPACES_ENABLED
+#ifndef EASTL_INLINE_NAMESPACES_ENABLED
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EASTL_INLINE_NAMESPACES_ENABLED 1
+ #else
+ #define EASTL_INLINE_NAMESPACES_ENABLED 0
+ #endif
+#endif
+
+
+/// EASTL_CORE_ALLOCATOR_ENABLED
+#ifndef EASTL_CORE_ALLOCATOR_ENABLED
+ #define EASTL_CORE_ALLOCATOR_ENABLED 0
+#endif
+
+/// EASTL_OPENSOURCE
+/// This is enabled when EASTL is building built in an "open source" mode. Which is a mode that eliminates code
+/// dependencies on other technologies that have not been released publically.
+/// EASTL_OPENSOURCE = 0, is the default.
+/// EASTL_OPENSOURCE = 1, utilizes technologies that not publically available.
+///
+#ifndef EASTL_OPENSOURCE
+ #define EASTL_OPENSOURCE 0
+#endif
+
+
+/// EASTL_OPTIONAL_ENABLED
+#if defined(EA_COMPILER_MSVC_2012)
+ #define EASTL_OPTIONAL_ENABLED 0
+#elif defined(EA_COMPILER_MSVC_2013)
+ #define EASTL_OPTIONAL_ENABLED 0
+#elif defined(EA_COMPILER_MSVC_2015)
+ #define EASTL_OPTIONAL_ENABLED 1
+#elif EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) && !defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) && defined(EA_COMPILER_CPP11_ENABLED)
+ #define EASTL_OPTIONAL_ENABLED 1
+#else
+ #define EASTL_OPTIONAL_ENABLED 0
+#endif
+
+
+/// EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE
+#if defined(__clang__)
+ // NB: !__is_identifier() is correct: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66970#c11
+ #if !__is_identifier(__has_unique_object_representations)
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1
+ #else
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0
+ #endif
+#elif defined(_MSC_VER) && (_MSC_VER >= 1913) // VS2017 15.6+
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1
+#else
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0
+#endif
+
+#if defined(__clang__)
+ // NB: !__is_identifier() is correct: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66970#c11
+ #if !__is_identifier(__is_final)
+ #define EASTL_IS_FINAL_AVAILABLE 1
+ #else
+ #define EASTL_IS_FINAL_AVAILABLE 0
+ #endif
+#elif defined(_MSC_VER) && (_MSC_VER >= 1914) // VS2017 15.7+
+ #define EASTL_IS_FINAL_AVAILABLE 1
+#elif defined(EA_COMPILER_GNUC)
+ #define EASTL_IS_FINAL_AVAILABLE 1
+#else
+ #define EASTL_IS_FINAL_AVAILABLE 0
+#endif
+
+#if defined(__clang__)
+ // NB: !__is_identifier() is correct: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66970#c11
+ #if !__is_identifier(__is_aggregate)
+ #define EASTL_IS_AGGREGATE_AVAILABLE 1
+ #else
+ #define EASTL_IS_AGGREGATE_AVAILABLE 0
+ #endif
+#elif defined(_MSC_VER) && (_MSC_VER >= 1915) // VS2017 15.8+
+ #define EASTL_IS_AGGREGATE_AVAILABLE 1
+#elif defined(EA_COMPILER_GNUC)
+ #define EASTL_IS_AGGREGATE_AVAILABLE 1
+#else
+ #define EASTL_IS_AGGREGATE_AVAILABLE 0
+#endif
+
+
+/// EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR
+/// This feature define allows users to toggle the problematic eastl::pair implicit
+/// single element constructor.
+#ifndef EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR
+ #define EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR 0
+#endif
+
+/// EASTL_SYSTEM_BIG_ENDIAN_STATEMENT
+/// EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT
+/// These macros allow you to write endian specific macros as statements.
+/// This allows endian specific code to be macro expanded from within other macros
+///
+#if defined(EA_SYSTEM_BIG_ENDIAN)
+ #define EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(...) __VA_ARGS__
+#else
+ #define EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(...)
+#endif
+
+#if defined(EA_SYSTEM_LITTLE_ENDIAN)
+ #define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...) __VA_ARGS__
+#else
+ #define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...)
+#endif
+
+/// EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+/// eastl::bit_cast, in order to be implemented as constexpr, requires explicit compiler support.
+/// This macro defines whether it's possible for bit_cast to be constexpr.
+///
+#if (defined(EA_COMPILER_MSVC) && defined(EA_COMPILER_MSVC_VERSION_14_26) && EA_COMPILER_VERSION >= EA_COMPILER_MSVC_VERSION_14_26) \
+ || EA_COMPILER_HAS_BUILTIN(__builtin_bit_cast)
+ #define EASTL_CONSTEXPR_BIT_CAST_SUPPORTED 1
+#else
+ #define EASTL_CONSTEXPR_BIT_CAST_SUPPORTED 0
+#endif
+
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/copy_help.h b/EASTL/include/EASTL/internal/copy_help.h
new file mode 100644
index 0000000..0b2c1b8
--- /dev/null
+++ b/EASTL/include/EASTL/internal/copy_help.h
@@ -0,0 +1,221 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_COPY_HELP_H
+#define EASTL_INTERNAL_COPY_HELP_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <string.h> // memcpy, memcmp, memmove
+
+
+namespace eastl
+{
+ /// move / move_n / move_backward
+ /// copy / copy_n / copy_backward
+ ///
+ /// We want to optimize move, move_n, move_backward, copy, copy_backward, copy_n to do memmove operations
+ /// when possible.
+ ///
+ /// We could possibly use memcpy, though it has stricter overlap requirements than the move and copy
+ /// algorithms and would require a runtime if/else to choose it over memmove. In particular, memcpy
+ /// allows no range overlap at all, whereas move/copy allow output end overlap and move_backward/copy_backward
+ /// allow output begin overlap. Despite this it might be useful to use memcpy for any platforms where
+ /// memcpy is significantly faster than memmove, and since in most cases the copy/move operation in fact
+ /// doesn't target overlapping memory and so memcpy would be usable.
+ ///
+ /// We can use memmove/memcpy if the following hold true:
+ /// InputIterator and OutputIterator are of the same type.
+ /// InputIterator and OutputIterator are of type contiguous_iterator_tag or simply are pointers (the two are virtually synonymous).
+ /// is_trivially_copyable<T>::value is true. i.e. the constructor T(const T& t) (or T(T&& t) if present) can be replaced by memmove(this, &t, sizeof(T))
+ ///
+ /// copy normally differs from move, but there is a case where copy is the same as move: when copy is
+ /// used with a move_iterator. We handle that case here by detecting that copy is being done with a
+ /// move_iterator and redirect it to move (which can take advantage of memmove/memcpy).
+ ///
+ /// The generic_iterator class is typically used for wrapping raw memory pointers so they can act like
+ /// formal iterators. Since pointers provide an opportunity for memmove/memcpy operations, we can
+ /// detect a generic iterator and use it's wrapped type as a pointer if it happens to be one.
+
+ // Implementation moving copying both trivial and non-trivial data via a lesser iterator than random-access.
+ template <typename /*InputIteratorCategory*/, bool /*isMove*/, bool /*canMemmove*/>
+ struct move_and_copy_helper
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ for(; first != last; ++result, ++first)
+ *result = *first;
+ return result;
+ }
+ };
+
+ // Specialization for copying non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when its a compile-time const.
+ // This specialization converts the random access InputIterator last-first to an integral type. There's simple way for us to take advantage of a random access output iterator,
+ // as the range is specified by the input instead of the output, and distance(first, last) for a non-random-access iterator is potentially slow.
+ template <>
+ struct move_and_copy_helper<EASTL_ITC_NS::random_access_iterator_tag, false, false>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::difference_type difference_type;
+
+ for(difference_type n = (last - first); n > 0; --n, ++first, ++result)
+ *result = *first;
+
+ return result;
+ }
+ };
+
+ // Specialization for moving non-trivial data via a lesser iterator than random-access.
+ template <typename InputIteratorCategory>
+ struct move_and_copy_helper<InputIteratorCategory, true, false>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ for(; first != last; ++result, ++first)
+ *result = eastl::move(*first);
+ return result;
+ }
+ };
+
+ // Specialization for moving non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when its a compile-time const.
+ template <>
+ struct move_and_copy_helper<EASTL_ITC_NS::random_access_iterator_tag, true, false>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::difference_type difference_type;
+
+ for(difference_type n = (last - first); n > 0; --n, ++first, ++result)
+ *result = eastl::move(*first);
+
+ return result;
+ }
+ };
+
+ // Specialization for when we can use memmove/memcpy. See the notes above for what conditions allow this.
+ template <bool isMove>
+ struct move_and_copy_helper<EASTL_ITC_NS::random_access_iterator_tag, isMove, true>
+ {
+ template <typename T>
+ static T* move_or_copy(const T* first, const T* last, T* result)
+ {
+ if (EASTL_UNLIKELY(first == last))
+ return result;
+
+ // We could use memcpy here if there's no range overlap, but memcpy is rarely much faster than memmove.
+ return (T*)memmove(result, first, (size_t)((uintptr_t)last - (uintptr_t)first)) + (last - first);
+ }
+ };
+
+
+ namespace internal {
+ // This exists to handle the case when EASTL_ITC_NS is `std`
+ // and the C++ version is older than C++20, in this case
+ // std::contiguous_iterator_tag does not exist so we can't use
+ // is_same<> directly.
+ #if !EASTL_STD_ITERATOR_CATEGORY_ENABLED || defined(EA_COMPILER_CPP20_ENABLED)
+ template <typename IC>
+ using is_contiguous_iterator_helper = eastl::is_same<IC, EASTL_ITC_NS::contiguous_iterator_tag>;
+ #else
+ template <typename IC>
+ using is_contiguous_iterator_helper = eastl::false_type;
+ #endif
+
+ template <typename InputIterator, typename OutputIterator>
+ struct can_be_memmoved_helper {
+ using IIC = typename eastl::iterator_traits<InputIterator>::iterator_category;
+ using OIC = typename eastl::iterator_traits<OutputIterator>::iterator_category;
+ using value_type_input = typename eastl::iterator_traits<InputIterator>::value_type;
+ using value_type_output = typename eastl::iterator_traits<OutputIterator>::value_type;
+
+ static constexpr bool value = eastl::is_trivially_copyable<value_type_output>::value &&
+ eastl::is_same<value_type_input, value_type_output>::value &&
+ (eastl::is_pointer<InputIterator>::value || is_contiguous_iterator_helper<IIC>::value) &&
+ (eastl::is_pointer<OutputIterator>::value || is_contiguous_iterator_helper<OIC>::value);
+
+ };
+ }
+
+ template <bool isMove, typename InputIterator, typename OutputIterator>
+ inline OutputIterator move_and_copy_chooser(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IIC;
+
+ const bool canBeMemmoved = internal::can_be_memmoved_helper<InputIterator, OutputIterator>::value;
+
+ return eastl::move_and_copy_helper<IIC, isMove, canBeMemmoved>::move_or_copy(first, last, result); // Need to chose based on the input iterator tag and not the output iterator tag, because containers accept input ranges of iterator types different than self.
+ }
+
+
+ // We have a second layer of unwrap_iterator calls because the original iterator might be something like move_iterator<generic_iterator<int*> > (i.e. doubly-wrapped).
+ template <bool isMove, typename InputIterator, typename OutputIterator>
+ inline OutputIterator move_and_copy_unwrapper(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return OutputIterator(eastl::move_and_copy_chooser<isMove>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), eastl::unwrap_iterator(result))); // Have to convert to OutputIterator because unwrap_iterator(result) could be a T*
+ }
+
+
+ /// move
+ ///
+ /// After this operation the elements in the moved-from range will still contain valid values of the
+ /// appropriate type, but not necessarily the same values as before the move.
+ /// Returns the end of the result range.
+ /// Note: When moving between containers, the dest range must be valid; this function doesn't resize containers.
+ /// Note: if result is within [first, last), move_backward must be used instead of move.
+ ///
+ /// Example usage:
+ /// eastl::move(myArray.begin(), myArray.end(), myDestArray.begin());
+ ///
+ /// Reference implementation:
+ /// template <typename InputIterator, typename OutputIterator>
+ /// OutputIterator move(InputIterator first, InputIterator last, OutputIterator result)
+ /// {
+ /// while(first != last)
+ /// *result++ = eastl::move(*first++);
+ /// return result;
+ /// }
+
+ template <typename InputIterator, typename OutputIterator>
+ inline OutputIterator move(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return eastl::move_and_copy_unwrapper<true>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), result);
+ }
+
+
+ /// copy
+ ///
+ /// Effects: Copies elements in the range [first, last) into the range [result, result + (last - first))
+ /// starting from first and proceeding to last. For each nonnegative integer n < (last - first),
+ /// performs *(result + n) = *(first + n).
+ ///
+ /// Returns: result + (last - first). That is, returns the end of the result. Note that this
+ /// is different from how memmove/memcpy work, as they return the beginning of the result.
+ ///
+ /// Requires: result shall not be in the range [first, last). But the end of the result range
+ /// may in fact be within the input rante.
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ template <typename InputIterator, typename OutputIterator>
+ inline OutputIterator copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ const bool isMove = eastl::is_move_iterator<InputIterator>::value; EA_UNUSED(isMove);
+
+ return eastl::move_and_copy_unwrapper<isMove>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), result);
+ }
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_COPY_HELP_H
diff --git a/EASTL/include/EASTL/internal/enable_shared.h b/EASTL/include/EASTL/internal/enable_shared.h
new file mode 100644
index 0000000..ac5f072
--- /dev/null
+++ b/EASTL/include/EASTL/internal/enable_shared.h
@@ -0,0 +1,83 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_ENABLE_SHARED_H
+#define EASTL_INTERNAL_ENABLE_SHARED_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+
+ /// enable_shared_from_this
+ ///
+ /// This is a helper mixin class that allows you to make any class
+ /// export a shared_ptr instance that is associated with the class
+ /// instance. Any class that inherits from this class gets two functions:
+ /// shared_ptr<T> shared_from_this();
+ /// shared_ptr<T> shared_from_this() const;
+ /// If you call shared_from_this, you get back a shared_ptr that
+ /// refers to the class. A second call to shared_from_this returns
+ /// another shared_ptr that is shared with the first one.
+ ///
+ /// The trick that happens which is not so obvious here (and which is
+ /// not mentioned at all in the Boost documentation of their version
+ /// of this) is that the shared_ptr constructor detects that the
+ /// class has an enable_shared_from_this mixin and sets up this system
+ /// automatically for the user. This is done with template tricks.
+ ///
+ /// For some additional explanation, see the Boost documentation for
+ /// their description of their version of enable_shared_from_this.
+ ///
+ template <typename T>
+ class enable_shared_from_this
+ {
+ public:
+ shared_ptr<T> shared_from_this()
+ { return shared_ptr<T>(mWeakPtr); }
+
+ shared_ptr<const T> shared_from_this() const
+ { return shared_ptr<const T>(mWeakPtr); }
+
+ weak_ptr<T> weak_from_this()
+ { return mWeakPtr; }
+
+ weak_ptr<const T> weak_from_this() const
+ { return mWeakPtr; }
+
+ public: // This is public because the alternative fails on some compilers that we need to support.
+ mutable weak_ptr<T> mWeakPtr;
+
+ protected:
+ template <typename U> friend class shared_ptr;
+
+ EA_CONSTEXPR enable_shared_from_this() EA_NOEXCEPT
+ { }
+
+ enable_shared_from_this(const enable_shared_from_this&) EA_NOEXCEPT
+ { }
+
+ enable_shared_from_this& operator=(const enable_shared_from_this&) EA_NOEXCEPT
+ { return *this; }
+
+ ~enable_shared_from_this()
+ { }
+
+ }; // enable_shared_from_this
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/fill_help.h b/EASTL/include/EASTL/internal/fill_help.h
new file mode 100644
index 0000000..07e3b62
--- /dev/null
+++ b/EASTL/include/EASTL/internal/fill_help.h
@@ -0,0 +1,484 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FILL_HELP_H
+#define EASTL_INTERNAL_FILL_HELP_H
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+#include <intrin.h>
+#endif
+
+namespace eastl
+{
+ // fill
+ //
+ // We implement some fill helper functions in order to allow us to optimize it
+ // where possible.
+ //
+ template <bool bIsScalar>
+ struct fill_imp
+ {
+ template <typename ForwardIterator, typename T>
+ static void do_fill(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ // The C++ standard doesn't specify whether we need to create a temporary
+ // or not, but all std STL implementations are written like what we have here.
+ for(; first != last; ++first)
+ *first = value;
+ }
+ };
+
+ template <>
+ struct fill_imp<true>
+ {
+ template <typename ForwardIterator, typename T>
+ static void do_fill(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ // We create a temp and fill from that because value might alias to the
+ // destination range and so the compiler would be forced into generating
+ // less efficient code.
+ for(const T temp = value; first != last; ++first)
+ {
+ EA_UNUSED(temp);
+ *first = static_cast<value_type>(temp);
+ }
+ }
+ };
+
+ /// fill
+ ///
+ /// fill is like memset in that it assigns a single value repeatedly to a
+ /// destination range. It allows for any type of iterator (not just an array)
+ /// and the source value can be any type, not just a byte.
+ /// Note that the source value (which is a reference) can come from within
+ /// the destination range.
+ ///
+ /// Effects: Assigns value through all the iterators in the range [first, last).
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ /// Note: The C++ standard doesn't specify anything about the value parameter
+ /// coming from within the first-last range. All std STL implementations act
+ /// as if the standard specifies that value must not come from within this range.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline void fill(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ eastl::fill_imp< is_scalar<T>::value >::do_fill(first, last, value);
+
+ // Possibly better implementation, as it will deal with small PODs as well as scalars:
+ // bEasyCopy is true if the type has a trivial constructor (e.g. is a POD) and if
+ // it is small. Thus any built-in type or any small user-defined struct will qualify.
+ //const bool bEasyCopy = eastl::type_and<eastl::has_trivial_constructor<T>::value,
+ // eastl::integral_constant<bool, (sizeof(T) <= 16)>::value;
+ //eastl::fill_imp<bEasyCopy>::do_fill(first, last, value);
+
+ }
+
+ #if (defined(EA_COMPILER_GNUC) || defined(__clang__)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if defined(EA_PROCESSOR_X86_64)
+ template <typename Value>
+ inline void fill(uint64_t* first, uint64_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ uint64_t value = (uint64_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosq\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+
+ template <typename Value>
+ inline void fill(int64_t* first, int64_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ int64_t value = (int64_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosq\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+ #endif
+
+ template <typename Value>
+ inline void fill(uint32_t* first, uint32_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ uint32_t value = (uint32_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosl\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+
+ template <typename Value>
+ inline void fill(int32_t* first, int32_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ int32_t value = (int32_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosl\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+
+ template <typename Value>
+ inline void fill(uint16_t* first, uint16_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ uint16_t value = (uint16_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosw\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+
+ template <typename Value>
+ inline void fill(int16_t* first, int16_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ int16_t value = (int16_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosw\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+ #elif defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if defined(EA_PROCESSOR_X86_64)
+ template <typename Value>
+ inline void fill(uint64_t* first, uint64_t* last, Value c)
+ {
+ __stosq(first, (uint64_t)c, (size_t)(last - first));
+ }
+
+ template <typename Value>
+ inline void fill(int64_t* first, int64_t* last, Value c)
+ {
+ __stosq((uint64_t*)first, (uint64_t)c, (size_t)(last - first));
+ }
+ #endif
+
+ template <typename Value>
+ inline void fill(uint32_t* first, uint32_t* last, Value c)
+ {
+ __stosd((unsigned long*)first, (unsigned long)c, (size_t)(last - first));
+ }
+
+ template <typename Value>
+ inline void fill(int32_t* first, int32_t* last, Value c)
+ {
+ __stosd((unsigned long*)first, (unsigned long)c, (size_t)(last - first));
+ }
+
+ template <typename Value>
+ inline void fill(uint16_t* first, uint16_t* last, Value c)
+ {
+ __stosw(first, (uint16_t)c, (size_t)(last - first));
+ }
+
+ template <typename Value>
+ inline void fill(int16_t* first, int16_t* last, Value c)
+ {
+ __stosw((uint16_t*)first, (uint16_t)c, (size_t)(last - first));
+ }
+ #endif
+
+
+ inline void fill(char* first, char* last, const char& c) // It's debateable whether we should use 'char& c' or 'char c' here.
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(char* first, char* last, const int c) // This is used for cases like 'fill(first, last, 0)'.
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(unsigned char* first, unsigned char* last, const unsigned char& c)
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(unsigned char* first, unsigned char* last, const int c)
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(signed char* first, signed char* last, const signed char& c)
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(signed char* first, signed char* last, const int c)
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) // ICL = Intel compiler
+ inline void fill(bool* first, bool* last, const bool& b)
+ {
+ memset(first, (char)b, (size_t)(last - first));
+ }
+ #endif
+
+
+
+
+ // fill_n
+ //
+ // We implement some fill helper functions in order to allow us to optimize it
+ // where possible.
+ //
+ template <bool bIsScalar>
+ struct fill_n_imp
+ {
+ template <typename OutputIterator, typename Size, typename T>
+ static OutputIterator do_fill(OutputIterator first, Size n, const T& value)
+ {
+ for(; n-- > 0; ++first)
+ *first = value;
+ return first;
+ }
+ };
+
+ template <>
+ struct fill_n_imp<true>
+ {
+ template <typename OutputIterator, typename Size, typename T>
+ static OutputIterator do_fill(OutputIterator first, Size n, const T& value)
+ {
+ typedef typename eastl::iterator_traits<OutputIterator>::value_type value_type;
+
+ // We create a temp and fill from that because value might alias to
+ // the destination range and so the compiler would be forced into
+ // generating less efficient code.
+ for(const T temp = value; n-- > 0; ++first)
+ *first = static_cast<value_type>(temp);
+ return first;
+ }
+ };
+
+ /// fill_n
+ ///
+ /// The fill_n function is very much like memset in that a copies a source value
+ /// n times into a destination range. The source value may come from within
+ /// the destination range.
+ ///
+ /// Effects: Assigns value through all the iterators in the range [first, first + n).
+ ///
+ /// Complexity: Exactly n assignments.
+ ///
+ template <typename OutputIterator, typename Size, typename T>
+ OutputIterator fill_n(OutputIterator first, Size n, const T& value)
+ {
+ return eastl::fill_n_imp<is_scalar<T>::value>::do_fill(first, n, value);
+ }
+
+ template <typename Size>
+ inline char* fill_n(char* first, Size n, const char& c)
+ {
+ return (char*)memset(first, (char)c, (size_t)n) + n;
+ }
+
+ template <typename Size>
+ inline unsigned char* fill_n(unsigned char* first, Size n, const unsigned char& c)
+ {
+ return (unsigned char*)memset(first, (unsigned char)c, (size_t)n) + n;
+ }
+
+ template <typename Size>
+ inline signed char* fill_n(signed char* first, Size n, const signed char& c)
+ {
+ return (signed char*)memset(first, (signed char)c, n) + (size_t)n;
+ }
+
+ #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) // ICL = Intel compiler
+ template <typename Size>
+ inline bool* fill_n(bool* first, Size n, const bool& b)
+ {
+ return (bool*)memset(first, (char)b, n) + (size_t)n;
+ }
+ #endif
+
+ #if (defined(EA_COMPILER_GNUC) || defined(__clang__)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if defined(EA_PROCESSOR_X86_64)
+ template <typename Size, typename Value>
+ inline uint64_t* fill_n(uint64_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ uint64_t value = (uint64_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosq\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+
+ template <typename Size, typename Value>
+ inline int64_t* fill_n(int64_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ int64_t value = (int64_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosq\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+ #endif
+
+ template <typename Size, typename Value>
+ inline uint32_t* fill_n(uint32_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ uint32_t value = (uint32_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosl\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+
+ template <typename Size, typename Value>
+ inline int32_t* fill_n(int32_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ int32_t value = (int32_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosl\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+
+ template <typename Size, typename Value>
+ inline uint16_t* fill_n(uint16_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ uint16_t value = (uint16_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosw\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+
+ template <typename Size, typename Value>
+ inline int16_t* fill_n(int16_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ int16_t value = (int16_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosw\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+ #elif defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if defined(EA_PROCESSOR_X86_64)
+ template <typename Size, typename Value>
+ inline uint64_t* fill_n(uint64_t* first, Size n, Value c)
+ {
+ __stosq(first, (uint64_t)c, (size_t)n);
+ return first + n;
+ }
+
+ template <typename Size, typename Value>
+ inline int64_t* fill_n(int64_t* first, Size n, Value c)
+ {
+ __stosq((uint64_t*)first, (uint64_t)c, (size_t)n);
+ return first + n;
+ }
+ #endif
+
+ template <typename Size, typename Value>
+ inline uint32_t* fill_n(uint32_t* first, Size n, Value c)
+ {
+ __stosd((unsigned long*)first, (unsigned long)c, (size_t)n);
+ return first + n;
+ }
+
+ template <typename Size, typename Value>
+ inline int32_t* fill_n(int32_t* first, Size n, Value c)
+ {
+ __stosd((unsigned long*)first, (unsigned long)c, (size_t)n);
+ return first + n;
+ }
+
+ template <typename Size, typename Value>
+ inline uint16_t* fill_n(uint16_t* first, Size n, Value c)
+ {
+ __stosw(first, (uint16_t)c, (size_t)n);
+ return first + n;
+ }
+
+ template <typename Size, typename Value>
+ inline int16_t* fill_n(int16_t* first, Size n, Value c)
+ {
+ __stosw((uint16_t*)first, (uint16_t)c, (size_t)n);
+ return first + n;
+ }
+ #endif
+
+} // namespace eastl
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/fixed_pool.h b/EASTL/include/EASTL/internal/fixed_pool.h
new file mode 100644
index 0000000..61c0557
--- /dev/null
+++ b/EASTL/include/EASTL/internal/fixed_pool.h
@@ -0,0 +1,1631 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the following
+// aligned_buffer
+// fixed_pool_base
+// fixed_pool
+// fixed_pool_with_overflow
+// fixed_hashtable_allocator
+// fixed_vector_allocator
+// fixed_swap
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FIXED_POOL_H
+#define EASTL_INTERNAL_FIXED_POOL_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/functional.h>
+#include <EASTL/memory.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+
+
+EA_DISABLE_ALL_VC_WARNINGS();
+#include <new>
+EA_RESTORE_ALL_VC_WARNINGS();
+
+// 4275 - non dll-interface class used as base for DLL-interface classkey 'identifier'
+EA_DISABLE_VC_WARNING(4275);
+
+
+namespace eastl
+{
+
+ /// EASTL_FIXED_POOL_DEFAULT_NAME
+ ///
+ /// Defines a default allocator name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_FIXED_POOL_DEFAULT_NAME
+ #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool".
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // aligned_buffer
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// aligned_buffer
+ ///
+ /// This is useful for creating a buffer of the same size and alignment
+ /// of a given struct or class. This is useful for creating memory pools
+ /// that support both size and alignment requirements of stored objects
+ /// but without wasting space in over-allocating.
+ ///
+ /// Note that we implement this via struct specializations, as some
+ /// compilers such as VC++ do not support specification of alignments
+ /// in any way other than via an integral constant.
+ ///
+ /// Example usage:
+ /// struct Widget{ }; // This class has a given size and alignment.
+ ///
+ /// Declare a char buffer of equal size and alignment to Widget.
+ /// aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> mWidgetBuffer;
+ ///
+ /// Declare an array this time.
+ /// aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> mWidgetArray[15];
+ ///
+ typedef char EASTL_MAY_ALIAS aligned_buffer_char;
+
+ template <size_t size, size_t alignment>
+ struct aligned_buffer { aligned_buffer_char buffer[size]; };
+
+ template<size_t size>
+ struct aligned_buffer<size, 2> { EA_PREFIX_ALIGN(2) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 4> { EA_PREFIX_ALIGN(4) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 8> { EA_PREFIX_ALIGN(8) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 16> { EA_PREFIX_ALIGN(16) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 32> { EA_PREFIX_ALIGN(32) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 64> { EA_PREFIX_ALIGN(64) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 128> { EA_PREFIX_ALIGN(128) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 256> { EA_PREFIX_ALIGN(256) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 512> { EA_PREFIX_ALIGN(512) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 1024> { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 2048> { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 4096> { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); };
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool_base
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_pool_base
+ ///
+ /// This is a base class for the implementation of fixed-size pools.
+ /// In particular, the fixed_pool and fixed_pool_with_overflow classes
+ /// are based on fixed_pool_base.
+ ///
+ struct fixed_pool_base
+ {
+ public:
+ /// fixed_pool_base
+ ///
+ fixed_pool_base(void* pMemory = NULL)
+ : mpHead((Link*)pMemory)
+ , mpNext((Link*)pMemory)
+ , mpCapacity((Link*)pMemory)
+ , mnNodeSize(0) // This is normally set in the init function.
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ mnCurrentSize = 0;
+ mnPeakSize = 0;
+ #endif
+ }
+
+
+ /// fixed_pool_base
+ ///
+ // Disabled because the default is sufficient. While it normally makes no sense to deep copy
+ // this data, our usage of this class is such that this is OK and wanted.
+ //
+ // fixed_pool_base(const fixed_pool_base& x)
+ // {
+ // }
+
+
+ /// operator=
+ ///
+ fixed_pool_base& operator=(const fixed_pool_base&)
+ {
+ // By design we do nothing. We don't attempt to deep-copy member data.
+ return *this;
+ }
+
+
+ /// init
+ ///
+ /// Initializes a fixed_pool with a given set of parameters.
+ /// You cannot call this function twice else the resulting
+ /// behaviour will be undefined. You can only call this function
+ /// after constructing the fixed_pool with the default constructor.
+ ///
+ EASTL_API void init(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0);
+
+
+ /// peak_size
+ ///
+ /// Returns the maximum number of outstanding allocations there have been
+ /// at any one time. This represents a high water mark for the allocation count.
+ ///
+ size_t peak_size() const
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ return mnPeakSize;
+ #else
+ return 0;
+ #endif
+ }
+
+
+ /// can_allocate
+ ///
+ /// Returns true if there are any free links.
+ ///
+ bool can_allocate() const
+ {
+ return (mpHead != NULL) || (mpNext != mpCapacity);
+ }
+
+ public:
+ /// Link
+ /// Implements a singly-linked list.
+ struct Link
+ {
+ Link* mpNext;
+ };
+
+ Link* mpHead;
+ Link* mpNext;
+ Link* mpCapacity;
+ size_t mnNodeSize;
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ uint32_t mnCurrentSize; /// Current number of allocated nodes.
+ uint32_t mnPeakSize; /// Max number of allocated nodes at any one time.
+ #endif
+
+ }; // fixed_pool_base
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_pool
+ ///
+ /// Implements a simple fixed pool allocator for use by fixed-size containers.
+ /// This is not a generic eastl allocator which can be plugged into an arbitrary
+ /// eastl container, as it simplifies some functions are arguments for the
+ /// purpose of efficiency.
+ ///
+ class EASTL_API fixed_pool : public fixed_pool_base
+ {
+ public:
+ /// fixed_pool
+ ///
+ /// Default constructor. User usually will want to call init() after
+ /// constructing via this constructor. The pMemory argument is for the
+ /// purposes of temporarily storing a pointer to the buffer to be used.
+ /// Even though init may have a pMemory argument, this arg is useful
+ /// for temporary storage, as per copy construction.
+ ///
+ fixed_pool(void* pMemory = NULL)
+ : fixed_pool_base(pMemory)
+ {
+ }
+
+
+ /// fixed_pool
+ ///
+ /// Constructs a fixed_pool with a given set of parameters.
+ ///
+ fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0)
+ {
+ init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+ }
+
+
+ /// fixed_pool
+ ///
+ // Disabled because the default is sufficient. While it normally makes no sense to deep copy
+ // this data, our usage of this class is such that this is OK and wanted.
+ //
+ // fixed_pool(const fixed_pool& x)
+ // {
+ // }
+
+
+ /// operator=
+ ///
+ fixed_pool& operator=(const fixed_pool&)
+ {
+ // By design we do nothing. We don't attempt to deep-copy member data.
+ return *this;
+ }
+
+
+ /// allocate
+ ///
+ /// Allocates a new object of the size specified upon class initialization.
+ /// Returns NULL if there is no more memory.
+ ///
+ void* allocate()
+ {
+ Link* pLink = mpHead;
+
+ if(pLink) // If we have space...
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(++mnCurrentSize > mnPeakSize)
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ mpHead = pLink->mpNext;
+ return pLink;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if(mpNext != mpCapacity)
+ {
+ pLink = mpNext;
+
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext) + mnNodeSize);
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(++mnCurrentSize > mnPeakSize)
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return pLink;
+ }
+
+ return NULL;
+ }
+ }
+
+ void* allocate(size_t /*alignment*/, size_t /*offset*/)
+ {
+ return allocate();
+ }
+
+ /// deallocate
+ ///
+ /// Frees the given object which was allocated by allocate().
+ /// If the given node was not allocated by allocate() then the behaviour
+ /// is undefined.
+ ///
+ void deallocate(void* p)
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ --mnCurrentSize;
+ #endif
+
+ ((Link*)p)->mpNext = mpHead;
+ mpHead = ((Link*)p);
+ }
+
+
+ using fixed_pool_base::can_allocate;
+
+
+ const char* get_name() const
+ {
+ return EASTL_FIXED_POOL_DEFAULT_NAME;
+ }
+
+
+ void set_name(const char*)
+ {
+ // Nothing to do. We don't allocate memory.
+ }
+
+ }; // fixed_pool
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool_with_overflow
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_pool_with_overflow
+ ///
+ template <typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_pool_with_overflow : public fixed_pool_base
+ {
+ public:
+ typedef OverflowAllocator overflow_allocator_type;
+
+
+ fixed_pool_with_overflow(void* pMemory = NULL)
+ : fixed_pool_base(pMemory),
+ mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
+ {
+ // Leave mpPoolBegin, mpPoolEnd uninitialized.
+ }
+
+
+ fixed_pool_with_overflow(void* pMemory, const overflow_allocator_type& allocator)
+ : fixed_pool_base(pMemory),
+ mOverflowAllocator(allocator)
+ {
+ // Leave mpPoolBegin, mpPoolEnd uninitialized.
+ }
+
+
+ fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0)
+ : mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
+ {
+ fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+ mpPoolBegin = pMemory;
+ }
+
+
+ fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset,
+ const overflow_allocator_type& allocator)
+ : mOverflowAllocator(allocator)
+ {
+ fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+ mpPoolBegin = pMemory;
+ }
+
+
+ // Disabled because the default is sufficient. While it normally makes no sense to deep copy
+ // this data, our usage of this class is such that this is OK and wanted.
+ //
+ //fixed_pool_with_overflow(const fixed_pool_with_overflow& x)
+ //{
+ // ...
+ //}
+
+
+ fixed_pool_with_overflow& operator=(const fixed_pool_with_overflow& x)
+ {
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mOverflowAllocator = x.mOverflowAllocator;
+ #else
+ (void)x;
+ #endif
+
+ return *this;
+ }
+
+
+ void init(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0)
+ {
+ fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+ mpPoolBegin = pMemory;
+ }
+
+
+ void* allocate()
+ {
+ void* p = NULL;
+ Link* pLink = mpHead;
+
+ if(pLink)
+ {
+ // Unlink from chain
+ p = pLink;
+ mpHead = pLink->mpNext;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if(mpNext != mpCapacity)
+ {
+ p = pLink = mpNext;
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext) + mnNodeSize);
+ }
+ else
+ p = mOverflowAllocator.allocate(mnNodeSize);
+ }
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(p && (++mnCurrentSize > mnPeakSize))
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return p;
+ }
+
+
+ void* allocate(size_t alignment, size_t alignmentOffset)
+ {
+ void* p = NULL;
+ Link* pLink = mpHead;
+
+ if (pLink)
+ {
+ // Unlink from chain
+ p = pLink;
+ mpHead = pLink->mpNext;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if (mpNext != mpCapacity)
+ {
+ p = pLink = mpNext;
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext)+mnNodeSize);
+ }
+ else
+ {
+ p = allocate_memory(mOverflowAllocator, mnNodeSize, alignment, alignmentOffset);
+ EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+ }
+
+ }
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if (p && (++mnCurrentSize > mnPeakSize))
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return p;
+ }
+
+ void deallocate(void* p)
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ --mnCurrentSize;
+ #endif
+
+ if((p >= mpPoolBegin) && (p < mpCapacity))
+ {
+ ((Link*)p)->mpNext = mpHead;
+ mpHead = ((Link*)p);
+ }
+ else
+ mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
+ }
+
+
+ using fixed_pool_base::can_allocate;
+
+
+ const char* get_name() const
+ {
+ return mOverflowAllocator.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mOverflowAllocator.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const
+ {
+ return mOverflowAllocator;
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator()
+ {
+ return mOverflowAllocator;
+ }
+
+
+ void set_overflow_allocator(const overflow_allocator_type& overflowAllocator)
+ {
+ mOverflowAllocator = overflowAllocator;
+ }
+ public:
+ OverflowAllocator mOverflowAllocator;
+ void* mpPoolBegin; // Ideally we wouldn't need this member variable. he problem is that the information about the pool buffer and object size is stored in the owning container and we can't have access to it without increasing the amount of code we need and by templating more code. It may turn out that simply storing data here is smaller in the end.
+
+ }; // fixed_pool_with_overflow
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_node_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_node_allocator
+ ///
+ /// Note: This class was previously named fixed_node_pool, but was changed because this name
+ /// was inconsistent with the other allocators here which ended with _allocator.
+ ///
+ /// Implements a fixed_pool with a given node count, alignment, and alignment offset.
+ /// fixed_node_allocator is like fixed_pool except it is templated on the node type instead
+ /// of being a generic allocator. All it does is pass allocations through to
+ /// the fixed_pool base. This functionality is separate from fixed_pool because there
+ /// are other uses for fixed_pool.
+ ///
+ /// We template on kNodeSize instead of node_type because the former allows for the
+ /// two different node_types of the same size to use the same template implementation.
+ ///
+ /// Template parameters:
+ /// nodeSize The size of the object to allocate.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_node_allocator
+ {
+ public:
+ typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<OverflowAllocator>, fixed_pool>::type pool_type;
+ typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset
+ };
+
+ public:
+ pool_type mPool;
+
+ public:
+ //fixed_node_allocator(const char* pName)
+ //{
+ // mPool.set_name(pName);
+ //}
+
+
+ fixed_node_allocator(void* pNodeBuffer)
+ : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+ {
+ }
+
+
+ fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
+ : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator)
+ {
+ }
+
+
+ /// fixed_node_allocator
+ ///
+ /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem
+ /// broken, as fixed pools cannot take over ownership of other fixed pools' memory.
+ /// However, we declare that this copy ctor can only ever be safely called when
+ /// the user has intentionally pre-seeded the source with the destination pointer.
+ /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg
+ /// problems with containers being their own allocators, without incurring any memory
+ /// costs or extra code costs. There's another reason for this: we very strongly want
+ /// to avoid full copying of instances of fixed_pool around, especially via the stack.
+ /// Larger pools won't even be able to fit on many machine's stacks. So this solution
+ /// is also a mechanism to prevent that situation from existing and being used.
+ /// Perhaps some day we'll find a more elegant yet costless way around this.
+ ///
+ fixed_node_allocator(const this_type& x)
+ : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator)
+ {
+ }
+
+
+ this_type& operator=(const this_type& x)
+ {
+ mPool = x.mPool;
+ return *this;
+ }
+
+
+ void* allocate(size_t n, int /*flags*/ = 0)
+ {
+ (void)n;
+ EASTL_ASSERT(n == kNodeSize);
+ return mPool.allocate();
+ }
+
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0)
+ {
+ (void)n;
+ EASTL_ASSERT(n == kNodeSize);
+ return mPool.allocate(alignment, offset);
+ }
+
+
+ void deallocate(void* p, size_t)
+ {
+ mPool.deallocate(p);
+ }
+
+
+ /// can_allocate
+ ///
+ /// Returns true if there are any free links.
+ ///
+ bool can_allocate() const
+ {
+ return mPool.can_allocate();
+ }
+
+
+ /// reset
+ ///
+ /// This function unilaterally resets the fixed pool back to a newly initialized
+ /// state. This is useful for using in tandem with container reset functionality.
+ ///
+ void reset(void* pNodeBuffer)
+ {
+ mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+ }
+
+
+ const char* get_name() const
+ {
+ return mPool.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mPool.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mPool.mOverflowAllocator;
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mPool.mOverflowAllocator;
+ }
+
+
+ void set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mPool.mOverflowAllocator = allocator;
+ }
+
+
+ void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ mPool.mOverflowAllocator = x.mPool.mOverflowAllocator;
+ }
+
+ }; // fixed_node_allocator
+
+
+ // This is a near copy of the code above, with the only difference being
+ // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
+ // and the get_overflow_allocator / set_overflow_allocator functions.
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
+ class fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
+ {
+ public:
+ typedef fixed_pool pool_type;
+ typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset
+ };
+
+ public:
+ pool_type mPool;
+
+ public:
+ fixed_node_allocator(void* pNodeBuffer)
+ : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+ {
+ }
+
+
+ fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
+ : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+ {
+ }
+
+
+ /// fixed_node_allocator
+ ///
+ /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem
+ /// broken, as fixed pools cannot take over ownership of other fixed pools' memory.
+ /// However, we declare that this copy ctor can only ever be safely called when
+ /// the user has intentionally pre-seeded the source with the destination pointer.
+ /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg
+ /// problems with containers being their own allocators, without incurring any memory
+ /// costs or extra code costs. There's another reason for this: we very strongly want
+ /// to avoid full copying of instances of fixed_pool around, especially via the stack.
+ /// Larger pools won't even be able to fit on many machine's stacks. So this solution
+ /// is also a mechanism to prevent that situation from existing and being used.
+ /// Perhaps some day we'll find a more elegant yet costless way around this.
+ ///
+ fixed_node_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization.
+ : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+ {
+ }
+
+
+ this_type& operator=(const this_type& x)
+ {
+ mPool = x.mPool;
+ return *this;
+ }
+
+
+ void* allocate(size_t n, int /*flags*/ = 0)
+ {
+ (void)n;
+ EASTL_ASSERT(n == kNodeSize);
+ return mPool.allocate();
+ }
+
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0)
+ {
+ (void)n;
+ EASTL_ASSERT(n == kNodeSize);
+ return mPool.allocate(alignment, offset);
+ }
+
+
+ void deallocate(void* p, size_t)
+ {
+ mPool.deallocate(p);
+ }
+
+
+ bool can_allocate() const
+ {
+ return mPool.can_allocate();
+ }
+
+
+ void reset(void* pNodeBuffer)
+ {
+ mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+ }
+
+
+ const char* get_name() const
+ {
+ return mPool.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mPool.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+
+ void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+ {
+ // We don't have an overflow allocator.
+ EASTL_ASSERT(false);
+ }
+
+
+ void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ // We don't have an overflow allocator.
+ }
+
+ }; // fixed_node_allocator
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator==(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a == &b); // They are only equal if they are the same object.
+ }
+
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator!=(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a != &b); // They are only equal if they are the same object.
+ }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_hashtable_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_hashtable_allocator
+ ///
+ /// Provides a base class for fixed hashtable allocations.
+ /// To consider: Have this inherit from fixed_node_allocator.
+ ///
+ /// Template parameters:
+ /// bucketCount The fixed number of hashtable buckets to provide.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_hashtable_allocator
+ {
+ public:
+ typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<OverflowAllocator>, fixed_pool>::type pool_type;
+ typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
+ kBucketsSize = bucketCount * sizeof(void*),
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset,
+ kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
+ };
+
+ protected:
+ pool_type mPool;
+ void* mpBucketBuffer;
+
+ public:
+ // Disabled because it causes compile conflicts.
+ //fixed_hashtable_allocator(const char* pName)
+ //{
+ // mPool.set_name(pName);
+ //}
+
+ fixed_hashtable_allocator(void* pNodeBuffer)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(NULL)
+ {
+ // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator),
+ mpBucketBuffer(NULL)
+ {
+ // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(pBucketBuffer)
+ {
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& allocator)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator),
+ mpBucketBuffer(pBucketBuffer)
+ {
+ }
+
+
+ /// fixed_hashtable_allocator
+ ///
+ /// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool.
+ /// See the discussion above in fixed_node_allocator for important information about this.
+ ///
+ fixed_hashtable_allocator(const this_type& x)
+ : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator),
+ mpBucketBuffer(x.mpBucketBuffer)
+ {
+ }
+
+
+ fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
+ {
+ mPool = x.mPool;
+ return *this;
+ }
+
+
+ void* allocate(size_t n, int flags = 0)
+ {
+ // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+ EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
+
+ if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+ {
+ EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n);
+ return mPool.allocate();
+ }
+
+ // If bucket size no longer fits within local buffer...
+ if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize))
+ return get_overflow_allocator().allocate(n);
+
+ EASTL_ASSERT(n <= kBucketsSize);
+ return mpBucketBuffer;
+ }
+
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+ {
+ // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+ if ((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+ {
+ EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n);
+ return mPool.allocate(alignment, offset);
+ }
+
+ // If bucket size no longer fits within local buffer...
+ if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize))
+ return get_overflow_allocator().allocate(n, alignment, offset);
+
+ EASTL_ASSERT(n <= kBucketsSize);
+ return mpBucketBuffer;
+ }
+
+
+ void deallocate(void* p, size_t)
+ {
+ if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
+ mPool.deallocate(p);
+ }
+
+
+ bool can_allocate() const
+ {
+ return mPool.can_allocate();
+ }
+
+
+ void reset(void* pNodeBuffer)
+ {
+ // No need to modify mpBucketBuffer, as that is constant.
+ mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+ }
+
+
+ const char* get_name() const
+ {
+ return mPool.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mPool.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const
+ {
+ return mPool.mOverflowAllocator;
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator()
+ {
+ return mPool.mOverflowAllocator;
+ }
+
+
+ void set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mPool.mOverflowAllocator = allocator;
+ }
+
+
+ void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ mPool.mOverflowAllocator = x.mPool.mOverflowAllocator;
+ }
+
+ }; // fixed_hashtable_allocator
+
+
+ // This is a near copy of the code above, with the only difference being
+ // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
+ // and the get_overflow_allocator / set_overflow_allocator functions.
+ template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
+ class fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
+ {
+ public:
+ typedef fixed_pool pool_type;
+ typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
+ kBucketsSize = bucketCount * sizeof(void*),
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset,
+ kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
+ };
+
+ protected:
+ pool_type mPool;
+ void* mpBucketBuffer;
+
+ public:
+ // Disabled because it causes compile conflicts.
+ //fixed_hashtable_allocator(const char* pName)
+ //{
+ // mPool.set_name(pName);
+ //}
+
+ fixed_hashtable_allocator(void* pNodeBuffer)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(NULL)
+ {
+ // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+ }
+
+ fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(NULL)
+ {
+ // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(pBucketBuffer)
+ {
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(pBucketBuffer)
+ {
+ }
+
+
+ /// fixed_hashtable_allocator
+ ///
+ /// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool.
+ /// See the discussion above in fixed_node_allocator for important information about this.
+ ///
+ fixed_hashtable_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization.
+ : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(x.mpBucketBuffer)
+ {
+ }
+
+
+ fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
+ {
+ mPool = x.mPool;
+ return *this;
+ }
+
+
+ void* allocate(size_t n, int flags = 0)
+ {
+ // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+ EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
+ if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+ {
+ EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
+ return mPool.allocate();
+ }
+
+ // Don't allow hashtable buckets to overflow in this case.
+ EASTL_ASSERT(n <= kBucketsSize);
+ return mpBucketBuffer;
+ }
+
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+ {
+ // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+ if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+ {
+ EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
+ return mPool.allocate(alignment, offset);
+ }
+
+ // Don't allow hashtable buckets to overflow in this case.
+ EASTL_ASSERT(n <= kBucketsSize);
+ return mpBucketBuffer;
+ }
+
+
+ void deallocate(void* p, size_t)
+ {
+ if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
+ mPool.deallocate(p);
+ }
+
+
+ bool can_allocate() const
+ {
+ return mPool.can_allocate();
+ }
+
+
+ void reset(void* pNodeBuffer)
+ {
+ // No need to modify mpBucketBuffer, as that is constant.
+ mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+ }
+
+
+ const char* get_name() const
+ {
+ return mPool.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mPool.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+ void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+ {
+ // We don't have an overflow allocator.
+ EASTL_ASSERT(false);
+ }
+
+ void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ // We don't have an overflow allocator.
+ }
+
+ }; // fixed_hashtable_allocator
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator==(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a == &b); // They are only equal if they are the same object.
+ }
+
+
+ template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator!=(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a != &b); // They are only equal if they are the same object.
+ }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_vector_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_vector_allocator
+ ///
+ /// Template parameters:
+ /// nodeSize The size of individual objects.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_vector_allocator
+ {
+ public:
+ typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset
+ };
+
+ public:
+ overflow_allocator_type mOverflowAllocator;
+ void* mpPoolBegin; // To consider: Find some way to make this data unnecessary, without increasing template proliferation.
+
+ public:
+ // Disabled because it causes compile conflicts.
+ //fixed_vector_allocator(const char* pName = NULL)
+ //{
+ // mOverflowAllocator.set_name(pName);
+ //}
+
+ fixed_vector_allocator(void* pNodeBuffer = nullptr)
+ : mpPoolBegin(pNodeBuffer)
+ {
+ }
+
+ fixed_vector_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
+ : mOverflowAllocator(allocator), mpPoolBegin(pNodeBuffer)
+ {
+ }
+
+ fixed_vector_allocator(const fixed_vector_allocator& x)
+ : mOverflowAllocator(x.mOverflowAllocator), mpPoolBegin(x.mpPoolBegin)
+ {
+ }
+
+ fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
+ {
+ // We leave our mpPoolBegin variable alone.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mOverflowAllocator = x.mOverflowAllocator;
+ #else
+ (void)x;
+ #endif
+
+ return *this;
+ }
+
+ void* allocate(size_t n, int flags = 0)
+ {
+ return mOverflowAllocator.allocate(n, flags);
+ }
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+ {
+ return mOverflowAllocator.allocate(n, alignment, offset, flags);
+ }
+
+ void deallocate(void* p, size_t n)
+ {
+ if(p != mpPoolBegin)
+ mOverflowAllocator.deallocate(p, n); // Can't do this to our own allocation.
+ }
+
+ const char* get_name() const
+ {
+ return mOverflowAllocator.get_name();
+ }
+
+ void set_name(const char* pName)
+ {
+ mOverflowAllocator.set_name(pName);
+ }
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mOverflowAllocator;
+ }
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mOverflowAllocator;
+ }
+
+ void set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mOverflowAllocator = allocator;
+ }
+
+ void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ mOverflowAllocator = x.mOverflowAllocator;
+ }
+
+ }; // fixed_vector_allocator
+
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
+ class fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
+ {
+ public:
+ typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset
+ };
+
+ // Disabled because it causes compile conflicts.
+ //fixed_vector_allocator(const char* = NULL) // This char* parameter is present so that this class can be like the other version.
+ //{
+ //}
+
+ fixed_vector_allocator()
+ {
+ }
+
+ fixed_vector_allocator(void* /*pNodeBuffer*/)
+ {
+ }
+
+ fixed_vector_allocator(void* /*pNodeBuffer*/, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
+ {
+ }
+
+ /// fixed_vector_allocator
+ ///
+ // Disabled because there is nothing to do. No member data. And the default for this is sufficient.
+ // fixed_vector_allocator(const fixed_vector_allocator&)
+ // {
+ // }
+
+ // Disabled because there is nothing to do. No member data.
+ //fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
+ //{
+ // return *this;
+ //}
+
+ void* allocate(size_t /*n*/, int /*flags*/ = 0)
+ {
+ EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space.
+ EASTL_CRASH(); // We choose to crash here since the owning vector can't handle an allocator returning null. Better to crash earlier.
+ return NULL;
+ }
+
+ void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+ {
+ EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space.
+ EASTL_CRASH(); // We choose to crash here since the owning vector can't handle an allocator returning null. Better to crash earlier.
+ return NULL;
+ }
+
+ void deallocate(void* /*p*/, size_t /*n*/)
+ {
+ }
+
+ const char* get_name() const
+ {
+ return EASTL_FIXED_POOL_DEFAULT_NAME;
+ }
+
+ void set_name(const char* /*pName*/)
+ {
+ }
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+ void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+ {
+ // We don't have an overflow allocator.
+ EASTL_ASSERT(false);
+ }
+
+ void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ // We don't have an overflow allocator.
+ }
+
+ }; // fixed_vector_allocator
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator==(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a == &b); // They are only equal if they are the same object.
+ }
+
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator!=(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a != &b); // They are only equal if they are the same object.
+ }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_swap
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_swap
+ ///
+ /// This function implements a swap suitable for fixed containers.
+ /// This is an issue because the size of fixed containers can be very
+ /// large, due to their having the container buffer within themselves.
+ /// Note that we are referring to sizeof(container) and not the total
+ /// sum of memory allocated by the container from the heap.
+ ///
+ ///
+ /// This implementation switches at compile time whether or not the
+ /// temporary is allocated on the stack or the heap as some compilers
+ /// will allocate the (large) stack frame regardless of which code
+ /// path is picked.
+ template <typename Container, bool UseHeapTemporary>
+ class fixed_swap_impl
+ {
+ public:
+ static void swap(Container& a, Container& b);
+ };
+
+
+ template <typename Container>
+ class fixed_swap_impl<Container, false>
+ {
+ public:
+ static void swap(Container& a, Container& b)
+ {
+ Container temp(EASTL_MOVE(a)); // Can't use global swap because that could
+ a = EASTL_MOVE(b); // itself call this swap function in return.
+ b = EASTL_MOVE(temp);
+ }
+ };
+
+
+ template <typename Container>
+ class fixed_swap_impl<Container, true>
+ {
+ public:
+ static void swap(Container& a, Container& b)
+ {
+ EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME);
+ void* const pMemory = allocator.allocate(sizeof(a));
+
+ if(pMemory)
+ {
+ Container* pTemp = ::new(pMemory) Container(EASTL_MOVE(a));
+ a = EASTL_MOVE(b);
+ b = EASTL_MOVE(*pTemp);
+
+ pTemp->~Container();
+ allocator.deallocate(pMemory, sizeof(a));
+ }
+ }
+ };
+
+
+ template<typename Container>
+ void fixed_swap(Container& a, Container& b)
+ {
+ return fixed_swap_impl<Container, sizeof(Container) >= EASTL_MAX_STACK_USAGE>::swap(a, b);
+ }
+
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/function.h b/EASTL/include/EASTL/internal/function.h
new file mode 100644
index 0000000..ace71d8
--- /dev/null
+++ b/EASTL/include/EASTL/internal/function.h
@@ -0,0 +1,163 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FUNCTION_H
+#define EASTL_FUNCTION_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/function_detail.h>
+
+namespace eastl
+{
+
+ /// EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE
+ ///
+ /// Defines the size of the SSO buffer which is used to hold the specified capture state of the callable.
+ ///
+ #ifndef EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE
+ #define EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE (2 * sizeof(void*))
+ #endif
+
+ static_assert(EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE >= sizeof(void*), "functor storage must be able to hold at least a pointer!");
+
+ template <typename>
+ class function;
+
+ template <typename R, typename... Args>
+ class function<R(Args...)> : public internal::function_detail<EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE, R(Args...)>
+ {
+ private:
+ using Base = internal::function_detail<EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE, R(Args...)>;
+ public:
+ using typename Base::result_type;
+
+ function() EA_NOEXCEPT = default;
+ function(std::nullptr_t p) EA_NOEXCEPT
+ : Base(p)
+ {
+ }
+
+ function(const function& other)
+ : Base(other)
+ {
+ }
+
+ function(function&& other)
+ : Base(eastl::move(other))
+ {
+ }
+
+ template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(Functor, R, Args..., Base, function)>
+ function(Functor functor)
+ : Base(eastl::move(functor))
+ {
+ }
+
+ ~function() EA_NOEXCEPT = default;
+
+ function& operator=(const function& other)
+ {
+ Base::operator=(other);
+ return *this;
+ }
+
+ function& operator=(function&& other)
+ {
+ Base::operator=(eastl::move(other));
+ return *this;
+ }
+
+ function& operator=(std::nullptr_t p) EA_NOEXCEPT
+ {
+ Base::operator=(p);
+ return *this;
+ }
+
+ template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(Functor, R, Args..., Base, function)>
+ function& operator=(Functor&& functor)
+ {
+ Base::operator=(eastl::forward<Functor>(functor));
+ return *this;
+ }
+
+ template <typename Functor>
+ function& operator=(eastl::reference_wrapper<Functor> f) EA_NOEXCEPT
+ {
+ Base::operator=(f);
+ return *this;
+ }
+
+ void swap(function& other) EA_NOEXCEPT
+ {
+ Base::swap(other);
+ }
+
+ explicit operator bool() const EA_NOEXCEPT
+ {
+ return Base::operator bool();
+ }
+
+ R operator ()(Args... args) const
+ {
+ return Base::operator ()(eastl::forward<Args>(args)...);
+ }
+
+ #if EASTL_RTTI_ENABLED
+ const std::type_info& target_type() const EA_NOEXCEPT
+ {
+ return Base::target_type();
+ }
+
+ template <typename Functor>
+ Functor* target() EA_NOEXCEPT
+ {
+ return Base::target();
+ }
+
+ template <typename Functor>
+ const Functor* target() const EA_NOEXCEPT
+ {
+ return Base::target();
+ }
+ #endif // EASTL_RTTI_ENABLED
+ };
+
+ template <typename R, typename... Args>
+ bool operator==(const function<R(Args...)>& f, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !f;
+ }
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename R, typename... Args>
+ bool operator==(std::nullptr_t, const function<R(Args...)>& f) EA_NOEXCEPT
+ {
+ return !f;
+ }
+
+ template <typename R, typename... Args>
+ bool operator!=(const function<R(Args...)>& f, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !!f;
+ }
+
+ template <typename R, typename... Args>
+ bool operator!=(std::nullptr_t, const function<R(Args...)>& f) EA_NOEXCEPT
+ {
+ return !!f;
+ }
+#endif
+ template <typename R, typename... Args>
+ void swap(function<R(Args...)>& lhs, function<R(Args...)>& rhs)
+ {
+ lhs.swap(rhs);
+ }
+
+} // namespace eastl
+
+#endif // EASTL_FUNCTION_H
diff --git a/EASTL/include/EASTL/internal/function_detail.h b/EASTL/include/EASTL/internal/function_detail.h
new file mode 100644
index 0000000..3ee3667
--- /dev/null
+++ b/EASTL/include/EASTL/internal/function_detail.h
@@ -0,0 +1,673 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FUNCTION_DETAIL_H
+#define EASTL_FUNCTION_DETAIL_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EABase/eabase.h>
+#include <EABase/nullptr.h>
+#include <EABase/config/eacompilertraits.h>
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/functional_base.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/internal/function_help.h>
+
+#include <EASTL/type_traits.h>
+#include <EASTL/utility.h>
+#include <EASTL/allocator.h>
+
+#if EASTL_RTTI_ENABLED
+ #include <typeinfo>
+#endif
+
+#if EASTL_EXCEPTIONS_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <new>
+ #include <exception>
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+namespace eastl
+{
+ #if EASTL_EXCEPTIONS_ENABLED
+ class bad_function_call : public std::exception
+ {
+ public:
+ bad_function_call() EA_NOEXCEPT = default;
+
+ const char* what() const EA_NOEXCEPT EA_OVERRIDE
+ {
+ return "bad function_detail call";
+ }
+ };
+ #endif
+
+ namespace internal
+ {
+ class unused_class {};
+
+ union functor_storage_alignment
+ {
+ void (*unused_func_ptr)(void);
+ void (unused_class::*unused_func_mem_ptr)(void);
+ void* unused_ptr;
+ };
+
+ template <int SIZE_IN_BYTES>
+ struct functor_storage
+ {
+ static_assert(SIZE_IN_BYTES >= 0, "local buffer storage cannot have a negative size!");
+ template <typename Ret>
+ Ret& GetStorageTypeRef() const
+ {
+ return *reinterpret_cast<Ret*>(const_cast<char*>(&storage[0]));
+ }
+
+ union
+ {
+ functor_storage_alignment align;
+ char storage[SIZE_IN_BYTES];
+ };
+ };
+
+ template <>
+ struct functor_storage<0>
+ {
+ template <typename Ret>
+ Ret& GetStorageTypeRef() const
+ {
+ return *reinterpret_cast<Ret*>(const_cast<char*>(&storage[0]));
+ }
+
+ union
+ {
+ functor_storage_alignment align;
+ char storage[sizeof(functor_storage_alignment)];
+ };
+ };
+
+ template <typename Functor, int SIZE_IN_BYTES>
+ struct is_functor_inplace_allocatable
+ {
+ static EA_CONSTEXPR bool value =
+ sizeof(Functor) <= sizeof(functor_storage<SIZE_IN_BYTES>) &&
+ (eastl::alignment_of_v<functor_storage<SIZE_IN_BYTES>> % eastl::alignment_of_v<Functor>) == 0;
+ };
+
+
+ /// function_base_detail
+ ///
+ template <int SIZE_IN_BYTES>
+ class function_base_detail
+ {
+ public:
+ using FunctorStorageType = functor_storage<SIZE_IN_BYTES>;
+ FunctorStorageType mStorage;
+
+ enum ManagerOperations : int
+ {
+ MGROPS_DESTRUCT_FUNCTOR = 0,
+ MGROPS_COPY_FUNCTOR = 1,
+ MGROPS_MOVE_FUNCTOR = 2,
+ #if EASTL_RTTI_ENABLED
+ MGROPS_GET_TYPE_INFO = 3,
+ MGROPS_GET_FUNC_PTR = 4,
+ #endif
+ };
+
+ // Functor can be allocated inplace
+ template <typename Functor, typename = void>
+ class function_manager_base
+ {
+ public:
+
+ static Functor* GetFunctorPtr(const FunctorStorageType& storage) EA_NOEXCEPT
+ {
+ return &(storage.template GetStorageTypeRef<Functor>());
+ }
+
+ template <typename T>
+ static void CreateFunctor(FunctorStorageType& storage, T&& functor)
+ {
+ ::new (GetFunctorPtr(storage)) Functor(eastl::forward<T>(functor));
+ }
+
+ static void DestructFunctor(FunctorStorageType& storage)
+ {
+ GetFunctorPtr(storage)->~Functor();
+ }
+
+ static void CopyFunctor(FunctorStorageType& to, const FunctorStorageType& from)
+ {
+ ::new (GetFunctorPtr(to)) Functor(*GetFunctorPtr(from));
+ }
+
+ static void MoveFunctor(FunctorStorageType& to, FunctorStorageType& from) EA_NOEXCEPT
+ {
+ ::new (GetFunctorPtr(to)) Functor(eastl::move(*GetFunctorPtr(from)));
+ }
+
+ static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT
+ {
+ switch (ops)
+ {
+ case MGROPS_DESTRUCT_FUNCTOR:
+ {
+ DestructFunctor(*static_cast<FunctorStorageType*>(to));
+ }
+ break;
+ case MGROPS_COPY_FUNCTOR:
+ {
+ CopyFunctor(*static_cast<FunctorStorageType*>(to),
+ *static_cast<const FunctorStorageType*>(from));
+ }
+ break;
+ case MGROPS_MOVE_FUNCTOR:
+ {
+ MoveFunctor(*static_cast<FunctorStorageType*>(to), *static_cast<FunctorStorageType*>(from));
+ DestructFunctor(*static_cast<FunctorStorageType*>(from));
+ }
+ break;
+ default:
+ break;
+ }
+ return nullptr;
+ }
+ };
+
+ // Functor is allocated on the heap
+ template <typename Functor>
+ class function_manager_base<Functor, typename eastl::enable_if<!is_functor_inplace_allocatable<Functor, SIZE_IN_BYTES>::value>::type>
+ {
+ public:
+ static Functor* GetFunctorPtr(const FunctorStorageType& storage) EA_NOEXCEPT
+ {
+ return storage.template GetStorageTypeRef<Functor*>();
+ }
+
+ static Functor*& GetFunctorPtrRef(const FunctorStorageType& storage) EA_NOEXCEPT
+ {
+ return storage.template GetStorageTypeRef<Functor*>();
+ }
+
+ template <typename T>
+ static void CreateFunctor(FunctorStorageType& storage, T&& functor)
+ {
+ auto& allocator = *EASTLAllocatorDefault();
+ Functor* func = static_cast<Functor*>(allocator.allocate(sizeof(Functor), alignof(Functor), 0));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ if (!func)
+ {
+ throw std::bad_alloc();
+ }
+ #else
+ EASTL_ASSERT_MSG(func != nullptr, "Allocation failed!");
+ #endif
+
+ ::new (static_cast<void*>(func)) Functor(eastl::forward<T>(functor));
+ GetFunctorPtrRef(storage) = func;
+ }
+
+ static void DestructFunctor(FunctorStorageType& storage)
+ {
+ Functor* func = GetFunctorPtr(storage);
+ if (func)
+ {
+ auto& allocator = *EASTLAllocatorDefault();
+ func->~Functor();
+ allocator.deallocate(static_cast<void*>(func), sizeof(Functor));
+ }
+ }
+
+ static void CopyFunctor(FunctorStorageType& to, const FunctorStorageType& from)
+ {
+ auto& allocator = *EASTLAllocatorDefault();
+ Functor* func = static_cast<Functor*>(allocator.allocate(sizeof(Functor), alignof(Functor), 0));
+ #if EASTL_EXCEPTIONS_ENABLED
+ if (!func)
+ {
+ throw std::bad_alloc();
+ }
+ #else
+ EASTL_ASSERT_MSG(func != nullptr, "Allocation failed!");
+ #endif
+ ::new (static_cast<void*>(func)) Functor(*GetFunctorPtr(from));
+ GetFunctorPtrRef(to) = func;
+ }
+
+ static void MoveFunctor(FunctorStorageType& to, FunctorStorageType& from) EA_NOEXCEPT
+ {
+ Functor* func = GetFunctorPtr(from);
+ GetFunctorPtrRef(to) = func;
+ GetFunctorPtrRef(from) = nullptr;
+ }
+
+ static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT
+ {
+ switch (ops)
+ {
+ case MGROPS_DESTRUCT_FUNCTOR:
+ {
+ DestructFunctor(*static_cast<FunctorStorageType*>(to));
+ }
+ break;
+ case MGROPS_COPY_FUNCTOR:
+ {
+ CopyFunctor(*static_cast<FunctorStorageType*>(to),
+ *static_cast<const FunctorStorageType*>(from));
+ }
+ break;
+ case MGROPS_MOVE_FUNCTOR:
+ {
+ MoveFunctor(*static_cast<FunctorStorageType*>(to), *static_cast<FunctorStorageType*>(from));
+ // Moved ptr, no need to destruct ourselves
+ }
+ break;
+ default:
+ break;
+ }
+ return nullptr;
+ }
+ };
+
+ template <typename Functor, typename R, typename... Args>
+ class function_manager final : public function_manager_base<Functor>
+ {
+ public:
+ using Base = function_manager_base<Functor>;
+
+ #if EASTL_RTTI_ENABLED
+ static void* GetTypeInfo() EA_NOEXCEPT
+ {
+ return reinterpret_cast<void*>(const_cast<std::type_info*>(&typeid(Functor)));
+ }
+
+ static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT
+ {
+ switch (ops)
+ {
+ case MGROPS_GET_TYPE_INFO:
+ {
+ return GetTypeInfo();
+ }
+ break;
+ case MGROPS_GET_FUNC_PTR:
+ {
+ return static_cast<void*>(Base::GetFunctorPtr(*static_cast<FunctorStorageType*>(to)));
+ }
+ break;
+ default:
+ {
+ return Base::Manager(to, from, ops);
+ }
+ break;
+ }
+ }
+ #endif // EASTL_RTTI_ENABLED
+
+ /**
+ * NOTE:
+ *
+ * The order of arguments here is vital to the call optimization. Let's dig into why and look at some asm.
+ * We have two invoker signatures to consider:
+ * R Invoker(const FunctorStorageType& functor, Args... args)
+ * R Invoker(Args... args, const FunctorStorageType& functor)
+ *
+ * Assume we are using the Windows x64 Calling Convention where the first 4 arguments are passed into
+ * RCX, RDX, R8, R9. This optimization works for any Calling Convention, we are just using Windows x64 for
+ * this example.
+ *
+ * Given the following member function: void TestMemberFunc(int a, int b)
+ * RCX == this
+ * RDX == a
+ * R8 == b
+ *
+ * All three arguments to the function including the hidden this pointer, which in C++ is always the first argument
+ * are passed into the first three registers.
+ * The function call chain for eastl::function<>() is as follows:
+ * operator ()(this, Args... args) -> Invoker(Args... args, this->mStorage) -> StoredFunction(Args... arg)
+ *
+ * Let's look at what is happening at the asm level with the different Invoker function signatures and why.
+ *
+ * You will notice that operator ()() and Invoker() have the arguments reversed. operator ()() just directly calls
+ * to Invoker(), it is a tail call, so we force inline the call operator to ensure we directly call to the Invoker().
+ * Most compilers always inline it anyways by default; have been instances where it doesn't even though the asm ends
+ * up being cheaper.
+ * call -> call -> call versus call -> call
+ *
+ * eastl::function<int(int, int)> = FunctionPointer
+ *
+ * Assume we have the above eastl::function object that holds a pointer to a function as the internal callable.
+ *
+ * Invoker(this->mStorage, Args... args) is called with the follow arguments in registers:
+ * RCX = this | RDX = a | R8 = b
+ *
+ * Inside Invoker() we use RCX to deference into the eastl::function object and get the function pointer to call.
+ * This function to call has signature Func(int, int) and thus requires its arguments in registers RCX and RDX.
+ * The compiler must shift all the arguments towards the left. The full asm looks something as follows.
+ *
+ * Calling Invoker: Inside Invoker:
+ *
+ * mov rcx, this mov rax, [rcx]
+ * mov rdx, a mov rcx, rdx
+ * mov r8, b mov rdx, r8
+ * call [rcx + offset to Invoker] jmp [rax]
+ *
+ * Notice how the compiler shifts all the arguments before calling the callable and also we only use the this pointer
+ * to access the internal storage inside the eastl::function object.
+ *
+ * Invoker(Args... args, this->mStorage) is called with the following arguments in registers:
+ * RCX = a | RDX = b | R8 = this
+ *
+ * You can see we no longer have to shift the arguments down when going to call the internal stored callable.
+ *
+ * Calling Invoker: Inside Invoker:
+ *
+ * mov rcx, a mov rax, [r8]
+ * mov rdx, b jmp [rax]
+ * mov r8, this
+ * call [r8 + offset to Invoker]
+ *
+ * The generated asm does a straight tail jmp to the loaded function pointer. The arguments are already in the correct
+ * registers.
+ *
+ * For Functors or Lambdas with no captures, this gives us another free register to use to pass arguments since the this
+ * is at the end, it can be passed onto the stack if we run out of registers. Since the callable has no captures; inside
+ * the Invoker(), we won't ever need to touch this thus we can just call the operator ()() or let the compiler inline it.
+ *
+ * For a callable with captures there is no perf hit since the callable in the common case is inlined and the pointer to the callable
+ * buffer is passed in a register which the compiler can use to access the captures.
+ *
+ * For eastl::function<void(const T&, int, int)> that a holds a pointer to member function. The this pointers is implicitly
+ * the first argument in the argument list, const T&, and the member function pointer will be called on that object.
+ * This prevents any argument shifting since the this for the member function pointer is already in RCX.
+ *
+ * This is why having this at the end of the argument list is important for generating efficient Invoker() thunks.
+ */
+ static R Invoker(Args... args, const FunctorStorageType& functor)
+ {
+ return eastl::invoke(*Base::GetFunctorPtr(functor), eastl::forward<Args>(args)...);
+ }
+ };
+
+ function_base_detail() EA_NOEXCEPT = default;
+ ~function_base_detail() EA_NOEXCEPT = default;
+ };
+
+ #define EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, BASE, MYSELF) \
+ typename eastl::enable_if_t<eastl::is_invocable_r_v<RET, FUNCTOR, ARGS> && \
+ !eastl::is_base_of_v<BASE, eastl::decay_t<FUNCTOR>> && \
+ !eastl::is_same_v<eastl::decay_t<FUNCTOR>, MYSELF>>
+
+ #define EASTL_INTERNAL_FUNCTION_DETAIL_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, MYSELF) \
+ EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, MYSELF, MYSELF)
+
+
+ /// function_detail
+ ///
+ template <int, typename>
+ class function_detail;
+
+ template <int SIZE_IN_BYTES, typename R, typename... Args>
+ class function_detail<SIZE_IN_BYTES, R(Args...)> : public function_base_detail<SIZE_IN_BYTES>
+ {
+ public:
+ using result_type = R;
+
+ protected:
+ using Base = function_base_detail<SIZE_IN_BYTES>;
+ using FunctorStorageType = typename function_base_detail<SIZE_IN_BYTES>::FunctorStorageType;
+ using Base::mStorage;
+
+ public:
+ function_detail() EA_NOEXCEPT = default;
+ function_detail(std::nullptr_t) EA_NOEXCEPT {}
+
+ function_detail(const function_detail& other)
+ {
+ if (this != &other)
+ {
+ Copy(other);
+ }
+ }
+
+ function_detail(function_detail&& other)
+ {
+ if (this != &other)
+ {
+ Move(eastl::move(other));
+ }
+ }
+
+ template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_DETAIL_VALID_FUNCTION_ARGS(Functor, R, Args..., function_detail)>
+ function_detail(Functor functor)
+ {
+ CreateForwardFunctor(eastl::move(functor));
+ }
+
+ ~function_detail() EA_NOEXCEPT
+ {
+ Destroy();
+ }
+
+ function_detail& operator=(const function_detail& other)
+ {
+ if (this != &other)
+ {
+ Destroy();
+ Copy(other);
+ }
+
+ return *this;
+ }
+
+ function_detail& operator=(function_detail&& other)
+ {
+ if(this != &other)
+ {
+ Destroy();
+ Move(eastl::move(other));
+ }
+
+ return *this;
+ }
+
+ function_detail& operator=(std::nullptr_t) EA_NOEXCEPT
+ {
+ Destroy();
+ mMgrFuncPtr = nullptr;
+ mInvokeFuncPtr = &DefaultInvoker;
+
+ return *this;
+ }
+
+ template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_DETAIL_VALID_FUNCTION_ARGS(Functor, R, Args..., function_detail)>
+ function_detail& operator=(Functor&& functor)
+ {
+ Destroy();
+ CreateForwardFunctor(eastl::forward<Functor>(functor));
+ return *this;
+ }
+
+ template <typename Functor>
+ function_detail& operator=(eastl::reference_wrapper<Functor> f) EA_NOEXCEPT
+ {
+ Destroy();
+ CreateForwardFunctor(f);
+ return *this;
+ }
+
+ void swap(function_detail& other) EA_NOEXCEPT
+ {
+ if(this == &other)
+ return;
+
+ FunctorStorageType tempStorage;
+ if (other.HaveManager())
+ {
+ (void)(*other.mMgrFuncPtr)(static_cast<void*>(&tempStorage), static_cast<void*>(&other.mStorage),
+ Base::ManagerOperations::MGROPS_MOVE_FUNCTOR);
+ }
+
+ if (HaveManager())
+ {
+ (void)(*mMgrFuncPtr)(static_cast<void*>(&other.mStorage), static_cast<void*>(&mStorage),
+ Base::ManagerOperations::MGROPS_MOVE_FUNCTOR);
+ }
+
+ if (other.HaveManager())
+ {
+ (void)(*other.mMgrFuncPtr)(static_cast<void*>(&mStorage), static_cast<void*>(&tempStorage),
+ Base::ManagerOperations::MGROPS_MOVE_FUNCTOR);
+ }
+
+ eastl::swap(mMgrFuncPtr, other.mMgrFuncPtr);
+ eastl::swap(mInvokeFuncPtr, other.mInvokeFuncPtr);
+ }
+
+ explicit operator bool() const EA_NOEXCEPT
+ {
+ return HaveManager();
+ }
+
+ EASTL_FORCE_INLINE R operator ()(Args... args) const
+ {
+ return (*mInvokeFuncPtr)(eastl::forward<Args>(args)..., this->mStorage);
+ }
+
+ #if EASTL_RTTI_ENABLED
+ const std::type_info& target_type() const EA_NOEXCEPT
+ {
+ if (HaveManager())
+ {
+ void* ret = (*mMgrFuncPtr)(nullptr, nullptr, Base::ManagerOperations::MGROPS_GET_TYPE_INFO);
+ return *(static_cast<const std::type_info*>(ret));
+ }
+ return typeid(void);
+ }
+
+ template <typename Functor>
+ Functor* target() EA_NOEXCEPT
+ {
+ if (HaveManager() && target_type() == typeid(Functor))
+ {
+ void* ret = (*mMgrFuncPtr)(static_cast<void*>(&mStorage), nullptr,
+ Base::ManagerOperations::MGROPS_GET_FUNC_PTR);
+ return ret ? static_cast<Functor*>(ret) : nullptr;
+ }
+ return nullptr;
+ }
+
+ template <typename Functor>
+ const Functor* target() const EA_NOEXCEPT
+ {
+ if (HaveManager() && target_type() == typeid(Functor))
+ {
+ void* ret = (*mMgrFuncPtr)(static_cast<void*>(&mStorage), nullptr,
+ Base::ManagerOperations::MGROPS_GET_FUNC_PTR);
+ return ret ? static_cast<const Functor*>(ret) : nullptr;
+ }
+ return nullptr;
+ }
+ #endif // EASTL_RTTI_ENABLED
+
+ private:
+ bool HaveManager() const EA_NOEXCEPT
+ {
+ return (mMgrFuncPtr != nullptr);
+ }
+
+ void Destroy() EA_NOEXCEPT
+ {
+ if (HaveManager())
+ {
+ (void)(*mMgrFuncPtr)(static_cast<void*>(&mStorage), nullptr,
+ Base::ManagerOperations::MGROPS_DESTRUCT_FUNCTOR);
+ }
+ }
+
+ void Copy(const function_detail& other)
+ {
+ if (other.HaveManager())
+ {
+ (void)(*other.mMgrFuncPtr)(static_cast<void*>(&mStorage),
+ const_cast<void*>(static_cast<const void*>(&other.mStorage)),
+ Base::ManagerOperations::MGROPS_COPY_FUNCTOR);
+ }
+
+ mMgrFuncPtr = other.mMgrFuncPtr;
+ mInvokeFuncPtr = other.mInvokeFuncPtr;
+ }
+
+ void Move(function_detail&& other)
+ {
+ if (other.HaveManager())
+ {
+ (void)(*other.mMgrFuncPtr)(static_cast<void*>(&mStorage), static_cast<void*>(&other.mStorage),
+ Base::ManagerOperations::MGROPS_MOVE_FUNCTOR);
+ }
+
+ mMgrFuncPtr = other.mMgrFuncPtr;
+ mInvokeFuncPtr = other.mInvokeFuncPtr;
+ other.mMgrFuncPtr = nullptr;
+ other.mInvokeFuncPtr = &DefaultInvoker;
+ }
+
+ template <typename Functor>
+ void CreateForwardFunctor(Functor&& functor)
+ {
+ using DecayedFunctorType = typename eastl::decay<Functor>::type;
+ using FunctionManagerType = typename Base::template function_manager<DecayedFunctorType, R, Args...>;
+
+ if (internal::is_null(functor))
+ {
+ mMgrFuncPtr = nullptr;
+ mInvokeFuncPtr = &DefaultInvoker;
+ }
+ else
+ {
+ mMgrFuncPtr = &FunctionManagerType::Manager;
+ mInvokeFuncPtr = &FunctionManagerType::Invoker;
+ FunctionManagerType::CreateFunctor(mStorage, eastl::forward<Functor>(functor));
+ }
+ }
+
+ private:
+ typedef void* (*ManagerFuncPtr)(void*, void*, typename Base::ManagerOperations);
+ typedef R (*InvokeFuncPtr)(Args..., const FunctorStorageType&);
+
+ EA_DISABLE_GCC_WARNING(-Wreturn-type);
+ EA_DISABLE_CLANG_WARNING(-Wreturn-type);
+ EA_DISABLE_VC_WARNING(4716); // 'function' must return a value
+ // We cannot assume that R is default constructible.
+ // This function is called only when the function object CANNOT be called because it is empty,
+ // it will always throw or assert so we never use the return value anyways and neither should the caller.
+ static R DefaultInvoker(Args... /*args*/, const FunctorStorageType& /*functor*/)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw eastl::bad_function_call();
+ #else
+ EASTL_ASSERT_MSG(false, "function_detail call on an empty function_detail<R(Args..)>");
+ #endif
+ };
+ EA_RESTORE_VC_WARNING();
+ EA_RESTORE_CLANG_WARNING();
+ EA_RESTORE_GCC_WARNING();
+
+
+ ManagerFuncPtr mMgrFuncPtr = nullptr;
+ InvokeFuncPtr mInvokeFuncPtr = &DefaultInvoker;
+ };
+
+ } // namespace internal
+
+} // namespace eastl
+
+#endif // EASTL_FUNCTION_DETAIL_H
diff --git a/EASTL/include/EASTL/internal/function_help.h b/EASTL/include/EASTL/internal/function_help.h
new file mode 100644
index 0000000..04481d3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/function_help.h
@@ -0,0 +1,51 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTERNAL_FUNCTION_HELP_H
+#define EASTL_INTERNAL_FUNCTION_HELP_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+ namespace internal
+ {
+
+ //////////////////////////////////////////////////////////////////////
+ // is_null
+ //
+ template <typename T>
+ bool is_null(const T&)
+ {
+ return false;
+ }
+
+ template <typename Result, typename... Arguments>
+ bool is_null(Result (*const& function_pointer)(Arguments...))
+ {
+ return function_pointer == nullptr;
+ }
+
+ template <typename Result, typename Class, typename... Arguments>
+ bool is_null(Result (Class::*const& function_pointer)(Arguments...))
+ {
+ return function_pointer == nullptr;
+ }
+
+ template <typename Result, typename Class, typename... Arguments>
+ bool is_null(Result (Class::*const& function_pointer)(Arguments...) const)
+ {
+ return function_pointer == nullptr;
+ }
+
+ } // namespace internal
+} // namespace eastl
+
+#endif // Header include guard
+
diff --git a/EASTL/include/EASTL/internal/functional_base.h b/EASTL/include/EASTL/internal/functional_base.h
new file mode 100644
index 0000000..de446db
--- /dev/null
+++ b/EASTL/include/EASTL/internal/functional_base.h
@@ -0,0 +1,420 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FUNCTIONAL_BASE_H
+#define EASTL_INTERNAL_FUNCTIONAL_BASE_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+
+
+namespace eastl
+{
+ // foward declaration for swap
+ template <typename T>
+ inline void swap(T& a, T& b)
+ EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible<T>::value && eastl::is_nothrow_move_assignable<T>::value);
+
+
+ /// invoke
+ ///
+ /// invoke is a generalized function-call operator which works on function pointers, member function
+ /// pointers, callable objects and member pointers.
+ ///
+ /// For (member/non-member) function pointers and callable objects, it returns the result of calling
+ /// the function/object with the specified arguments. For member data pointers, it simply returns
+ /// the member.
+ ///
+ /// Note that there are also reference_wrapper specializations of invoke, which need to be defined
+ /// later since reference_wrapper uses invoke in its implementation. Those are defined immediately
+ /// after the definition of reference_wrapper.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/functional/invoke
+ ///
+ template <typename R, typename C, typename T, typename... Args>
+ EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((eastl::forward<T>(obj).*func)(eastl::forward<Args>(args)...)))
+ -> typename enable_if<is_base_of<C, decay_t<T>>::value,
+ decltype((eastl::forward<T>(obj).*func)(eastl::forward<Args>(args)...))>::type
+ {
+ return (eastl::forward<T>(obj).*func)(eastl::forward<Args>(args)...);
+ }
+
+ template <typename F, typename... Args>
+ EA_CONSTEXPR auto invoke_impl(F&& func, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(eastl::forward<F>(func)(eastl::forward<Args>(args)...)))
+ -> decltype(eastl::forward<F>(func)(eastl::forward<Args>(args)...))
+ {
+ return eastl::forward<F>(func)(eastl::forward<Args>(args)...);
+ }
+
+
+ template <typename R, typename C, typename T, typename... Args>
+ EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...)))
+ -> decltype(((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...))
+ {
+ return ((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...);
+ }
+
+ template <typename M, typename C, typename T>
+ EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(eastl::forward<T>(obj).*member))
+ -> typename enable_if<
+ is_base_of<C, decay_t<T>>::value,
+ decltype(eastl::forward<T>(obj).*member)
+ >::type
+ {
+ return eastl::forward<T>(obj).*member;
+ }
+
+ template <typename M, typename C, typename T>
+ EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((*eastl::forward<T>(obj)).*member))
+ -> decltype((*eastl::forward<T>(obj)).*member)
+ {
+ return (*eastl::forward<T>(obj)).*member;
+ }
+
+ template <typename F, typename... Args>
+ EA_CONSTEXPR decltype(auto) invoke(F&& func, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(invoke_impl(eastl::forward<F>(func), eastl::forward<Args>(args)...)))
+ {
+ return invoke_impl(eastl::forward<F>(func), eastl::forward<Args>(args)...);
+ }
+
+ template <typename F, typename = void, typename... Args>
+ struct invoke_result_impl {
+ };
+
+ template <typename F, typename... Args>
+ struct invoke_result_impl<F, void_t<decltype(invoke_impl(eastl::declval<F>(), eastl::declval<Args>()...))>, Args...>
+ {
+ typedef decltype(invoke_impl(eastl::declval<F>(), eastl::declval<Args>()...)) type;
+ };
+
+ template <typename F, typename... Args>
+ struct invoke_result : public invoke_result_impl<F, void, Args...> {};
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename F, typename... Args>
+ using invoke_result_t = typename invoke_result<F, Args...>::type;
+ #endif
+
+ template <typename F, typename = void, typename... Args>
+ struct is_invocable_impl : public eastl::false_type {};
+
+ template <typename F, typename... Args>
+ struct is_invocable_impl<F, void_t<typename eastl::invoke_result<F, Args...>::type>, Args...> : public eastl::true_type {};
+
+ template <typename F, typename... Args>
+ struct is_invocable : public is_invocable_impl<F, void, Args...> {};
+
+ template <typename R, typename F, typename = void, typename... Args>
+ struct is_invocable_r_impl : public eastl::false_type {};
+
+ template <typename R, typename F, typename... Args>
+ struct is_invocable_r_impl<R, F, void_t<typename invoke_result<F, Args...>::type>, Args...>
+ : public disjunction<is_convertible<typename invoke_result<F, Args...>::type, R>,
+ is_same<typename remove_cv<R>::type, void>> {};
+
+ template <typename R, typename F, typename... Args>
+ struct is_invocable_r : public is_invocable_r_impl<R, F, void, Args...> {};
+
+ template <typename F, typename... Args>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_v = is_invocable<F, Args...>::value;
+
+ template <typename R, typename F, typename... Args>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_r_v = is_invocable_r<R, F, Args...>::value;
+
+ template <typename F, typename = void, typename... Args>
+ struct is_nothrow_invocable_impl : public eastl::false_type {};
+
+ template <typename F, typename... Args>
+ struct is_nothrow_invocable_impl<F, void_t<typename eastl::invoke_result<F, Args...>::type>, Args...>
+ : public eastl::bool_constant<EA_NOEXCEPT_EXPR(eastl::invoke(eastl::declval<F>(), eastl::declval<Args>()...))> {};
+
+ template <typename F, typename... Args>
+ struct is_nothrow_invocable : public is_nothrow_invocable_impl<F, void, Args...> {};
+
+ template <typename R, typename F, typename = void, typename... Args>
+ struct is_nothrow_invocable_r_impl : public eastl::false_type {};
+
+ template <typename R, typename F, typename... Args>
+ struct is_nothrow_invocable_r_impl<R, F, void_t<typename eastl::invoke_result<F, Args...>::type>, Args...>
+ {
+ static EA_CONSTEXPR_OR_CONST bool value = eastl::is_convertible<typename eastl::invoke_result<F, Args...>::type, R>::value
+ && eastl::is_nothrow_invocable<F, Args...>::value;
+ };
+
+ template <typename R, typename F, typename... Args>
+ struct is_nothrow_invocable_r : public is_nothrow_invocable_r_impl<R, F, void, Args...> {};
+
+ template <typename F, typename... Args>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_no_throw_invocable_v = is_nothrow_invocable<F, Args...>::value;
+
+ template <typename R, typename F, typename... Args>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_nothrow_invocable_r_v = is_nothrow_invocable_r<R, F, Args...>::value;
+
+ /// allocator_arg_t
+ ///
+ /// allocator_arg_t is an empty class type used to disambiguate the overloads of
+ /// constructors and member functions of allocator-aware objects, including tuple,
+ /// function, promise, and packaged_task.
+ /// http://en.cppreference.com/w/cpp/memory/allocator_arg_t
+ ///
+ struct allocator_arg_t
+ {};
+
+
+ /// allocator_arg
+ ///
+ /// allocator_arg is a constant of type allocator_arg_t used to disambiguate, at call site,
+ /// the overloads of the constructors and member functions of allocator-aware objects,
+ /// such as tuple, function, promise, and packaged_task.
+ /// http://en.cppreference.com/w/cpp/memory/allocator_arg
+ ///
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR allocator_arg_t allocator_arg = allocator_arg_t();
+
+
+ template <typename Argument, typename Result>
+ struct unary_function
+ {
+ typedef Argument argument_type;
+ typedef Result result_type;
+ };
+
+
+ template <typename Argument1, typename Argument2, typename Result>
+ struct binary_function
+ {
+ typedef Argument1 first_argument_type;
+ typedef Argument2 second_argument_type;
+ typedef Result result_type;
+ };
+
+
+ /// less<T>
+ template <typename T = void>
+ struct less : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a < b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/less_void
+ template <>
+ struct less<void>
+ {
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) < eastl::forward<B>(b))
+ { return eastl::forward<A>(a) < eastl::forward<B>(b); }
+ };
+
+
+ /// reference_wrapper
+ template <typename T>
+ class reference_wrapper
+ {
+ public:
+ typedef T type;
+
+ reference_wrapper(T&) EA_NOEXCEPT;
+ reference_wrapper(T&&) = delete;
+ reference_wrapper(const reference_wrapper<T>& x) EA_NOEXCEPT;
+
+ reference_wrapper& operator=(const reference_wrapper<T>& x) EA_NOEXCEPT;
+
+ operator T& () const EA_NOEXCEPT;
+ T& get() const EA_NOEXCEPT;
+
+ template <typename... ArgTypes>
+ typename eastl::invoke_result<T&, ArgTypes...>::type operator() (ArgTypes&&...) const;
+
+ private:
+ T* val;
+ };
+
+ template <typename T>
+ reference_wrapper<T>::reference_wrapper(T &v) EA_NOEXCEPT
+ : val(eastl::addressof(v))
+ {}
+
+ template <typename T>
+ reference_wrapper<T>::reference_wrapper(const reference_wrapper<T>& other) EA_NOEXCEPT
+ : val(other.val)
+ {}
+
+ template <typename T>
+ reference_wrapper<T>& reference_wrapper<T>::operator=(const reference_wrapper<T>& other) EA_NOEXCEPT
+ {
+ val = other.val;
+ return *this;
+ }
+
+ template <typename T>
+ reference_wrapper<T>::operator T&() const EA_NOEXCEPT
+ {
+ return *val;
+ }
+
+ template <typename T>
+ T& reference_wrapper<T>::get() const EA_NOEXCEPT
+ {
+ return *val;
+ }
+
+ template <typename T>
+ template <typename... ArgTypes>
+ typename eastl::invoke_result<T&, ArgTypes...>::type reference_wrapper<T>::operator() (ArgTypes&&... args) const
+ {
+ return eastl::invoke(*val, eastl::forward<ArgTypes>(args)...);
+ }
+
+ // reference_wrapper-specific utilties
+ template <typename T>
+ reference_wrapper<T> ref(T& t) EA_NOEXCEPT
+ {
+ return eastl::reference_wrapper<T>(t);
+ }
+
+ template <typename T>
+ void ref(const T&&) = delete;
+
+ template <typename T>
+ reference_wrapper<T> ref(reference_wrapper<T>t) EA_NOEXCEPT
+ {
+ return eastl::ref(t.get());
+ }
+
+ template <typename T>
+ reference_wrapper<const T> cref(const T& t) EA_NOEXCEPT
+ {
+ return eastl::reference_wrapper<const T>(t);
+ }
+
+ template <typename T>
+ void cref(const T&&) = delete;
+
+ template <typename T>
+ reference_wrapper<const T> cref(reference_wrapper<T> t) EA_NOEXCEPT
+ {
+ return eastl::cref(t.get());
+ }
+
+
+ // reference_wrapper-specific type traits
+ template <typename T>
+ struct is_reference_wrapper_helper
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_reference_wrapper_helper<eastl::reference_wrapper<T> >
+ : public eastl::true_type {};
+
+ template <typename T>
+ struct is_reference_wrapper
+ : public eastl::is_reference_wrapper_helper<typename eastl::remove_cv<T>::type> {};
+
+
+ // Helper which adds a reference to a type when given a reference_wrapper of that type.
+ template <typename T>
+ struct remove_reference_wrapper
+ { typedef T type; };
+
+ template <typename T>
+ struct remove_reference_wrapper< eastl::reference_wrapper<T> >
+ { typedef T& type; };
+
+ template <typename T>
+ struct remove_reference_wrapper< const eastl::reference_wrapper<T> >
+ { typedef T& type; };
+
+ // reference_wrapper specializations of invoke
+ // These have to come after reference_wrapper is defined, but reference_wrapper needs to have a
+ // definition of invoke, so these specializations need to come after everything else has been defined.
+ template <typename R, typename C, typename T, typename... Args>
+ EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((obj.get().*func)(eastl::forward<Args>(args)...)))
+ -> typename enable_if<is_reference_wrapper<eastl::decay_t<T>>::value,
+ decltype((obj.get().*func)(eastl::forward<Args>(args)...))>::type
+ {
+ return (obj.get().*func)(eastl::forward<Args>(args)...);
+ }
+
+ template <typename M, typename C, typename T>
+ EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(obj.get().*member))
+ -> typename enable_if<is_reference_wrapper<eastl::decay_t<T>>::value,
+ decltype(obj.get().*member)>::type
+ {
+ return obj.get().*member;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bind
+ ///////////////////////////////////////////////////////////////////////
+
+ /// bind1st
+ ///
+ template <typename Operation>
+ class binder1st : public unary_function<typename Operation::second_argument_type, typename Operation::result_type>
+ {
+ protected:
+ typename Operation::first_argument_type value;
+ Operation op;
+
+ public:
+ binder1st(const Operation& x, const typename Operation::first_argument_type& y)
+ : value(y), op(x) { }
+
+ typename Operation::result_type operator()(const typename Operation::second_argument_type& x) const
+ { return op(value, x); }
+
+ typename Operation::result_type operator()(typename Operation::second_argument_type& x) const
+ { return op(value, x); }
+ };
+
+
+ template <typename Operation, typename T>
+ inline binder1st<Operation> bind1st(const Operation& op, const T& x)
+ {
+ typedef typename Operation::first_argument_type value;
+ return binder1st<Operation>(op, value(x));
+ }
+
+
+ /// bind2nd
+ ///
+ template <typename Operation>
+ class binder2nd : public unary_function<typename Operation::first_argument_type, typename Operation::result_type>
+ {
+ protected:
+ Operation op;
+ typename Operation::second_argument_type value;
+
+ public:
+ binder2nd(const Operation& x, const typename Operation::second_argument_type& y)
+ : op(x), value(y) { }
+
+ typename Operation::result_type operator()(const typename Operation::first_argument_type& x) const
+ { return op(x, value); }
+
+ typename Operation::result_type operator()(typename Operation::first_argument_type& x) const
+ { return op(x, value); }
+ };
+
+
+ template <typename Operation, typename T>
+ inline binder2nd<Operation> bind2nd(const Operation& op, const T& x)
+ {
+ typedef typename Operation::second_argument_type value;
+ return binder2nd<Operation>(op, value(x));
+ }
+
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_FUNCTIONAL_BASE_H
diff --git a/EASTL/include/EASTL/internal/generic_iterator.h b/EASTL/include/EASTL/internal/generic_iterator.h
new file mode 100644
index 0000000..0f1e28b
--- /dev/null
+++ b/EASTL/include/EASTL/internal/generic_iterator.h
@@ -0,0 +1,219 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements a generic iterator from a given iteratable type, such as a pointer.
+// We cannot put this file into our own iterator.h file because we need to
+// still be able to use this file when we have our iterator.h disabled.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_GENERIC_ITERATOR_H
+#define EASTL_INTERNAL_GENERIC_ITERATOR_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/type_traits.h>
+
+// There is no warning number 'number'.
+// Member template functions cannot be used for copy-assignment or copy-construction.
+EA_DISABLE_VC_WARNING(4619 4217);
+
+
+namespace eastl
+{
+
+ /// generic_iterator
+ ///
+ /// Converts something which can be iterated into a formal iterator.
+ /// While this class' primary purpose is to allow the conversion of
+ /// a pointer to an iterator, you can convert anything else to an
+ /// iterator by defining an iterator_traits<> specialization for that
+ /// object type. See EASTL iterator.h for this.
+ ///
+ /// Example usage:
+ /// typedef generic_iterator<int*> IntArrayIterator;
+ /// typedef generic_iterator<int*, char> IntArrayIteratorOther;
+ ///
+ template <typename Iterator, typename Container = void>
+ class generic_iterator
+ {
+ protected:
+ Iterator mIterator;
+
+ public:
+ typedef typename eastl::iterator_traits<Iterator>::iterator_category iterator_category;
+ typedef typename eastl::iterator_traits<Iterator>::value_type value_type;
+ typedef typename eastl::iterator_traits<Iterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<Iterator>::reference reference;
+ typedef typename eastl::iterator_traits<Iterator>::pointer pointer;
+ typedef Iterator iterator_type;
+ typedef Container container_type;
+ typedef generic_iterator<Iterator, Container> this_type;
+
+ generic_iterator()
+ : mIterator(iterator_type()) { }
+
+ explicit generic_iterator(const iterator_type& x)
+ : mIterator(x) { }
+
+ this_type& operator=(const iterator_type& x)
+ { mIterator = x; return *this; }
+
+ template <typename Iterator2>
+ generic_iterator(const generic_iterator<Iterator2, Container>& x)
+ : mIterator(x.base()) { }
+
+ reference operator*() const
+ { return *mIterator; }
+
+ pointer operator->() const
+ { return mIterator; }
+
+ this_type& operator++()
+ { ++mIterator; return *this; }
+
+ this_type operator++(int)
+ { return this_type(mIterator++); }
+
+ this_type& operator--()
+ { --mIterator; return *this; }
+
+ this_type operator--(int)
+ { return this_type(mIterator--); }
+
+ reference operator[](const difference_type& n) const
+ { return mIterator[n]; }
+
+ this_type& operator+=(const difference_type& n)
+ { mIterator += n; return *this; }
+
+ this_type operator+(const difference_type& n) const
+ { return this_type(mIterator + n); }
+
+ this_type& operator-=(const difference_type& n)
+ { mIterator -= n; return *this; }
+
+ this_type operator-(const difference_type& n) const
+ { return this_type(mIterator - n); }
+
+ const iterator_type& base() const
+ { return mIterator; }
+
+ private:
+ // Unwrapping interface, not part of the public API.
+ const iterator_type& unwrap() const
+ { return mIterator; }
+
+ // The unwrapper helpers need access to unwrap().
+ friend is_iterator_wrapper_helper<this_type, true>;
+ friend is_iterator_wrapper<this_type>;
+
+ }; // class generic_iterator
+
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator==(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() == rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator==(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() == rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator!=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() != rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator!=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() != rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator<(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() < rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator<(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() < rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator>(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() > rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator>(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() > rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator<=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() <= rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator<=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() <= rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator>=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() >= rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator>=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() >= rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline typename generic_iterator<IteratorL, Container>::difference_type
+ operator-(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() - rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline generic_iterator<Iterator, Container>
+ operator+(typename generic_iterator<Iterator, Container>::difference_type n, const generic_iterator<Iterator, Container>& x)
+ { return generic_iterator<Iterator, Container>(x.base() + n); }
+
+
+
+ /// is_generic_iterator
+ ///
+ /// Tells if an iterator is one of these generic_iterators. This is useful if you want to
+ /// write code that uses miscellaneous iterators but wants to tell if they are generic_iterators.
+ /// A primary reason to do so is that you can get at the pointer within the generic_iterator.
+ ///
+ template <typename Iterator>
+ struct is_generic_iterator : public false_type { };
+
+ template <typename Iterator, typename Container>
+ struct is_generic_iterator<generic_iterator<Iterator, Container> > : public true_type { };
+
+
+ /// unwrap_generic_iterator
+ ///
+ /// Returns `it.base()` if it's a generic_iterator, else returns `it` as-is.
+ ///
+ /// Example usage:
+ /// vector<int> intVector;
+ /// eastl::generic_iterator<vector<int>::iterator> genericIterator(intVector.begin());
+ /// vector<int>::iterator it = unwrap_generic_iterator(genericIterator);
+ ///
+ template <typename Iterator>
+ inline typename eastl::is_iterator_wrapper_helper<Iterator, eastl::is_generic_iterator<Iterator>::value>::iterator_type unwrap_generic_iterator(Iterator it)
+ {
+ // get_unwrapped(it) -> it.unwrap() which is equivalent to `it.base()` for generic_iterator and to `it` otherwise.
+ return eastl::is_iterator_wrapper_helper<Iterator, eastl::is_generic_iterator<Iterator>::value>::get_unwrapped(it);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/hashtable.h b/EASTL/include/EASTL/internal/hashtable.h
new file mode 100644
index 0000000..077f5b4
--- /dev/null
+++ b/EASTL/include/EASTL/internal/hashtable.h
@@ -0,0 +1,3125 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a hashtable, much like the C++11 unordered_set/unordered_map.
+// proposed classes.
+// The primary distinctions between this hashtable and C++11 unordered containers are:
+// - hashtable is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - hashtable is slightly more space-efficient than a conventional std hashtable
+// implementation on platforms with 64 bit size_t. This is
+// because std STL uses size_t (64 bits) in data structures whereby 32 bits
+// of data would be fine.
+// - hashtable can contain objects with alignment requirements. TR1 hash tables
+// cannot do so without a bit of tedious non-portable effort.
+// - hashtable supports debug memory naming natively.
+// - hashtable provides a find function that lets you specify a type that is
+// different from the hash table key type. This is particularly useful for
+// the storing of string objects but finding them by char pointers.
+// - hashtable provides a lower level insert function which lets the caller
+// specify the hash code and optionally the node instance.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_HASHTABLE_H
+#define EASTL_INTERNAL_HASHTABLE_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/tuple.h>
+#include <string.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+ #include <new>
+ #include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+// 4512 - 'class' : assignment operator could not be generated.
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4512 4530 4571);
+
+
+namespace eastl
+{
+
+ /// EASTL_HASHTABLE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASHTABLE_DEFAULT_NAME
+ #define EASTL_HASHTABLE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hashtable" // Unless the user overrides something, this is "EASTL hashtable".
+ #endif
+
+
+ /// EASTL_HASHTABLE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASHTABLE_DEFAULT_ALLOCATOR
+ #define EASTL_HASHTABLE_DEFAULT_ALLOCATOR allocator_type(EASTL_HASHTABLE_DEFAULT_NAME)
+ #endif
+
+
+ /// kHashtableAllocFlagBuckets
+ /// Flag to allocator which indicates that we are allocating buckets and not nodes.
+ enum { kHashtableAllocFlagBuckets = 0x00400000 };
+
+
+ /// gpEmptyBucketArray
+ ///
+ /// A shared representation of an empty hash table. This is present so that
+ /// a new empty hashtable allocates no memory. It has two entries, one for
+ /// the first lone empty (NULL) bucket, and one for the non-NULL trailing sentinel.
+ ///
+ extern EASTL_API void* gpEmptyBucketArray[2];
+
+
+ /// EASTL_MACRO_SWAP
+ ///
+ /// Use EASTL_MACRO_SWAP because GCC (at least v4.6-4.8) has a bug where it fails to compile eastl::swap(mpBucketArray, x.mpBucketArray).
+ ///
+ #define EASTL_MACRO_SWAP(Type, a, b) \
+ { Type temp = a; a = b; b = temp; }
+
+
+ /// hash_node
+ ///
+ /// A hash_node stores an element in a hash table, much like a
+ /// linked list node stores an element in a linked list.
+ /// A hash_node additionally can, via template parameter,
+ /// store a hash code in the node to speed up hash calculations
+ /// and comparisons in some cases.
+ ///
+ template <typename Value, bool bCacheHashCode>
+ struct hash_node;
+
+ EA_DISABLE_VC_WARNING(4625 4626) // "copy constructor / assignment operator could not be generated because a base class copy constructor is inaccessible or deleted"
+ #ifdef EA_COMPILER_MSVC_2015
+ EA_DISABLE_VC_WARNING(5026) // disable warning: "move constructor was implicitly defined as deleted"
+ #endif
+ template <typename Value>
+ struct hash_node<Value, true>
+ {
+ hash_node() = default;
+ hash_node(const hash_node&) = default;
+ hash_node(hash_node&&) = default;
+
+ Value mValue;
+ hash_node* mpNext;
+ eastl_size_t mnHashCode; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ } EASTL_MAY_ALIAS;
+
+ template <typename Value>
+ struct hash_node<Value, false>
+ {
+ hash_node() = default;
+ hash_node(const hash_node&) = default;
+ hash_node(hash_node&&) = default;
+
+ Value mValue;
+ hash_node* mpNext;
+ } EASTL_MAY_ALIAS;
+
+ #ifdef EA_COMPILER_MSVC_2015
+ EA_RESTORE_VC_WARNING()
+ #endif
+ EA_RESTORE_VC_WARNING()
+
+
+ // has_hashcode_member
+ //
+ // Custom type-trait that checks for the existence of a class data member 'mnHashCode'.
+ //
+ // In order to explicitly instantiate the hashtable without error we need to SFINAE away the functions that will
+ // fail to compile based on if the 'hash_node' contains a 'mnHashCode' member dictated by the hashtable template
+ // parameters. The hashtable support this level of configuration to allow users to choose which between the space vs.
+ // time optimization.
+ //
+ namespace Internal
+ {
+ template <class T>
+ struct has_hashcode_member
+ {
+ private:
+ template <class U> static eastl::no_type test(...);
+ template <class U> static eastl::yes_type test(decltype(U::mnHashCode)* = 0);
+ public:
+ static const bool value = sizeof(test<T>(0)) == sizeof(eastl::yes_type);
+ };
+ }
+
+ static_assert(Internal::has_hashcode_member<hash_node<int, true>>::value, "contains a mnHashCode member");
+ static_assert(!Internal::has_hashcode_member<hash_node<int, false>>::value, "doesn't contain a mnHashCode member");
+
+ // convenience macros to increase the readability of the code paths that must SFINAE on if the 'hash_node'
+ // contains the cached hashed value or not.
+ #define ENABLE_IF_HAS_HASHCODE(T, RT) typename eastl::enable_if<Internal::has_hashcode_member<T>::value, RT>::type*
+ #define ENABLE_IF_HASHCODE_EASTLSIZET(T, RT) typename eastl::enable_if<eastl::is_convertible<T, eastl_size_t>::value, RT>::type
+ #define ENABLE_IF_TRUETYPE(T) typename eastl::enable_if<T::value>::type*
+ #define DISABLE_IF_TRUETYPE(T) typename eastl::enable_if<!T::value>::type*
+
+
+ /// node_iterator_base
+ ///
+ /// Node iterators iterate nodes within a given bucket.
+ ///
+ /// We define a base class here because it is shared by both const and
+ /// non-const iterators.
+ ///
+ template <typename Value, bool bCacheHashCode>
+ struct node_iterator_base
+ {
+ typedef hash_node<Value, bCacheHashCode> node_type;
+
+ node_type* mpNode;
+
+ node_iterator_base(node_type* pNode)
+ : mpNode(pNode) { }
+
+ void increment()
+ { mpNode = mpNode->mpNext; }
+ };
+
+
+
+ /// node_iterator
+ ///
+ /// Node iterators iterate nodes within a given bucket.
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+ template <typename Value, bool bConst, bool bCacheHashCode>
+ struct node_iterator : public node_iterator_base<Value, bCacheHashCode>
+ {
+ public:
+ typedef node_iterator_base<Value, bCacheHashCode> base_type;
+ typedef node_iterator<Value, bConst, bCacheHashCode> this_type;
+ typedef typename base_type::node_type node_type;
+ typedef Value value_type;
+ typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+ typedef typename type_select<bConst, const Value&, Value&>::type reference;
+ typedef ptrdiff_t difference_type;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ explicit node_iterator(node_type* pNode = NULL)
+ : base_type(pNode) { }
+
+ node_iterator(const node_iterator<Value, true, bCacheHashCode>& x)
+ : base_type(x.mpNode) { }
+
+ reference operator*() const
+ { return base_type::mpNode->mValue; }
+
+ pointer operator->() const
+ { return &(base_type::mpNode->mValue); }
+
+ node_iterator& operator++()
+ { base_type::increment(); return *this; }
+
+ node_iterator operator++(int)
+ { node_iterator temp(*this); base_type::increment(); return temp; }
+
+ }; // node_iterator
+
+
+
+ /// hashtable_iterator_base
+ ///
+ /// A hashtable_iterator iterates the entire hash table and not just
+ /// nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// We define a base class here because it is shared by both const and
+ /// non-const iterators.
+ ///
+ template <typename Value, bool bCacheHashCode>
+ struct hashtable_iterator_base
+ {
+ public:
+ typedef hashtable_iterator_base<Value, bCacheHashCode> this_type;
+ typedef hash_node<Value, bCacheHashCode> node_type;
+
+ protected:
+ template <typename, typename, typename, typename, typename, typename, typename, typename, typename, bool, bool, bool>
+ friend class hashtable;
+
+ template <typename, bool, bool>
+ friend struct hashtable_iterator;
+
+ template <typename V, bool b>
+ friend bool operator==(const hashtable_iterator_base<V, b>&, const hashtable_iterator_base<V, b>&);
+
+ template <typename V, bool b>
+ friend bool operator!=(const hashtable_iterator_base<V, b>&, const hashtable_iterator_base<V, b>&);
+
+ node_type* mpNode; // Current node within current bucket.
+ node_type** mpBucket; // Current bucket.
+
+ public:
+ hashtable_iterator_base(node_type* pNode, node_type** pBucket)
+ : mpNode(pNode), mpBucket(pBucket) { }
+
+ void increment_bucket()
+ {
+ ++mpBucket;
+ while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end
+ ++mpBucket; // of the bucket array so that finding the end of the bucket
+ mpNode = *mpBucket; // array is quick and simple.
+ }
+
+ void increment()
+ {
+ mpNode = mpNode->mpNext;
+
+ while(mpNode == NULL)
+ mpNode = *++mpBucket;
+ }
+
+ }; // hashtable_iterator_base
+
+
+
+
+ /// hashtable_iterator
+ ///
+ /// A hashtable_iterator iterates the entire hash table and not just
+ /// nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+ template <typename Value, bool bConst, bool bCacheHashCode>
+ struct hashtable_iterator : public hashtable_iterator_base<Value, bCacheHashCode>
+ {
+ public:
+ typedef hashtable_iterator_base<Value, bCacheHashCode> base_type;
+ typedef hashtable_iterator<Value, bConst, bCacheHashCode> this_type;
+ typedef hashtable_iterator<Value, false, bCacheHashCode> this_type_non_const;
+ typedef typename base_type::node_type node_type;
+ typedef Value value_type;
+ typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+ typedef typename type_select<bConst, const Value&, Value&>::type reference;
+ typedef ptrdiff_t difference_type;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ hashtable_iterator(node_type* pNode = NULL, node_type** pBucket = NULL)
+ : base_type(pNode, pBucket) { }
+
+ hashtable_iterator(node_type** pBucket)
+ : base_type(*pBucket, pBucket) { }
+
+ hashtable_iterator(const this_type_non_const& x)
+ : base_type(x.mpNode, x.mpBucket) { }
+
+ reference operator*() const
+ { return base_type::mpNode->mValue; }
+
+ pointer operator->() const
+ { return &(base_type::mpNode->mValue); }
+
+ hashtable_iterator& operator++()
+ { base_type::increment(); return *this; }
+
+ hashtable_iterator operator++(int)
+ { hashtable_iterator temp(*this); base_type::increment(); return temp; }
+
+ const node_type* get_node() const
+ { return base_type::mpNode; }
+
+ }; // hashtable_iterator
+
+
+
+
+ /// ht_distance
+ ///
+ /// This function returns the same thing as distance() for
+ /// forward iterators but returns zero for input iterators.
+ /// The reason why is that input iterators can only be read
+ /// once, and calling distance() on an input iterator destroys
+ /// the ability to read it. This ht_distance is used only for
+ /// optimization and so the code will merely work better with
+ /// forward iterators that input iterators.
+ ///
+ template <typename Iterator>
+ inline typename eastl::iterator_traits<Iterator>::difference_type
+ distance_fw_impl(Iterator /*first*/, Iterator /*last*/, EASTL_ITC_NS::input_iterator_tag)
+ {
+ return 0;
+ }
+
+ template <typename Iterator>
+ inline typename eastl::iterator_traits<Iterator>::difference_type
+ distance_fw_impl(Iterator first, Iterator last, EASTL_ITC_NS::forward_iterator_tag)
+ { return eastl::distance(first, last); }
+
+ template <typename Iterator>
+ inline typename eastl::iterator_traits<Iterator>::difference_type
+ ht_distance(Iterator first, Iterator last)
+ {
+ typedef typename eastl::iterator_traits<Iterator>::iterator_category IC;
+ return distance_fw_impl(first, last, IC());
+ }
+
+
+
+
+ /// mod_range_hashing
+ ///
+ /// Implements the algorithm for conversion of a number in the range of
+ /// [0, SIZE_T_MAX] to the range of [0, BucketCount).
+ ///
+ struct mod_range_hashing
+ {
+ uint32_t operator()(size_t r, uint32_t n) const
+ { return r % n; }
+ };
+
+
+ /// default_ranged_hash
+ ///
+ /// Default ranged hash function H. In principle it should be a
+ /// function object composed from objects of type H1 and H2 such that
+ /// h(k, n) = h2(h1(k), n), but that would mean making extra copies of
+ /// h1 and h2. So instead we'll just use a tag to tell class template
+ /// hashtable to do that composition.
+ ///
+ struct default_ranged_hash{ };
+
+
+ /// prime_rehash_policy
+ ///
+ /// Default value for rehash policy. Bucket size is (usually) the
+ /// smallest prime that keeps the load factor small enough.
+ ///
+ struct EASTL_API prime_rehash_policy
+ {
+ public:
+ float mfMaxLoadFactor;
+ float mfGrowthFactor;
+ mutable uint32_t mnNextResize;
+
+ public:
+ prime_rehash_policy(float fMaxLoadFactor = 1.f)
+ : mfMaxLoadFactor(fMaxLoadFactor), mfGrowthFactor(2.f), mnNextResize(0) { }
+
+ float GetMaxLoadFactor() const
+ { return mfMaxLoadFactor; }
+
+ /// Return a bucket count no greater than nBucketCountHint,
+ /// Don't update member variables while at it.
+ static uint32_t GetPrevBucketCountOnly(uint32_t nBucketCountHint);
+
+ /// Return a bucket count no greater than nBucketCountHint.
+ /// This function has a side effect of updating mnNextResize.
+ uint32_t GetPrevBucketCount(uint32_t nBucketCountHint) const;
+
+ /// Return a bucket count no smaller than nBucketCountHint.
+ /// This function has a side effect of updating mnNextResize.
+ uint32_t GetNextBucketCount(uint32_t nBucketCountHint) const;
+
+ /// Return a bucket count appropriate for nElementCount elements.
+ /// This function has a side effect of updating mnNextResize.
+ uint32_t GetBucketCount(uint32_t nElementCount) const;
+
+ /// nBucketCount is current bucket count, nElementCount is current element count,
+ /// and nElementAdd is number of elements to be inserted. Do we need
+ /// to increase bucket count? If so, return pair(true, n), where
+ /// n is the new bucket count. If not, return pair(false, 0).
+ eastl::pair<bool, uint32_t>
+ GetRehashRequired(uint32_t nBucketCount, uint32_t nElementCount, uint32_t nElementAdd) const;
+ };
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // Base classes for hashtable. We define these base classes because
+ // in some cases we want to do different things depending on the
+ // value of a policy class. In some cases the policy class affects
+ // which member functions and nested typedefs are defined; we handle that
+ // by specializing base class templates. Several of the base class templates
+ // need to access other members of class template hashtable, so we use
+ // the "curiously recurring template pattern" (parent class is templated
+ // on type of child class) for them.
+ ///////////////////////////////////////////////////////////////////////
+
+
+ /// rehash_base
+ ///
+ /// Give hashtable the get_max_load_factor functions if the rehash
+ /// policy is prime_rehash_policy.
+ ///
+ template <typename RehashPolicy, typename Hashtable>
+ struct rehash_base { };
+
+ template <typename Hashtable>
+ struct rehash_base<prime_rehash_policy, Hashtable>
+ {
+ // Returns the max load factor, which is the load factor beyond
+ // which we rebuild the container with a new bucket count.
+ float get_max_load_factor() const
+ {
+ const Hashtable* const pThis = static_cast<const Hashtable*>(this);
+ return pThis->rehash_policy().GetMaxLoadFactor();
+ }
+
+ // If you want to make the hashtable never rehash (resize),
+ // set the max load factor to be a very high number (e.g. 100000.f).
+ void set_max_load_factor(float fMaxLoadFactor)
+ {
+ Hashtable* const pThis = static_cast<Hashtable*>(this);
+ pThis->rehash_policy(prime_rehash_policy(fMaxLoadFactor));
+ }
+ };
+
+
+
+
+ /// hash_code_base
+ ///
+ /// Encapsulates two policy issues that aren't quite orthogonal.
+ /// (1) The difference between using a ranged hash function and using
+ /// the combination of a hash function and a range-hashing function.
+ /// In the former case we don't have such things as hash codes, so
+ /// we have a dummy type as placeholder.
+ /// (2) Whether or not we cache hash codes. Caching hash codes is
+ /// meaningless if we have a ranged hash function. This is because
+ /// a ranged hash function converts an object directly to its
+ /// bucket index without ostensibly using a hash code.
+ /// We also put the key extraction and equality comparison function
+ /// objects here, for convenience.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal,
+ typename H1, typename H2, typename H, bool bCacheHashCode>
+ struct hash_code_base;
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: ranged hash function, no caching hash codes.
+ /// H1 and H2 are provided but ignored. We define a dummy hash code type.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2, typename H>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, false>
+ {
+ protected:
+ ExtractKey mExtractKey; // To do: Make this member go away entirely, as it never has any data.
+ Equal mEqual; // To do: Make this instance use zero space when it is zero size.
+ H mRangedHash; // To do: Make this instance use zero space when it is zero size
+
+ public:
+ H1 hash_function() const
+ { return H1(); }
+
+ Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+ { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
+
+ const Equal& key_eq() const
+ { return mEqual; }
+
+ Equal& key_eq()
+ { return mEqual; }
+
+ protected:
+ typedef void* hash_code_t;
+ typedef uint32_t bucket_index_t;
+
+ hash_code_base(const ExtractKey& extractKey, const Equal& eq, const H1&, const H2&, const H& h)
+ : mExtractKey(extractKey), mEqual(eq), mRangedHash(h) { }
+
+ hash_code_t get_hash_code(const Key& key) const
+ {
+ EA_UNUSED(key);
+ return NULL;
+ }
+
+ bucket_index_t bucket_index(hash_code_t, uint32_t) const
+ { return (bucket_index_t)0; }
+
+ bucket_index_t bucket_index(const Key& key, hash_code_t, uint32_t nBucketCount) const
+ { return (bucket_index_t)mRangedHash(key, nBucketCount); }
+
+ bucket_index_t bucket_index(const hash_node<Value, false>* pNode, uint32_t nBucketCount) const
+ { return (bucket_index_t)mRangedHash(mExtractKey(pNode->mValue), nBucketCount); }
+
+ bool compare(const Key& key, hash_code_t, hash_node<Value, false>* pNode) const
+ { return mEqual(key, mExtractKey(pNode->mValue)); }
+
+ void copy_code(hash_node<Value, false>*, const hash_node<Value, false>*) const
+ { } // Nothing to do.
+
+ void set_code(hash_node<Value, false>* pDest, hash_code_t c) const
+ {
+ EA_UNUSED(pDest);
+ EA_UNUSED(c);
+ }
+
+ void base_swap(hash_code_base& x)
+ {
+ eastl::swap(mExtractKey, x.mExtractKey);
+ eastl::swap(mEqual, x.mEqual);
+ eastl::swap(mRangedHash, x.mRangedHash);
+ }
+
+ }; // hash_code_base
+
+
+
+ // No specialization for ranged hash function while caching hash codes.
+ // That combination is meaningless, and trying to do it is an error.
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: ranged hash function, cache hash codes.
+ /// This combination is meaningless, so we provide only a declaration
+ /// and no definition.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2, typename H>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, true>;
+
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: hash function and range-hashing function,
+ /// no caching of hash codes. H is provided but ignored.
+ /// Provides typedef and accessor required by TR1.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, false>
+ {
+ protected:
+ ExtractKey mExtractKey;
+ Equal mEqual;
+ H1 m_h1;
+ H2 m_h2;
+
+ public:
+ typedef H1 hasher;
+
+ H1 hash_function() const
+ { return m_h1; }
+
+ Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+ { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
+
+ const Equal& key_eq() const
+ { return mEqual; }
+
+ Equal& key_eq()
+ { return mEqual; }
+
+ protected:
+ typedef size_t hash_code_t;
+ typedef uint32_t bucket_index_t;
+ typedef hash_node<Value, false> node_type;
+
+ hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&)
+ : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { }
+
+ hash_code_t get_hash_code(const Key& key) const
+ { return (hash_code_t)m_h1(key); }
+
+ bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2(c, nBucketCount); }
+
+ bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2(c, nBucketCount); }
+
+ bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2((hash_code_t)m_h1(mExtractKey(pNode->mValue)), nBucketCount); }
+
+ bool compare(const Key& key, hash_code_t, node_type* pNode) const
+ { return mEqual(key, mExtractKey(pNode->mValue)); }
+
+ void copy_code(node_type*, const node_type*) const
+ { } // Nothing to do.
+
+ void set_code(node_type*, hash_code_t) const
+ { } // Nothing to do.
+
+ void base_swap(hash_code_base& x)
+ {
+ eastl::swap(mExtractKey, x.mExtractKey);
+ eastl::swap(mEqual, x.mEqual);
+ eastl::swap(m_h1, x.m_h1);
+ eastl::swap(m_h2, x.m_h2);
+ }
+
+ }; // hash_code_base
+
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: hash function and range-hashing function,
+ /// caching hash codes. H is provided but ignored.
+ /// Provides typedef and accessor required by TR1.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, true>
+ {
+ protected:
+ ExtractKey mExtractKey;
+ Equal mEqual;
+ H1 m_h1;
+ H2 m_h2;
+
+ public:
+ typedef H1 hasher;
+
+ H1 hash_function() const
+ { return m_h1; }
+
+ Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+ { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
+
+ const Equal& key_eq() const
+ { return mEqual; }
+
+ Equal& key_eq()
+ { return mEqual; }
+
+ protected:
+ typedef uint32_t hash_code_t;
+ typedef uint32_t bucket_index_t;
+ typedef hash_node<Value, true> node_type;
+
+ hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&)
+ : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { }
+
+ hash_code_t get_hash_code(const Key& key) const
+ { return (hash_code_t)m_h1(key); }
+
+ bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2(c, nBucketCount); }
+
+ bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2(c, nBucketCount); }
+
+ bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2((uint32_t)pNode->mnHashCode, nBucketCount); }
+
+ bool compare(const Key& key, hash_code_t c, node_type* pNode) const
+ { return (pNode->mnHashCode == c) && mEqual(key, mExtractKey(pNode->mValue)); }
+
+ void copy_code(node_type* pDest, const node_type* pSource) const
+ { pDest->mnHashCode = pSource->mnHashCode; }
+
+ void set_code(node_type* pDest, hash_code_t c) const
+ { pDest->mnHashCode = c; }
+
+ void base_swap(hash_code_base& x)
+ {
+ eastl::swap(mExtractKey, x.mExtractKey);
+ eastl::swap(mEqual, x.mEqual);
+ eastl::swap(m_h1, x.m_h1);
+ eastl::swap(m_h2, x.m_h2);
+ }
+
+ }; // hash_code_base
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// hashtable
+ ///
+ /// Key and Value: arbitrary CopyConstructible types.
+ ///
+ /// ExtractKey: function object that takes a object of type Value
+ /// and returns a value of type Key.
+ ///
+ /// Equal: function object that takes two objects of type k and returns
+ /// a bool-like value that is true if the two objects are considered equal.
+ ///
+ /// H1: a hash function. A unary function object with argument type
+ /// Key and result type size_t. Return values should be distributed
+ /// over the entire range [0, numeric_limits<uint32_t>::max()].
+ ///
+ /// H2: a range-hashing function (in the terminology of Tavori and
+ /// Dreizin). This is a function which takes the output of H1 and
+ /// converts it to the range of [0, n]. Usually it merely takes the
+ /// output of H1 and mods it to n.
+ ///
+ /// H: a ranged hash function (Tavori and Dreizin). This is merely
+ /// a class that combines the functionality of H1 and H2 together,
+ /// possibly in some way that is somehow improved over H1 and H2
+ /// It is a binary function whose argument types are Key and size_t
+ /// and whose result type is uint32_t. Given arguments k and n, the
+ /// return value is in the range [0, n). Default: h(k, n) = h2(h1(k), n).
+ /// If H is anything other than the default, H1 and H2 are ignored,
+ /// as H is thus overriding H1 and H2.
+ ///
+ /// RehashPolicy: Policy class with three members, all of which govern
+ /// the bucket count. nBucket(n) returns a bucket count no smaller
+ /// than n. GetBucketCount(n) returns a bucket count appropriate
+ /// for an element count of n. GetRehashRequired(nBucketCount, nElementCount, nElementAdd)
+ /// determines whether, if the current bucket count is nBucket and the
+ /// current element count is nElementCount, we need to increase the bucket
+ /// count. If so, returns pair(true, n), where n is the new
+ /// bucket count. If not, returns pair(false, <anything>).
+ ///
+ /// Currently it is hard-wired that the number of buckets never
+ /// shrinks. Should we allow RehashPolicy to change that?
+ ///
+ /// bCacheHashCode: true if we store the value of the hash
+ /// function along with the value. This is a time-space tradeoff.
+ /// Storing it may improve lookup speed by reducing the number of
+ /// times we need to call the Equal function.
+ ///
+ /// bMutableIterators: true if hashtable::iterator is a mutable
+ /// iterator, false if iterator and const_iterator are both const
+ /// iterators. This is true for hash_map and hash_multimap,
+ /// false for hash_set and hash_multiset.
+ ///
+ /// bUniqueKeys: true if the return value of hashtable::count(k)
+ /// is always at most one, false if it may be an arbitrary number.
+ /// This is true for hash_set and hash_map and is false for
+ /// hash_multiset and hash_multimap.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// Note:
+ /// If you want to make a hashtable never increase its bucket usage,
+ /// call set_max_load_factor with a very high value such as 100000.f.
+ ///
+ /// find_as
+ /// In order to support the ability to have a hashtable of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the hashtable key type. See the find_as function
+ /// for more documentation on this.
+ ///
+ /// find_by_hash
+ /// In the interest of supporting fast operations wherever possible,
+ /// we provide a find_by_hash function which finds a node using its
+ /// hash code. This is useful for cases where the node's hash is
+ /// already known, allowing us to avoid a redundant hash operation
+ /// in the normal find path.
+ ///
+ template <typename Key, typename Value, typename Allocator, typename ExtractKey,
+ typename Equal, typename H1, typename H2, typename H,
+ typename RehashPolicy, bool bCacheHashCode, bool bMutableIterators, bool bUniqueKeys>
+ class hashtable
+ : public rehash_base<RehashPolicy, hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H, RehashPolicy, bCacheHashCode, bMutableIterators, bUniqueKeys> >,
+ public hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, bCacheHashCode>
+ {
+ public:
+ typedef Key key_type;
+ typedef Value value_type;
+ typedef typename ExtractKey::result_type mapped_type;
+ typedef hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, bCacheHashCode> hash_code_base_type;
+ typedef typename hash_code_base_type::hash_code_t hash_code_t;
+ typedef Allocator allocator_type;
+ typedef Equal key_equal;
+ typedef ptrdiff_t difference_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef node_iterator<value_type, !bMutableIterators, bCacheHashCode> local_iterator;
+ typedef node_iterator<value_type, true, bCacheHashCode> const_local_iterator;
+ typedef hashtable_iterator<value_type, !bMutableIterators, bCacheHashCode> iterator;
+ typedef hashtable_iterator<value_type, true, bCacheHashCode> const_iterator;
+ typedef hash_node<value_type, bCacheHashCode> node_type;
+ typedef typename type_select<bUniqueKeys, eastl::pair<iterator, bool>, iterator>::type insert_return_type;
+ typedef hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H,
+ RehashPolicy, bCacheHashCode, bMutableIterators, bUniqueKeys> this_type;
+ typedef RehashPolicy rehash_policy_type;
+ typedef ExtractKey extract_key_type;
+ typedef H1 h1_type;
+ typedef H2 h2_type;
+ typedef H h_type;
+ typedef integral_constant<bool, bUniqueKeys> has_unique_keys_type;
+
+ using hash_code_base_type::key_eq;
+ using hash_code_base_type::hash_function;
+ using hash_code_base_type::mExtractKey;
+ using hash_code_base_type::get_hash_code;
+ using hash_code_base_type::bucket_index;
+ using hash_code_base_type::compare;
+ using hash_code_base_type::set_code;
+ using hash_code_base_type::copy_code;
+
+ static const bool kCacheHashCode = bCacheHashCode;
+
+ enum
+ {
+ // This enumeration is deprecated in favor of eastl::kHashtableAllocFlagBuckets.
+ kAllocFlagBuckets = eastl::kHashtableAllocFlagBuckets // Flag to allocator which indicates that we are allocating buckets and not nodes.
+ };
+
+ protected:
+ node_type** mpBucketArray;
+ size_type mnBucketCount;
+ size_type mnElementCount;
+ RehashPolicy mRehashPolicy; // To do: Use base class optimization to make this go away.
+ allocator_type mAllocator; // To do: Use base class optimization to make this go away.
+
+ struct NodeFindKeyData {
+ node_type* node;
+ hash_code_t code;
+ size_type bucket_index;
+ };
+
+ public:
+ hashtable(size_type nBucketCount, const H1&, const H2&, const H&, const Equal&, const ExtractKey&,
+ const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR);
+
+ template <typename FowardIterator>
+ hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount,
+ const H1&, const H2&, const H&, const Equal&, const ExtractKey&,
+ const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR);
+
+ hashtable(const hashtable& x);
+
+ // initializer_list ctor support is implemented in subclasses (e.g. hash_set).
+ // hashtable(initializer_list<value_type>, size_type nBucketCount, const H1&, const H2&, const H&,
+ // const Equal&, const ExtractKey&, const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR);
+
+ hashtable(this_type&& x);
+ hashtable(this_type&& x, const allocator_type& allocator);
+ ~hashtable();
+
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ iterator begin() EA_NOEXCEPT
+ {
+ iterator i(mpBucketArray);
+ if(!i.mpNode)
+ i.increment_bucket();
+ return i;
+ }
+
+ const_iterator begin() const EA_NOEXCEPT
+ {
+ const_iterator i(mpBucketArray);
+ if(!i.mpNode)
+ i.increment_bucket();
+ return i;
+ }
+
+ const_iterator cbegin() const EA_NOEXCEPT
+ { return begin(); }
+
+ iterator end() EA_NOEXCEPT
+ { return iterator(mpBucketArray + mnBucketCount); }
+
+ const_iterator end() const EA_NOEXCEPT
+ { return const_iterator(mpBucketArray + mnBucketCount); }
+
+ const_iterator cend() const EA_NOEXCEPT
+ { return const_iterator(mpBucketArray + mnBucketCount); }
+
+ // Returns an iterator to the first item in bucket n.
+ local_iterator begin(size_type n) EA_NOEXCEPT
+ { return local_iterator(mpBucketArray[n]); }
+
+ const_local_iterator begin(size_type n) const EA_NOEXCEPT
+ { return const_local_iterator(mpBucketArray[n]); }
+
+ const_local_iterator cbegin(size_type n) const EA_NOEXCEPT
+ { return const_local_iterator(mpBucketArray[n]); }
+
+ // Returns an iterator to the last item in a bucket returned by begin(n).
+ local_iterator end(size_type) EA_NOEXCEPT
+ { return local_iterator(NULL); }
+
+ const_local_iterator end(size_type) const EA_NOEXCEPT
+ { return const_local_iterator(NULL); }
+
+ const_local_iterator cend(size_type) const EA_NOEXCEPT
+ { return const_local_iterator(NULL); }
+
+ bool empty() const EA_NOEXCEPT
+ { return mnElementCount == 0; }
+
+ size_type size() const EA_NOEXCEPT
+ { return mnElementCount; }
+
+ size_type bucket_count() const EA_NOEXCEPT
+ { return mnBucketCount; }
+
+ size_type bucket_size(size_type n) const EA_NOEXCEPT
+ { return (size_type)eastl::distance(begin(n), end(n)); }
+
+ //size_type bucket(const key_type& k) const EA_NOEXCEPT
+ // { return bucket_index(k, (hash code here), (uint32_t)mnBucketCount); }
+
+ // Returns the ratio of element count to bucket count. A return value of 1 means
+ // there's an optimal 1 bucket for each element.
+ float load_factor() const EA_NOEXCEPT
+ { return (float)mnElementCount / (float)mnBucketCount; }
+
+ // Inherited from the base class.
+ // Returns the max load factor, which is the load factor beyond
+ // which we rebuild the container with a new bucket count.
+ // get_max_load_factor comes from rehash_base.
+ // float get_max_load_factor() const;
+
+ // Inherited from the base class.
+ // If you want to make the hashtable never rehash (resize),
+ // set the max load factor to be a very high number (e.g. 100000.f).
+ // set_max_load_factor comes from rehash_base.
+ // void set_max_load_factor(float fMaxLoadFactor);
+
+ /// Generalization of get_max_load_factor. This is an extension that's
+ /// not present in C++ hash tables (unordered containers).
+ const rehash_policy_type& rehash_policy() const EA_NOEXCEPT
+ { return mRehashPolicy; }
+
+ /// Generalization of set_max_load_factor. This is an extension that's
+ /// not present in C++ hash tables (unordered containers).
+ void rehash_policy(const rehash_policy_type& rehashPolicy);
+
+ template <class... Args>
+ insert_return_type emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args);
+
+ insert_return_type insert(const value_type& value);
+ insert_return_type insert(value_type&& otherValue);
+ iterator insert(const_iterator hint, const value_type& value);
+ iterator insert(const_iterator hint, value_type&& value);
+ void insert(std::initializer_list<value_type> ilist);
+ template <typename InputIterator> void insert(InputIterator first, InputIterator last);
+ //insert_return_type insert(node_type&& nh);
+ //iterator insert(const_iterator hint, node_type&& nh);
+
+ // This overload attempts to mitigate the overhead associated with mismatched cv-quality elements of
+ // the hashtable pair. It can avoid copy overhead because it will perfect forward the user provided pair types
+ // until it can constructed in-place in the allocated hashtable node.
+ //
+ // Ideally we would remove this overload as it deprecated and removed in C++17 but it currently causes
+ // performance regressions for hashtables with complex keys (keys that allocate resources).
+ template <class P,
+ class = typename eastl::enable_if_t<
+ #if EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR
+ !eastl::is_same_v<eastl::decay_t<P>, key_type> &&
+ #endif
+ !eastl::is_literal_type_v<P> &&
+ eastl::is_constructible_v<value_type, P&&>>>
+ insert_return_type insert(P&& otherValue);
+
+ // Non-standard extension
+ template <class P> // See comments below for the const value_type& equivalent to this function.
+ insert_return_type insert(hash_code_t c, node_type* pNodeNew, P&& otherValue);
+
+ // We provide a version of insert which lets the caller directly specify the hash value and
+ // a potential node to insert if needed. This allows for less thread contention in the case
+ // of a thread-shared hash table that's accessed during a mutex lock, because the hash calculation
+ // and node creation is done outside of the lock. If pNodeNew is supplied by the user (i.e. non-NULL)
+ // then it must be freeable via the hash table's allocator. If the return value is true then this function
+ // took over ownership of pNodeNew, else pNodeNew is still owned by the caller to free or to pass
+ // to another call to insert. pNodeNew need not be assigned the value by the caller, as the insert
+ // function will assign value to pNodeNew upon insertion into the hash table. pNodeNew may be
+ // created by the user with the allocate_uninitialized_node function, and freed by the free_uninitialized_node function.
+ insert_return_type insert(hash_code_t c, node_type* pNodeNew, const value_type& value);
+
+ template <class M> eastl::pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj);
+ template <class M> eastl::pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj);
+ template <class M> iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj);
+ template <class M> iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj);
+
+ // Used to allocate and free memory used by insert(const value_type& value, hash_code_t c, node_type* pNodeNew).
+ node_type* allocate_uninitialized_node();
+ void free_uninitialized_node(node_type* pNode);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& k);
+
+ void clear();
+ void clear(bool clearBuckets); // If clearBuckets is true, we free the bucket memory and set the bucket count back to the newly constructed count.
+ void reset_lose_memory() EA_NOEXCEPT; // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+ void rehash(size_type nBucketCount);
+ void reserve(size_type nElementCount);
+
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+ /// Implements a find whereby the user supplies a comparison of a different type
+ /// than the hashtable value_type. A useful case of this is one whereby you have
+ /// a container of string objects but want to do searches via passing in char pointers.
+ /// The problem is that without this kind of find, you need to do the expensive operation
+ /// of converting the char pointer to a string so it can be used as the argument to the
+ /// find function.
+ ///
+ /// Example usage (namespaces omitted for brevity):
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example usage (note that the predicate uses string as first type and char* as second):
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename U, typename UHash, typename BinaryPredicate>
+ iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
+
+ template <typename U, typename UHash, typename BinaryPredicate>
+ const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
+
+ template <typename U>
+ iterator find_as(const U& u);
+
+ template <typename U>
+ const_iterator find_as(const U& u) const;
+
+ // Note: find_by_hash and find_range_by_hash both perform a search based on a hash value.
+ // It is important to note that multiple hash values may map to the same hash bucket, so
+ // it would be incorrect to assume all items returned match the hash value that
+ // was searched for.
+
+ /// Implements a find whereby the user supplies the node's hash code.
+ /// It returns an iterator to the first element that matches the given hash. However, there may be multiple elements that match the given hash.
+
+ template<typename HashCodeT>
+ ENABLE_IF_HASHCODE_EASTLSIZET(HashCodeT, iterator) find_by_hash(HashCodeT c)
+ {
+ EASTL_CT_ASSERT_MSG(bCacheHashCode,
+ "find_by_hash(hash_code_t c) is designed to avoid recomputing hashes, "
+ "so it requires cached hash codes. Consider setting template parameter "
+ "bCacheHashCode to true or using find_by_hash(const key_type& k, hash_code_t c) instead.");
+
+ const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], c);
+
+ return pNode ? iterator(pNode, mpBucketArray + n) :
+ iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+ template<typename HashCodeT>
+ ENABLE_IF_HASHCODE_EASTLSIZET(HashCodeT, const_iterator) find_by_hash(HashCodeT c) const
+ {
+ EASTL_CT_ASSERT_MSG(bCacheHashCode,
+ "find_by_hash(hash_code_t c) is designed to avoid recomputing hashes, "
+ "so it requires cached hash codes. Consider setting template parameter "
+ "bCacheHashCode to true or using find_by_hash(const key_type& k, hash_code_t c) instead.");
+
+ const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], c);
+
+ return pNode ?
+ const_iterator(pNode, mpBucketArray + n) :
+ const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+ iterator find_by_hash(const key_type& k, hash_code_t c)
+ {
+ const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+ return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+ const_iterator find_by_hash(const key_type& k, hash_code_t c) const
+ {
+ const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+ return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+ // Returns a pair that allows iterating over all nodes in a hash bucket
+ // first in the pair returned holds the iterator for the beginning of the bucket,
+ // second in the pair returned holds the iterator for the end of the bucket,
+ // If no bucket is found, both values in the pair are set to end().
+ //
+ // See also the note above.
+ eastl::pair<iterator, iterator> find_range_by_hash(hash_code_t c);
+ eastl::pair<const_iterator, const_iterator> find_range_by_hash(hash_code_t c) const;
+
+ size_type count(const key_type& k) const EA_NOEXCEPT;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ // We must remove one of the 'DoGetResultIterator' overloads from the overload-set (via SFINAE) because both can
+ // not compile successfully at the same time. The 'bUniqueKeys' template parameter chooses at compile-time the
+ // type of 'insert_return_type' between a pair<iterator,bool> and a raw iterator. We must pick between the two
+ // overloads that unpacks the iterator from the pair or simply passes the provided iterator to the caller based
+ // on the class template parameter.
+ template <typename BoolConstantT>
+ iterator DoGetResultIterator(BoolConstantT,
+ const insert_return_type& irt,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr) const EA_NOEXCEPT
+ {
+ return irt.first;
+ }
+
+ template <typename BoolConstantT>
+ iterator DoGetResultIterator(BoolConstantT,
+ const insert_return_type& irt,
+ DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr) const EA_NOEXCEPT
+ {
+ return irt;
+ }
+
+ node_type* DoAllocateNodeFromKey(const key_type& key);
+ node_type* DoAllocateNodeFromKey(key_type&& key);
+ void DoFreeNode(node_type* pNode);
+ void DoFreeNodes(node_type** pBucketArray, size_type);
+
+ node_type** DoAllocateBuckets(size_type n);
+ void DoFreeBuckets(node_type** pBucketArray, size_type n);
+
+ template <bool bDeleteOnException, typename Enabled = bool_constant<bUniqueKeys>, ENABLE_IF_TRUETYPE(Enabled) = nullptr> // only enabled when keys are unique
+ eastl::pair<iterator, bool> DoInsertUniqueNode(const key_type& k, hash_code_t c, size_type n, node_type* pNodeNew);
+
+ template <typename BoolConstantT, class... Args, ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr>
+ eastl::pair<iterator, bool> DoInsertValue(BoolConstantT, Args&&... args);
+
+ template <typename BoolConstantT, class... Args, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr>
+ iterator DoInsertValue(BoolConstantT, Args&&... args);
+
+
+ template <typename BoolConstantT>
+ eastl::pair<iterator, bool> DoInsertValueExtra(BoolConstantT,
+ const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ value_type&& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ eastl::pair<iterator, bool> DoInsertValue(BoolConstantT,
+ value_type&& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ iterator DoInsertValueExtra(BoolConstantT,
+ const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ value_type&& value,
+ DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ iterator DoInsertValue(BoolConstantT, value_type&& value, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+
+ template <typename BoolConstantT>
+ eastl::pair<iterator, bool> DoInsertValueExtra(BoolConstantT,
+ const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ const value_type& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ eastl::pair<iterator, bool> DoInsertValue(BoolConstantT,
+ const value_type& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ iterator DoInsertValueExtra(BoolConstantT,
+ const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ const value_type& value,
+ DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ iterator DoInsertValue(BoolConstantT, const value_type& value, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <class... Args>
+ node_type* DoAllocateNode(Args&&... args);
+ node_type* DoAllocateNode(value_type&& value);
+ node_type* DoAllocateNode(const value_type& value);
+
+ // DoInsertKey is supposed to get hash_code_t c = get_hash_code(key).
+ // it is done in case application has it's own hashset/hashmap-like containter, where hash code is for some reason known prior the insert
+ // this allows to save some performance, especially with heavy hash functions
+ eastl::pair<iterator, bool> DoInsertKey(true_type, const key_type& key, hash_code_t c);
+ iterator DoInsertKey(false_type, const key_type& key, hash_code_t c);
+ eastl::pair<iterator, bool> DoInsertKey(true_type, key_type&& key, hash_code_t c);
+ iterator DoInsertKey(false_type, key_type&& key, hash_code_t c);
+
+ // We keep DoInsertKey overload without third parameter, for compatibility with older revisions of EASTL (3.12.07 and earlier)
+ // It used to call get_hash_code as a first call inside the DoInsertKey.
+ eastl::pair<iterator, bool> DoInsertKey(true_type, const key_type& key) { return DoInsertKey(true_type(), key, get_hash_code(key)); }
+ iterator DoInsertKey(false_type, const key_type& key) { return DoInsertKey(false_type(), key, get_hash_code(key)); }
+ eastl::pair<iterator, bool> DoInsertKey(true_type, key_type&& key) { return DoInsertKey(true_type(), eastl::move(key), get_hash_code(key)); }
+ iterator DoInsertKey(false_type, key_type&& key) { return DoInsertKey(false_type(), eastl::move(key), get_hash_code(key)); }
+
+ void DoRehash(size_type nBucketCount);
+ node_type* DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const;
+ NodeFindKeyData DoFindKeyData(const key_type& k) const;
+
+ template <typename T>
+ ENABLE_IF_HAS_HASHCODE(T, node_type) DoFindNode(T* pNode, hash_code_t c) const
+ {
+ for (; pNode; pNode = pNode->mpNext)
+ {
+ if (pNode->mnHashCode == c)
+ return pNode;
+ }
+ return NULL;
+ }
+
+ template <typename U, typename BinaryPredicate>
+ node_type* DoFindNodeT(node_type* pNode, const U& u, BinaryPredicate predicate) const;
+
+ private:
+ template <typename V, typename Enabled = bool_constant<bUniqueKeys>, ENABLE_IF_TRUETYPE(Enabled) = nullptr>
+ eastl::pair<iterator, bool> DoInsertValueExtraForwarding(const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ V&& value);
+
+
+ }; // class hashtable
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // node_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, bool bCacheHashCode>
+ inline bool operator==(const node_iterator_base<Value, bCacheHashCode>& a, const node_iterator_base<Value, bCacheHashCode>& b)
+ { return a.mpNode == b.mpNode; }
+
+ template <typename Value, bool bCacheHashCode>
+ inline bool operator!=(const node_iterator_base<Value, bCacheHashCode>& a, const node_iterator_base<Value, bCacheHashCode>& b)
+ { return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hashtable_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, bool bCacheHashCode>
+ inline bool operator==(const hashtable_iterator_base<Value, bCacheHashCode>& a, const hashtable_iterator_base<Value, bCacheHashCode>& b)
+ { return a.mpNode == b.mpNode; }
+
+ template <typename Value, bool bCacheHashCode>
+ inline bool operator!=(const hashtable_iterator_base<Value, bCacheHashCode>& a, const hashtable_iterator_base<Value, bCacheHashCode>& b)
+ { return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hashtable
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>
+ ::hashtable(size_type nBucketCount, const H1& h1, const H2& h2, const H& h,
+ const Eq& eq, const EK& ek, const allocator_type& allocator)
+ : rehash_base<RP, hashtable>(),
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(ek, eq, h1, h2, h),
+ mnBucketCount(0),
+ mnElementCount(0),
+ mRehashPolicy(),
+ mAllocator(allocator)
+ {
+ if(nBucketCount < 2) // If we are starting in an initially empty state, with no memory allocation done.
+ reset_lose_memory();
+ else // Else we are creating a potentially non-empty hashtable...
+ {
+ EASTL_ASSERT(nBucketCount < 10000000);
+ mnBucketCount = (size_type)mRehashPolicy.GetNextBucketCount((uint32_t)nBucketCount);
+ mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2.
+ }
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename FowardIterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount,
+ const H1& h1, const H2& h2, const H& h,
+ const Eq& eq, const EK& ek, const allocator_type& allocator)
+ : rehash_base<rehash_policy_type, hashtable>(),
+ hash_code_base<key_type, value_type, extract_key_type, key_equal, h1_type, h2_type, h_type, kCacheHashCode>(ek, eq, h1, h2, h),
+ //mnBucketCount(0), // This gets re-assigned below.
+ mnElementCount(0),
+ mRehashPolicy(),
+ mAllocator(allocator)
+ {
+ if(nBucketCount < 2)
+ {
+ const size_type nElementCount = (size_type)eastl::ht_distance(first, last);
+ mnBucketCount = (size_type)mRehashPolicy.GetBucketCount((uint32_t)nElementCount);
+ }
+ else
+ {
+ EASTL_ASSERT(nBucketCount < 10000000);
+ mnBucketCount = nBucketCount;
+ }
+
+ mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; first != last; ++first)
+ insert(*first);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ clear();
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ throw;
+ }
+ #endif
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(const this_type& x)
+ : rehash_base<RP, hashtable>(x),
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(x),
+ mnBucketCount(x.mnBucketCount),
+ mnElementCount(x.mnElementCount),
+ mRehashPolicy(x.mRehashPolicy),
+ mAllocator(x.mAllocator)
+ {
+ if(mnElementCount) // If there is anything to copy...
+ {
+ mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will be at least 2.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(size_type i = 0; i < x.mnBucketCount; ++i)
+ {
+ node_type* pNodeSource = x.mpBucketArray[i];
+ node_type** ppNodeDest = mpBucketArray + i;
+
+ while(pNodeSource)
+ {
+ *ppNodeDest = DoAllocateNode(pNodeSource->mValue);
+ copy_code(*ppNodeDest, pNodeSource);
+ ppNodeDest = &(*ppNodeDest)->mpNext;
+ pNodeSource = pNodeSource->mpNext;
+ }
+ }
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ clear();
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ throw;
+ }
+ #endif
+ }
+ else
+ {
+ // In this case, instead of allocate memory and copy nothing from x,
+ // we reset ourselves to a zero allocation state.
+ reset_lose_memory();
+ }
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(this_type&& x)
+ : rehash_base<RP, hashtable>(x),
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(x),
+ mnBucketCount(0),
+ mnElementCount(0),
+ mRehashPolicy(x.mRehashPolicy),
+ mAllocator(x.mAllocator)
+ {
+ reset_lose_memory(); // We do this here the same as we do it in the default ctor because it puts the container in a proper initial empty state. This code would be cleaner if we could rely on being able to use C++11 delegating constructors and just call the default ctor here.
+ swap(x);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(this_type&& x, const allocator_type& allocator)
+ : rehash_base<RP, hashtable>(x),
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(x),
+ mnBucketCount(0),
+ mnElementCount(0),
+ mRehashPolicy(x.mRehashPolicy),
+ mAllocator(allocator)
+ {
+ reset_lose_memory(); // We do this here the same as we do it in the default ctor because it puts the container in a proper initial empty state. This code would be cleaner if we could rely on being able to use C++11 delegating constructors and just call the default ctor here.
+ swap(x); // swap will directly or indirectly handle the possibility that mAllocator != x.mAllocator.
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline const typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocator_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::get_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocator_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::get_allocator() EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::set_allocator(const allocator_type& allocator)
+ {
+ mAllocator = allocator;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::this_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mAllocator = x.mAllocator;
+ #endif
+
+ insert(x.begin(), x.end());
+ }
+ return *this;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::this_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::operator=(this_type&& x)
+ {
+ if(this != &x)
+ {
+ clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor.
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+ return *this;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::this_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::operator=(std::initializer_list<value_type> ilist)
+ {
+ // The simplest means of doing this is to clear and insert. There probably isn't a generic
+ // solution that's any more efficient without having prior knowledge of the ilist contents.
+ clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::~hashtable()
+ {
+ clear();
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNodeFromKey(const key_type& key)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(pair_first_construct, key);
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNodeFromKey(key_type&& key)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(pair_first_construct, eastl::move(key));
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeNode(node_type* pNode)
+ {
+ pNode->~node_type();
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeNodes(node_type** pNodeArray, size_type n)
+ {
+ for(size_type i = 0; i < n; ++i)
+ {
+ node_type* pNode = pNodeArray[i];
+ while(pNode)
+ {
+ node_type* const pTempNode = pNode;
+ pNode = pNode->mpNext;
+ DoFreeNode(pTempNode);
+ }
+ pNodeArray[i] = NULL;
+ }
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type**
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateBuckets(size_type n)
+ {
+ // We allocate one extra bucket to hold a sentinel, an arbitrary
+ // non-null pointer. Iterator increment relies on this.
+ EASTL_ASSERT(n > 1); // We reserve an mnBucketCount of 1 for the shared gpEmptyBucketArray.
+ EASTL_CT_ASSERT(kHashtableAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the allocator has a copy of this enum.
+ node_type** const pBucketArray = (node_type**)EASTLAllocAlignedFlags(mAllocator, (n + 1) * sizeof(node_type*), EASTL_ALIGN_OF(node_type*), 0, kHashtableAllocFlagBuckets);
+ //eastl::fill(pBucketArray, pBucketArray + n, (node_type*)NULL);
+ memset(pBucketArray, 0, n * sizeof(node_type*));
+ pBucketArray[n] = reinterpret_cast<node_type*>((uintptr_t)~0);
+ return pBucketArray;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeBuckets(node_type** pBucketArray, size_type n)
+ {
+ // If n <= 1, then pBucketArray is from the shared gpEmptyBucketArray. We don't test
+ // for pBucketArray == &gpEmptyBucketArray because one library have a different gpEmptyBucketArray
+ // than another but pass a hashtable to another. So we go by the size.
+ if(n > 1)
+ EASTLFree(mAllocator, pBucketArray, (n + 1) * sizeof(node_type*)); // '+1' because DoAllocateBuckets allocates nBucketCount + 1 buckets in order to have a NULL sentinel at the end.
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::swap(this_type& x)
+ {
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>::base_swap(x); // hash_code_base has multiple implementations, so we let them handle the swap.
+ eastl::swap(mRehashPolicy, x.mRehashPolicy);
+ EASTL_MACRO_SWAP(node_type**, mpBucketArray, x.mpBucketArray);
+ eastl::swap(mnBucketCount, x.mnBucketCount);
+ eastl::swap(mnElementCount, x.mnElementCount);
+
+ if (mAllocator != x.mAllocator) // If allocators are not equivalent...
+ {
+ eastl::swap(mAllocator, x.mAllocator);
+ }
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::rehash_policy(const rehash_policy_type& rehashPolicy)
+ {
+ mRehashPolicy = rehashPolicy;
+
+ const size_type nBuckets = rehashPolicy.GetBucketCount((uint32_t)mnElementCount);
+
+ if(nBuckets > mnBucketCount)
+ DoRehash(nBuckets);
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find(const key_type& k)
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+ return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find(const key_type& k) const
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+ return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U, typename UHash, typename BinaryPredicate>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate)
+ {
+ const hash_code_t c = (hash_code_t)uhash(other);
+ const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy.
+
+ node_type* const pNode = DoFindNodeT(mpBucketArray[n], other, predicate);
+ return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U, typename UHash, typename BinaryPredicate>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const
+ {
+ const hash_code_t c = (hash_code_t)uhash(other);
+ const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy.
+
+ node_type* const pNode = DoFindNodeT(mpBucketArray[n], other, predicate);
+ return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+
+ /// hashtable_find
+ ///
+ /// Helper function that defaults to using hash<U> and equal_to_2<T, U>.
+ /// This makes it so that by default you don't need to provide these.
+ /// Note that the default hash functions may not be what you want, though.
+ ///
+ /// Example usage. Instead of this:
+ /// hash_set<string> hashSet;
+ /// hashSet.find("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ /// You can use this:
+ /// hash_set<string> hashSet;
+ /// hashtable_find(hashSet, "hello");
+ ///
+ template <typename H, typename U>
+ inline typename H::iterator hashtable_find(H& hashTable, U u)
+ { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+ template <typename H, typename U>
+ inline typename H::const_iterator hashtable_find(const H& hashTable, U u)
+ { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other)
+ { return eastl::hashtable_find(*this, other); }
+ // VC++ doesn't appear to like the following, though it seems correct to me.
+ // So we implement the workaround above until we can straighten this out.
+ //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other) const
+ { return eastl::hashtable_find(*this, other); }
+ // VC++ doesn't appear to like the following, though it seems correct to me.
+ // So we implement the workaround above until we can straighten this out.
+ //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator,
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_range_by_hash(hash_code_t c) const
+ {
+ const size_type start = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+ node_type* const pNodeStart = mpBucketArray[start];
+
+ if (pNodeStart)
+ {
+ eastl::pair<const_iterator, const_iterator> pair(const_iterator(pNodeStart, mpBucketArray + start),
+ const_iterator(pNodeStart, mpBucketArray + start));
+ pair.second.increment_bucket();
+ return pair;
+ }
+
+ return eastl::pair<const_iterator, const_iterator>(const_iterator(mpBucketArray + mnBucketCount),
+ const_iterator(mpBucketArray + mnBucketCount));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator,
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_range_by_hash(hash_code_t c)
+ {
+ const size_type start = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+ node_type* const pNodeStart = mpBucketArray[start];
+
+ if (pNodeStart)
+ {
+ eastl::pair<iterator, iterator> pair(iterator(pNodeStart, mpBucketArray + start),
+ iterator(pNodeStart, mpBucketArray + start));
+ pair.second.increment_bucket();
+ return pair;
+
+ }
+
+ return eastl::pair<iterator, iterator>(iterator(mpBucketArray + mnBucketCount),
+ iterator(mpBucketArray + mnBucketCount));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::size_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::count(const key_type& k) const EA_NOEXCEPT
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ size_type result = 0;
+
+ // To do: Make a specialization for bU (unique keys) == true and take
+ // advantage of the fact that the count will always be zero or one in that case.
+ for(node_type* pNode = mpBucketArray[n]; pNode; pNode = pNode->mpNext)
+ {
+ if(compare(k, c, pNode))
+ ++result;
+ }
+ return result;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator,
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::equal_range(const key_type& k)
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type** head = mpBucketArray + n;
+ node_type* pNode = DoFindNode(*head, k, c);
+
+ if(pNode)
+ {
+ node_type* p1 = pNode->mpNext;
+
+ for(; p1; p1 = p1->mpNext)
+ {
+ if(!compare(k, c, p1))
+ break;
+ }
+
+ iterator first(pNode, head);
+ iterator last(p1, head);
+
+ if(!p1)
+ last.increment_bucket();
+
+ return eastl::pair<iterator, iterator>(first, last);
+ }
+
+ return eastl::pair<iterator, iterator>(iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end()
+ iterator(mpBucketArray + mnBucketCount));
+ }
+
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator,
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::equal_range(const key_type& k) const
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type** head = mpBucketArray + n;
+ node_type* pNode = DoFindNode(*head, k, c);
+
+ if(pNode)
+ {
+ node_type* p1 = pNode->mpNext;
+
+ for(; p1; p1 = p1->mpNext)
+ {
+ if(!compare(k, c, p1))
+ break;
+ }
+
+ const_iterator first(pNode, head);
+ const_iterator last(p1, head);
+
+ if(!p1)
+ last.increment_bucket();
+
+ return eastl::pair<const_iterator, const_iterator>(first, last);
+ }
+
+ return eastl::pair<const_iterator, const_iterator>(const_iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end()
+ const_iterator(mpBucketArray + mnBucketCount));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::NodeFindKeyData
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindKeyData(const key_type& k) const {
+ NodeFindKeyData d;
+ d.code = get_hash_code(k);
+ d.bucket_index = (size_type)bucket_index(k, d.code, (uint32_t)mnBucketCount);
+ d.node = DoFindNode(mpBucketArray[d.bucket_index], k, d.code);
+ return d;
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const
+ {
+ for(; pNode; pNode = pNode->mpNext)
+ {
+ if(compare(k, c, pNode))
+ return pNode;
+ }
+ return NULL;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U, typename BinaryPredicate>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNodeT(node_type* pNode, const U& other, BinaryPredicate predicate) const
+ {
+ for(; pNode; pNode = pNode->mpNext)
+ {
+ if(predicate(mExtractKey(pNode->mValue), other)) // Intentionally compare with key as first arg and other as second arg.
+ return pNode;
+ }
+ return NULL;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <bool bDeleteOnException, typename Enabled, ENABLE_IF_TRUETYPE(Enabled)> // only enabled when keys are unique
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertUniqueNode(const key_type& k, hash_code_t c, size_type n, node_type* pNodeNew)
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(bRehash.first)
+ {
+ n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second);
+ DoRehash(bRehash.second);
+ }
+
+ EASTL_ASSERT((uintptr_t)mpBucketArray != (uintptr_t)&gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EA_CONSTEXPR_IF(bDeleteOnException) { DoFreeNode(pNodeNew); }
+ throw;
+ }
+ #endif
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT, class... Args, ENABLE_IF_TRUETYPE(BoolConstantT)>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, Args&&... args) // true_type means bUniqueKeys is true.
+ {
+ // Adds the value to the hash table if not already present.
+ // If already present then the existing value is returned via an iterator/bool pair.
+
+ // We have a chicken-and-egg problem here. In order to know if and where to insert the value, we need to get the
+ // hashtable key for the value. But we don't explicitly have a value argument, we have a templated Args&&... argument.
+ // We need the value_type in order to proceed, but that entails getting an instance of a value_type from the args.
+ // And it may turn out that the value is already present in the hashtable and we need to cancel the insertion,
+ // despite having obtained a value_type to put into the hashtable. We have mitigated this problem somewhat by providing
+ // specializations of the insert function for const value_type& and value_type&&, and so the only time this function
+ // should get called is when args refers to arguments to construct a value_type.
+
+ node_type* const pNodeNew = DoAllocateNode(eastl::forward<Args>(args)...);
+ const key_type& k = mExtractKey(pNodeNew->mValue);
+ const hash_code_t c = get_hash_code(k);
+ size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNode == NULL) // If value is not present... add it.
+ {
+ return DoInsertUniqueNode<true>(k, c, n, pNodeNew);
+ }
+ else
+ {
+ // To do: We have an inefficiency to deal with here. We allocated a node above but we are freeing it here because
+ // it turned out it wasn't needed. But we needed to create the node in order to get the hashtable key for
+ // the node. One possible resolution is to create specializations: DoInsertValue(true_type, value_type&&) and
+ // DoInsertValue(true_type, const value_type&) which don't need to create a node up front in order to get the
+ // hashtable key. Probably most users would end up using these pathways instead of this Args... pathway.
+ // While we should considering handling this to-do item, a lot of the performance limitations of maps and sets
+ // in practice is with finding elements rather than adding (potentially redundant) new elements.
+ DoFreeNode(pNodeNew);
+ }
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT, class... Args, DISABLE_IF_TRUETYPE(BoolConstantT)>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, Args&&... args) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second);
+
+ node_type* pNodeNew = DoAllocateNode(eastl::forward<Args>(args)...);
+ const key_type& k = mExtractKey(pNodeNew->mValue);
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+ // this insertion operation taking some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(Args&&... args)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(eastl::forward<Args>(args)...);
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Note: The following insertion-related functions are nearly copies of the above three functions,
+ // but are for value_type&& and const value_type& arguments. It's useful for us to have the functions
+ // below, even when using a fully compliant C++11 compiler that supports the above functions.
+ // The reason is because the specializations below are slightly more efficient because they can delay
+ // the creation of a node until it's known that it will be needed.
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ inline eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtra(BoolConstantT, const key_type& k,
+ hash_code_t c, node_type* pNodeNew, value_type&& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true.
+ {
+ return DoInsertValueExtraForwarding(k, c, pNodeNew, eastl::move(value));
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ inline eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtra(BoolConstantT, const key_type& k,
+ hash_code_t c, node_type* pNodeNew, const value_type& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true.
+ {
+ return DoInsertValueExtraForwarding(k, c, pNodeNew, value);
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename VFwd, typename Enabled, ENABLE_IF_TRUETYPE(Enabled)> // true_type means bUniqueKeys is true.
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtraForwarding(const key_type& k,
+ hash_code_t c, node_type* pNodeNew, VFwd&& value)
+ {
+ // Adds the value to the hash table if not already present.
+ // If already present then the existing value is returned via an iterator/bool pair.
+ size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNode == NULL) // If value is not present... add it.
+ {
+ // Allocate the new node before doing the rehash so that we don't
+ // do a rehash if the allocation throws.
+ if(pNodeNew)
+ {
+ ::new(eastl::addressof(pNodeNew->mValue)) value_type(eastl::forward<VFwd>(value)); // It's expected that pNodeNew was allocated with allocate_uninitialized_node.
+ return DoInsertUniqueNode<false>(k, c, n, pNodeNew);
+ }
+ else
+ {
+ pNodeNew = DoAllocateNode(eastl::move(value));
+ return DoInsertUniqueNode<true>(k, c, n, pNodeNew);
+ }
+ }
+ // Else the value is already present, so don't add a new node. And don't free pNodeNew.
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, value_type&& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true.
+ {
+ const key_type& k = mExtractKey(value);
+ const hash_code_t c = get_hash_code(k);
+
+ return DoInsertValueExtra(true_type(), k, c, NULL, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, value_type&& value,
+ DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second); // Note: We don't need to wrap this call with try/catch because there's nothing we would need to do in the catch.
+
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ if(pNodeNew)
+ ::new(eastl::addressof(pNodeNew->mValue)) value_type(eastl::move(value)); // It's expected that pNodeNew was allocated with allocate_uninitialized_node.
+ else
+ pNodeNew = DoAllocateNode(eastl::move(value));
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+ // this insertion operation taking some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template<typename BoolConstantT>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, value_type&& value, DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false.
+ {
+ const key_type& k = mExtractKey(value);
+ const hash_code_t c = get_hash_code(k);
+
+ return DoInsertValueExtra(false_type(), k, c, NULL, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(value_type&& value)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(eastl::move(value));
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template<typename BoolConstantT>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, const value_type& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true.
+ {
+ const key_type& k = mExtractKey(value);
+ const hash_code_t c = get_hash_code(k);
+
+ return DoInsertValueExtra(true_type(), k, c, NULL, value);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, const value_type& value,
+ DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second); // Note: We don't need to wrap this call with try/catch because there's nothing we would need to do in the catch.
+
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ if(pNodeNew)
+ ::new(eastl::addressof(pNodeNew->mValue)) value_type(value); // It's expected that pNodeNew was allocated with allocate_uninitialized_node.
+ else
+ pNodeNew = DoAllocateNode(value);
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+ // this insertion operation taking some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template<typename BoolConstantT>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, const value_type& value, DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false.
+ {
+ const key_type& k = mExtractKey(value);
+ const hash_code_t c = get_hash_code(k);
+
+ return DoInsertValueExtra(false_type(), k, c, NULL, value);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(const value_type& value)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(value);
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocate_uninitialized_node()
+ {
+ // We don't wrap this in try/catch because users of this function are expected to do that themselves as needed.
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+ // Leave pNode->mValue uninitialized.
+ pNode->mpNext = NULL;
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::free_uninitialized_node(node_type* pNode)
+ {
+ // pNode->mValue is expected to be uninitialized.
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(true_type, const key_type& key, const hash_code_t c) // true_type means bUniqueKeys is true.
+ {
+ size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], key, c);
+
+ if(pNode == NULL)
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ // Allocate the new node before doing the rehash so that we don't
+ // do a rehash if the allocation throws.
+ node_type* const pNodeNew = DoAllocateNodeFromKey(key);
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(bRehash.first)
+ {
+ n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second);
+ DoRehash(bRehash.second);
+ }
+
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNodeNew);
+ throw;
+ }
+ #endif
+ }
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(false_type, const key_type& key, const hash_code_t c) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second);
+
+ const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+
+ node_type* const pNodeNew = DoAllocateNodeFromKey(key);
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+ // this insertion operation taking some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(true_type, key_type&& key, const hash_code_t c) // true_type means bUniqueKeys is true.
+ {
+ size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], key, c);
+
+ if(pNode == NULL)
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ // Allocate the new node before doing the rehash so that we don't
+ // do a rehash if the allocation throws.
+ node_type* const pNodeNew = DoAllocateNodeFromKey(eastl::move(key));
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(bRehash.first)
+ {
+ n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second);
+ DoRehash(bRehash.second);
+ }
+
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNodeNew);
+ throw;
+ }
+ #endif
+ }
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(false_type, key_type&& key, const hash_code_t c) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second);
+
+ const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+
+ node_type* const pNodeNew = DoAllocateNodeFromKey(eastl::move(key));
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+ // this insertion operation taking some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::emplace(Args&&... args)
+ {
+ return DoInsertValue(has_unique_keys_type(), eastl::forward<Args>(args)...); // Need to use forward instead of move because Args&& is a "universal reference" instead of an rvalue reference.
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::emplace_hint(const_iterator, Args&&... args)
+ {
+ // We currently ignore the iterator argument as a hint.
+ insert_return_type result = DoInsertValue(has_unique_keys_type(), eastl::forward<Args>(args)...);
+ return DoGetResultIterator(has_unique_keys_type(), result);
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(value_type&& otherValue)
+ {
+ return DoInsertValue(has_unique_keys_type(), eastl::move(otherValue));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class P>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(hash_code_t c, node_type* pNodeNew, P&& otherValue)
+ {
+ // pNodeNew->mValue is expected to be uninitialized.
+ value_type value(eastl::forward<P>(otherValue)); // Need to use forward instead of move because P&& is a "universal reference" instead of an rvalue reference.
+ const key_type& k = mExtractKey(value);
+ return DoInsertValueExtra(has_unique_keys_type(), k, c, pNodeNew, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const_iterator, value_type&& value)
+ {
+ // We currently ignore the iterator argument as a hint.
+ insert_return_type result = DoInsertValue(has_unique_keys_type(), value_type(eastl::move(value)));
+ return DoGetResultIterator(has_unique_keys_type(), result);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const value_type& value)
+ {
+ return DoInsertValue(has_unique_keys_type(), value);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(hash_code_t c, node_type* pNodeNew, const value_type& value)
+ {
+ // pNodeNew->mValue is expected to be uninitialized.
+ const key_type& k = mExtractKey(value);
+ return DoInsertValueExtra(has_unique_keys_type(), k, c, pNodeNew, value);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename P, class>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(P&& otherValue)
+ {
+ return emplace(eastl::forward<P>(otherValue));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const_iterator, const value_type& value)
+ {
+ // We ignore the first argument (hint iterator). It's not likely to be useful for hashtable containers.
+ insert_return_type result = DoInsertValue(has_unique_keys_type(), value);
+ return DoGetResultIterator(has_unique_keys_type(), result);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(std::initializer_list<value_type> ilist)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename InputIterator>
+ void
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(InputIterator first, InputIterator last)
+ {
+ const uint32_t nElementAdd = (uint32_t)eastl::ht_distance(first, last);
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, nElementAdd);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second);
+
+ for(; first != last; ++first)
+ DoInsertValue(has_unique_keys_type(), *first);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class M>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_or_assign(const key_type& k, M&& obj)
+ {
+ auto iter = find(k);
+ if(iter == end())
+ {
+ return insert(value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return {iter, false};
+ }
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class M>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_or_assign(key_type&& k, M&& obj)
+ {
+ auto iter = find(k);
+ if(iter == end())
+ {
+ return insert(value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return {iter, false};
+ }
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class M>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_or_assign(const_iterator, const key_type& k, M&& obj)
+ {
+ return insert_or_assign(k, eastl::forward<M>(obj)).first; // we ignore the iterator hint
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class M>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_or_assign(const_iterator, key_type&& k, M&& obj)
+ {
+ return insert_or_assign(eastl::move(k), eastl::forward<M>(obj)).first; // we ignore the iterator hint
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(const_iterator i)
+ {
+ iterator iNext(i.mpNode, i.mpBucket); // Convert from const_iterator to iterator while constructing.
+ ++iNext;
+
+ node_type* pNode = i.mpNode;
+ node_type* pNodeCurrent = *i.mpBucket;
+
+ if(pNodeCurrent == pNode)
+ *i.mpBucket = pNodeCurrent->mpNext;
+ else
+ {
+ // We have a singly-linked list, so we have no choice but to
+ // walk down it till we find the node before the node at 'i'.
+ node_type* pNodeNext = pNodeCurrent->mpNext;
+
+ while(pNodeNext != pNode)
+ {
+ pNodeCurrent = pNodeNext;
+ pNodeNext = pNodeCurrent->mpNext;
+ }
+
+ pNodeCurrent->mpNext = pNodeNext->mpNext;
+ }
+
+ DoFreeNode(pNode);
+ --mnElementCount;
+
+ return iNext;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(const_iterator first, const_iterator last)
+ {
+ while(first != last)
+ first = erase(first);
+ return iterator(first.mpNode, first.mpBucket);
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::size_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(const key_type& k)
+ {
+ // To do: Reimplement this function to do a single loop and not try to be
+ // smart about element contiguity. The mechanism here is only a benefit if the
+ // buckets are heavily overloaded; otherwise this mechanism may be slightly slower.
+
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ const size_type nElementCountSaved = mnElementCount;
+
+ node_type** pBucketArray = mpBucketArray + n;
+
+ while(*pBucketArray && !compare(k, c, *pBucketArray))
+ pBucketArray = &(*pBucketArray)->mpNext;
+
+ node_type* pDeleteList = nullptr;
+ while(*pBucketArray && compare(k, c, *pBucketArray))
+ {
+ node_type* const pNode = *pBucketArray;
+ *pBucketArray = pNode->mpNext;
+ // Don't free the node here, k might be a reference to the key inside this node,
+ // and we're re-using it when we compare to the following nodes.
+ // Instead, add it to the list of things to be deleted.
+ pNode->mpNext = pDeleteList;
+ pDeleteList = pNode;
+ --mnElementCount;
+ }
+
+ while (pDeleteList) {
+ node_type* const pToDelete = pDeleteList;
+ pDeleteList = pDeleteList->mpNext;
+ DoFreeNode(pToDelete);
+ }
+
+ return nElementCountSaved - mnElementCount;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::clear()
+ {
+ DoFreeNodes(mpBucketArray, mnBucketCount);
+ mnElementCount = 0;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::clear(bool clearBuckets)
+ {
+ DoFreeNodes(mpBucketArray, mnBucketCount);
+ if(clearBuckets)
+ {
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ reset_lose_memory();
+ }
+ mnElementCount = 0;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reset_lose_memory() EA_NOEXCEPT
+ {
+ // The reset function is a special extension function which unilaterally
+ // resets the container to an empty state without freeing the memory of
+ // the contained objects. This is useful for very quickly tearing down a
+ // container built into scratch memory.
+ mnBucketCount = 1;
+
+ #ifdef _MSC_VER
+ mpBucketArray = (node_type**)&gpEmptyBucketArray[0];
+ #else
+ void* p = &gpEmptyBucketArray[0];
+ memcpy(&mpBucketArray, &p, sizeof(mpBucketArray)); // Other compilers implement strict aliasing and casting is thus unsafe.
+ #endif
+
+ mnElementCount = 0;
+ mRehashPolicy.mnNextResize = 0;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reserve(size_type nElementCount)
+ {
+ rehash(mRehashPolicy.GetBucketCount(uint32_t(nElementCount)));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::rehash(size_type nBucketCount)
+ {
+ // Note that we unilaterally use the passed in bucket count; we do not attempt migrate it
+ // up to the next prime number. We leave it at the user's discretion to do such a thing.
+ DoRehash(nBucketCount);
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoRehash(size_type nNewBucketCount)
+ {
+ node_type** const pBucketArray = DoAllocateBuckets(nNewBucketCount); // nNewBucketCount should always be >= 2.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ node_type* pNode;
+
+ for(size_type i = 0; i < mnBucketCount; ++i)
+ {
+ while((pNode = mpBucketArray[i]) != NULL) // Using '!=' disables compiler warnings.
+ {
+ const size_type nNewBucketIndex = (size_type)bucket_index(pNode, (uint32_t)nNewBucketCount);
+
+ mpBucketArray[i] = pNode->mpNext;
+ pNode->mpNext = pBucketArray[nNewBucketIndex];
+ pBucketArray[nNewBucketIndex] = pNode;
+ }
+ }
+
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ mnBucketCount = nNewBucketCount;
+ mpBucketArray = pBucketArray;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ // A failure here means that a hash function threw an exception.
+ // We can't restore the previous state without calling the hash
+ // function again, so the only sensible recovery is to delete everything.
+ DoFreeNodes(pBucketArray, nNewBucketCount);
+ DoFreeBuckets(pBucketArray, nNewBucketCount);
+ DoFreeNodes(mpBucketArray, mnBucketCount);
+ mnElementCount = 0;
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::validate() const
+ {
+ // Verify our empty bucket array is unmodified.
+ if(gpEmptyBucketArray[0] != NULL)
+ return false;
+
+ if(gpEmptyBucketArray[1] != (void*)uintptr_t(~0))
+ return false;
+
+ // Verify that we have at least one bucket. Calculations can
+ // trigger division by zero exceptions otherwise.
+ if(mnBucketCount == 0)
+ return false;
+
+ // Verify that gpEmptyBucketArray is used correctly.
+ // gpEmptyBucketArray is only used when initially empty.
+ if((void**)mpBucketArray == &gpEmptyBucketArray[0])
+ {
+ if(mnElementCount) // gpEmptyBucketArray is used only for empty hash tables.
+ return false;
+
+ if(mnBucketCount != 1) // gpEmptyBucketArray is used exactly an only for mnBucketCount == 1.
+ return false;
+ }
+ else
+ {
+ if(mnBucketCount < 2) // Small bucket counts *must* use gpEmptyBucketArray.
+ return false;
+ }
+
+ // Verify that the element count matches mnElementCount.
+ size_type nElementCount = 0;
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ ++nElementCount;
+
+ if(nElementCount != mnElementCount)
+ return false;
+
+ // To do: Verify that individual elements are in the expected buckets.
+
+ return true;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ int hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ // operator==, != have been moved to the specific container subclasses (e.g. hash_map).
+
+ // The following comparison operators are deprecated and will likely be removed in a
+ // future version of this package.
+ //
+ // Comparing hash tables for less-ness is an odd thing to do. We provide it for
+ // completeness, though the user is advised to be wary of how they use this.
+ //
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool operator<(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ // This requires hash table elements to support operator<. Since the hash table
+ // doesn't compare elements via less (it does so via equals), we must use the
+ // globally defined operator less for the elements.
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool operator>(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool operator<=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool operator>=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void swap(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/in_place_t.h b/EASTL/include/EASTL/internal/in_place_t.h
new file mode 100644
index 0000000..79acd18
--- /dev/null
+++ b/EASTL/include/EASTL/internal/in_place_t.h
@@ -0,0 +1,82 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_IN_PLACE_T_H
+#define EASTL_INTERNAL_IN_PLACE_T_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+ namespace Internal
+ {
+ struct in_place_tag {};
+ template <class> struct in_place_type_tag {};
+ template <size_t> struct in_place_index_tag {};
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// in_place_tag
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/in_place_tag
+ ///
+ struct in_place_tag
+ {
+ in_place_tag() = delete;
+
+ private:
+ explicit in_place_tag(Internal::in_place_tag) {}
+ friend inline in_place_tag Internal_ConstructInPlaceTag();
+ };
+
+ // internal factory function for in_place_tag
+ inline in_place_tag Internal_ConstructInPlaceTag() { return in_place_tag(Internal::in_place_tag{}); }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// in_place_t / in_place_type_t / in_place_index_t
+ ///
+ /// used to disambiguate overloads that take arguments (possibly a parameter
+ /// pack) for in-place construction of some value.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
+ ///
+ using in_place_t = in_place_tag(&)(Internal::in_place_tag);
+
+ template <class T>
+ using in_place_type_t = in_place_tag(&)(Internal::in_place_type_tag<T>);
+
+ template <size_t N>
+ using in_place_index_t = in_place_tag(&)(Internal::in_place_index_tag<N>);
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// in_place / in_place<T> / in_place<size_t>
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/in_place
+ ///
+ inline in_place_tag in_place(Internal::in_place_tag) { return Internal_ConstructInPlaceTag(); }
+
+ template <class T>
+ inline in_place_tag in_place(Internal::in_place_type_tag<T>) { return Internal_ConstructInPlaceTag(); }
+
+ template <std::size_t I>
+ inline in_place_tag in_place(Internal::in_place_index_tag<I>) { return Internal_ConstructInPlaceTag(); }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/integer_sequence.h b/EASTL/include/EASTL/internal/integer_sequence.h
new file mode 100644
index 0000000..ba5dd4e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/integer_sequence.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTEGER_SEQUENCE_H
+#define EASTL_INTEGER_SEQUENCE_H
+
+#include <EABase/config/eacompiler.h>
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+
+#if EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+
+// integer_sequence
+template <typename T, T... Ints>
+class integer_sequence
+{
+public:
+ typedef T value_type;
+ static_assert(is_integral<T>::value, "eastl::integer_sequence can only be instantiated with an integral type");
+ static EA_CONSTEXPR size_t size() EA_NOEXCEPT { return sizeof...(Ints); }
+};
+
+template <size_t... Is>
+using index_sequence = integer_sequence<size_t, Is...>;
+
+#if (defined(EA_COMPILER_GNUC) && EA_COMPILER_VERSION >= 8001)
+
+template <typename T, T N>
+using make_integer_sequence = integer_sequence<T, __integer_pack(N)...>;
+
+#elif (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_BUILTIN(__make_integer_seq)) || (defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1910))
+
+template <class T, T N>
+using make_integer_sequence = __make_integer_seq<integer_sequence, T, N>;
+
+#else
+
+template <size_t N, typename IndexSeq>
+struct make_index_sequence_impl;
+
+template <size_t N, size_t... Is>
+struct make_index_sequence_impl<N, integer_sequence<size_t, Is...>>
+{
+ typedef typename make_index_sequence_impl<N - 1, integer_sequence<size_t, N - 1, Is...>>::type type;
+};
+
+template <size_t... Is>
+struct make_index_sequence_impl<0, integer_sequence<size_t, Is...>>
+{
+ typedef integer_sequence<size_t, Is...> type;
+};
+
+template <typename Target, typename Seq>
+struct integer_sequence_convert_impl;
+
+template <typename Target, size_t... Is>
+struct integer_sequence_convert_impl<Target, integer_sequence<size_t, Is...>>
+{
+ typedef integer_sequence<Target, Is...> type;
+};
+
+template <typename T, T N>
+struct make_integer_sequence_impl
+{
+ typedef typename integer_sequence_convert_impl<T, typename make_index_sequence_impl<N, integer_sequence<size_t>>::type>::type type;
+};
+
+template <typename T, T N>
+using make_integer_sequence = typename make_integer_sequence_impl<T, N>::type;
+
+#endif
+
+template <size_t N>
+using make_index_sequence = make_integer_sequence<size_t, N>;
+
+// Helper alias template that converts any type parameter pack into an index sequence of the same length
+template<typename... T>
+using index_sequence_for = make_index_sequence<sizeof...(T)>;
+
+namespace internal
+{
+
+template <typename T>
+struct integer_sequence_size_helper;
+
+template <typename T, T... Ints>
+struct integer_sequence_size_helper<eastl::integer_sequence<T, Ints...>> : public integral_constant<size_t, sizeof...(Ints)>
+{
+};
+
+template <typename T>
+struct integer_sequence_size : public integer_sequence_size_helper<eastl::remove_cv_t<T>>
+{
+};
+
+template <typename T>
+struct index_sequence_size : public integer_sequence_size_helper<eastl::remove_cv_t<T>>
+{
+};
+
+template <typename T>
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR size_t integer_sequence_size_v = integer_sequence_size<T>::value;
+
+template <typename T>
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR size_t index_sequence_size_v = index_sequence_size<T>::value;
+
+
+} // namespace internal
+
+#endif // EASTL_VARIADIC_TEMPLATES_ENABLED
+
+} // namespace eastl
+
+#endif // EASTL_INTEGER_SEQUENCE_H
diff --git a/EASTL/include/EASTL/internal/intrusive_hashtable.h b/EASTL/include/EASTL/internal/intrusive_hashtable.h
new file mode 100644
index 0000000..dccca5b
--- /dev/null
+++ b/EASTL/include/EASTL/internal/intrusive_hashtable.h
@@ -0,0 +1,989 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements an intrusive hash table, which is a hash table whereby
+// the container nodes are the hash table objects themselves. This has benefits
+// primarily in terms of memory management. There are some minor limitations
+// that result from this.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_INTERNAL_INTRUSIVE_HASHTABLE_H
+#define EASTL_INTERNAL_INTRUSIVE_HASHTABLE_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/hashtable.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+
+EA_DISABLE_ALL_VC_WARNINGS();
+#include <new>
+#include <stddef.h>
+#include <string.h>
+EA_RESTORE_ALL_VC_WARNINGS();
+
+
+namespace eastl
+{
+
+ /// intrusive_hash_node
+ ///
+ /// A hash_node stores an element in a hash table, much like a
+ /// linked list node stores an element in a linked list.
+ /// An intrusive_hash_node additionally can, via template parameter,
+ /// store a hash code in the node to speed up hash calculations
+ /// and comparisons in some cases.
+ ///
+ /// To consider: Make a version of intrusive_hash_node which is
+ /// templated on the container type. This would allow for the
+ /// mpNext pointer to be the container itself and thus allow
+ /// for easier debugging.
+ ///
+ /// Example usage:
+ /// struct Widget : public intrusive_hash_node{ ... };
+ ///
+ /// struct Dagget : public intrusive_hash_node_key<int>{ ... };
+ ///
+ struct intrusive_hash_node
+ {
+ intrusive_hash_node* mpNext;
+ };
+
+
+ template <typename Key>
+ struct intrusive_hash_node_key : public intrusive_hash_node
+ {
+ typedef Key key_type;
+ Key mKey;
+ };
+
+
+
+ /// intrusive_node_iterator
+ ///
+ /// Node iterators iterate nodes within a given bucket.
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+ template <typename Value, bool bConst>
+ struct intrusive_node_iterator
+ {
+ public:
+ typedef intrusive_node_iterator<Value, bConst> this_type;
+ typedef Value value_type;
+ typedef Value node_type;
+ typedef ptrdiff_t difference_type;
+ typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+ typedef typename type_select<bConst, const Value&, Value&>::type reference;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ intrusive_node_iterator()
+ : mpNode(NULL) { }
+
+ explicit intrusive_node_iterator(value_type* pNode)
+ : mpNode(pNode) { }
+
+ intrusive_node_iterator(const intrusive_node_iterator<Value, true>& x)
+ : mpNode(x.mpNode) { }
+
+ reference operator*() const
+ { return *mpNode; }
+
+ pointer operator->() const
+ { return mpNode; }
+
+ this_type& operator++()
+ { mpNode = static_cast<node_type*>(mpNode->mpNext); return *this; }
+
+ this_type operator++(int)
+ { this_type temp(*this); mpNode = static_cast<node_type*>(mpNode->mpNext); return temp; }
+
+ }; // intrusive_node_iterator
+
+
+
+
+ /// intrusive_hashtable_iterator_base
+ ///
+ /// An intrusive_hashtable_iterator_base iterates the entire hash table and
+ /// not just nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// We define a base class here because it is shared by both const and
+ /// non-const iterators.
+ ///
+ template <typename Value>
+ struct intrusive_hashtable_iterator_base
+ {
+ public:
+ typedef Value value_type;
+
+ protected:
+ template <typename, typename, typename, typename, size_t, bool, bool>
+ friend class intrusive_hashtable;
+
+ template <typename, bool>
+ friend struct intrusive_hashtable_iterator;
+
+ template <typename V>
+ friend bool operator==(const intrusive_hashtable_iterator_base<V>&, const intrusive_hashtable_iterator_base<V>&);
+
+ template <typename V>
+ friend bool operator!=(const intrusive_hashtable_iterator_base<V>&, const intrusive_hashtable_iterator_base<V>&);
+
+ value_type* mpNode; // Current node within current bucket.
+ value_type** mpBucket; // Current bucket.
+
+ public:
+ intrusive_hashtable_iterator_base(value_type* pNode, value_type** pBucket)
+ : mpNode(pNode), mpBucket(pBucket) { }
+
+ void increment_bucket()
+ {
+ ++mpBucket;
+ while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end
+ ++mpBucket; // of the bucket array so that finding the end of the bucket
+ mpNode = *mpBucket; // array is quick and simple.
+ }
+
+ void increment()
+ {
+ mpNode = static_cast<value_type*>(mpNode->mpNext);
+
+ while(mpNode == NULL)
+ mpNode = *++mpBucket;
+ }
+
+ }; // intrusive_hashtable_iterator_base
+
+
+
+
+ /// intrusive_hashtable_iterator
+ ///
+ /// An intrusive_hashtable_iterator iterates the entire hash table and not
+ /// just nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+ template <typename Value, bool bConst>
+ struct intrusive_hashtable_iterator : public intrusive_hashtable_iterator_base<Value>
+ {
+ public:
+ typedef intrusive_hashtable_iterator_base<Value> base_type;
+ typedef intrusive_hashtable_iterator<Value, bConst> this_type;
+ typedef intrusive_hashtable_iterator<Value, false> this_type_non_const;
+ typedef typename base_type::value_type value_type;
+ typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+ typedef typename type_select<bConst, const Value&, Value&>::type reference;
+ typedef ptrdiff_t difference_type;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ intrusive_hashtable_iterator()
+ : base_type(NULL, NULL) { }
+
+ explicit intrusive_hashtable_iterator(value_type* pNode, value_type** pBucket)
+ : base_type(pNode, pBucket) { }
+
+ explicit intrusive_hashtable_iterator(value_type** pBucket)
+ : base_type(*pBucket, pBucket) { }
+
+ intrusive_hashtable_iterator(const this_type_non_const& x)
+ : base_type(x.mpNode, x.mpBucket) { }
+
+ reference operator*() const
+ { return *base_type::mpNode; }
+
+ pointer operator->() const
+ { return base_type::mpNode; }
+
+ this_type& operator++()
+ { base_type::increment(); return *this; }
+
+ this_type operator++(int)
+ { this_type temp(*this); base_type::increment(); return temp; }
+
+ }; // intrusive_hashtable_iterator
+
+
+
+ /// use_intrusive_key
+ ///
+ /// operator()(x) returns x.mKey. Used in maps, as opposed to sets.
+ /// This is a template policy implementation; it is an alternative to
+ /// the use_self template implementation, which is used for sets.
+ ///
+ template <typename Node, typename Key>
+ struct use_intrusive_key // : public unary_function<T, T> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef Key result_type;
+
+ const result_type& operator()(const Node& x) const
+ { return x.mKey; }
+ };
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// intrusive_hashtable
+ ///
+ template <typename Key, typename Value, typename Hash, typename Equal,
+ size_t bucketCount, bool bConstIterators, bool bUniqueKeys>
+ class intrusive_hashtable
+ {
+ public:
+ typedef intrusive_hashtable<Key, Value, Hash, Equal,
+ bucketCount, bConstIterators, bUniqueKeys> this_type;
+ typedef Key key_type;
+ typedef Value value_type;
+ typedef Value mapped_type;
+ typedef Value node_type;
+ typedef uint32_t hash_code_t;
+ typedef Equal key_equal;
+ typedef ptrdiff_t difference_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef intrusive_node_iterator<value_type, bConstIterators> local_iterator;
+ typedef intrusive_node_iterator<value_type, true> const_local_iterator;
+ typedef intrusive_hashtable_iterator<value_type, bConstIterators> iterator;
+ typedef intrusive_hashtable_iterator<value_type, true> const_iterator;
+ typedef typename type_select<bUniqueKeys, pair<iterator, bool>, iterator>::type insert_return_type;
+ typedef typename type_select<bConstIterators, eastl::use_self<Value>,
+ eastl::use_intrusive_key<Value, key_type> >::type extract_key;
+
+ enum
+ {
+ kBucketCount = bucketCount
+ };
+
+ protected:
+ node_type* mBucketArray[kBucketCount + 1]; // '+1' because we have an end bucket which is non-NULL so iterators always stop on it.
+ size_type mnElementCount;
+ Hash mHash; // To do: Use base class optimization to make this go away when it is of zero size.
+ Equal mEqual; // To do: Use base class optimization to make this go away when it is of zero size.
+
+ public:
+ intrusive_hashtable(const Hash&, const Equal&);
+
+ void swap(this_type& x);
+
+ iterator begin() EA_NOEXCEPT
+ {
+ iterator i(mBucketArray);
+ if(!i.mpNode)
+ i.increment_bucket();
+ return i;
+ }
+
+ const_iterator begin() const EA_NOEXCEPT
+ {
+ const_iterator i(const_cast<node_type**>(mBucketArray));
+ if(!i.mpNode)
+ i.increment_bucket();
+ return i;
+ }
+
+ const_iterator cbegin() const EA_NOEXCEPT
+ {
+ return begin();
+ }
+
+ iterator end() EA_NOEXCEPT
+ { return iterator(mBucketArray + kBucketCount); }
+
+ const_iterator end() const EA_NOEXCEPT
+ { return const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount); }
+
+ const_iterator cend() const EA_NOEXCEPT
+ { return const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount); }
+
+ local_iterator begin(size_type n) EA_NOEXCEPT
+ { return local_iterator(mBucketArray[n]); }
+
+ const_local_iterator begin(size_type n) const EA_NOEXCEPT
+ { return const_local_iterator(mBucketArray[n]); }
+
+ const_local_iterator cbegin(size_type n) const EA_NOEXCEPT
+ { return const_local_iterator(mBucketArray[n]); }
+
+ local_iterator end(size_type) EA_NOEXCEPT
+ { return local_iterator(NULL); }
+
+ const_local_iterator end(size_type) const EA_NOEXCEPT
+ { return const_local_iterator(NULL); }
+
+ const_local_iterator cend(size_type) const EA_NOEXCEPT
+ { return const_local_iterator(NULL); }
+
+ size_type size() const EA_NOEXCEPT
+ { return mnElementCount; }
+
+ bool empty() const EA_NOEXCEPT
+ { return mnElementCount == 0; }
+
+ size_type bucket_count() const EA_NOEXCEPT // This function is unnecessary, as the user can directly reference
+ { return kBucketCount; } // intrusive_hashtable::kBucketCount as a constant.
+
+ size_type bucket_size(size_type n) const EA_NOEXCEPT
+ { return (size_type)eastl::distance(begin(n), end(n)); }
+
+ size_type bucket(const key_type& k) const EA_NOEXCEPT
+ { return (size_type)(mHash(k) % kBucketCount); }
+
+ public:
+ float load_factor() const EA_NOEXCEPT
+ { return (float)mnElementCount / (float)kBucketCount; }
+
+ public:
+ insert_return_type insert(value_type& value)
+ { return DoInsertValue(value, integral_constant<bool, bUniqueKeys>()); }
+
+ insert_return_type insert(const_iterator, value_type& value)
+ { return insert(value); } // To consider: We might be able to use the iterator argument to specify a specific insertion location.
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ public:
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& k);
+ iterator remove(value_type& value); // Removes by value instead of by iterator. This is an O(1) operation, due to this hashtable being 'intrusive'.
+
+ void clear();
+
+ public:
+ iterator find(const key_type& k);
+ const_iterator find(const key_type& k) const;
+
+ /// Implements a find whereby the user supplies a comparison of a different type
+ /// than the hashtable value_type. A useful case of this is one whereby you have
+ /// a container of string objects but want to do searches via passing in char pointers.
+ /// The problem is that without this kind of find, you need to do the expensive operation
+ /// of converting the char pointer to a string so it can be used as the argument to the
+ /// find function.
+ ///
+ /// Example usage:
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example usage (namespaces omitted for brevity):
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename U, typename UHash, typename BinaryPredicate>
+ iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
+
+ template <typename U, typename UHash, typename BinaryPredicate>
+ const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
+
+ template <typename U>
+ iterator find_as(const U& u);
+
+ template <typename U>
+ const_iterator find_as(const U& u) const;
+
+ size_type count(const key_type& k) const;
+
+ // The use for equal_range in a hash_table seems somewhat questionable.
+ // The primary reason for its existence is to replicate the interface of set/map.
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ public:
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ public:
+ Hash hash_function() const
+ { return mHash; }
+
+ Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+ { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
+
+ const key_equal& key_eq() const
+ { return mEqual; }
+
+ key_equal& key_eq()
+ { return mEqual; }
+
+ protected:
+ eastl::pair<iterator, bool> DoInsertValue(value_type&, true_type); // true_type means bUniqueKeys is true.
+ iterator DoInsertValue(value_type&, false_type); // false_type means bUniqueKeys is false.
+
+ node_type* DoFindNode(node_type* pNode, const key_type& k) const;
+
+ template <typename U, typename BinaryPredicate>
+ node_type* DoFindNode(node_type* pNode, const U& u, BinaryPredicate predicate) const;
+
+ }; // class intrusive_hashtable
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // node_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, bool bConst>
+ inline bool operator==(const intrusive_node_iterator<Value, bConst>& a,
+ const intrusive_node_iterator<Value, bConst>& b)
+ { return a.mpNode == b.mpNode; }
+
+ template <typename Value, bool bConst>
+ inline bool operator!=(const intrusive_node_iterator<Value, bConst>& a,
+ const intrusive_node_iterator<Value, bConst>& b)
+ { return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hashtable_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value>
+ inline bool operator==(const intrusive_hashtable_iterator_base<Value>& a,
+ const intrusive_hashtable_iterator_base<Value>& b)
+ { return a.mpNode == b.mpNode; }
+
+
+ template <typename Value>
+ inline bool operator!=(const intrusive_hashtable_iterator_base<Value>& a,
+ const intrusive_hashtable_iterator_base<Value>& b)
+ { return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_hashtable
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::intrusive_hashtable(const H& h, const Eq& eq)
+ : mnElementCount(0),
+ mHash(h),
+ mEqual(eq)
+ {
+ memset(mBucketArray, 0, kBucketCount * sizeof(mBucketArray[0]));
+ mBucketArray[kBucketCount] = reinterpret_cast<node_type*>((uintptr_t)~0);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ void intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::swap(this_type& x)
+ {
+ for(size_t i = 0; i < kBucketCount; i++)
+ eastl::swap(mBucketArray[i], x.mBucketArray[i]);
+
+ eastl::swap(mnElementCount, x.mnElementCount);
+ eastl::swap(mHash, x.mHash);
+ eastl::swap(mEqual, x.mEqual);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find(const key_type& k)
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], k);
+ return pNode ? iterator(pNode, mBucketArray + n) : iterator(mBucketArray + kBucketCount);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find(const key_type& k) const
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], k);
+ return pNode ? const_iterator(pNode, const_cast<node_type**>(mBucketArray) + n) : const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U, typename UHash, typename BinaryPredicate>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate)
+ {
+ const size_type n = (size_type)(uhash(other) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], other, predicate);
+ return pNode ? iterator(pNode, mBucketArray + n) : iterator(mBucketArray + kBucketCount);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U, typename UHash, typename BinaryPredicate>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const
+ {
+ const size_type n = (size_type)(uhash(other) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], other, predicate);
+ return pNode ? const_iterator(pNode, const_cast<node_type**>(mBucketArray) + n) : const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount);
+ }
+
+
+ /// intrusive_hashtable_find
+ ///
+ /// Helper function that defaults to using hash<U> and equal_to_2<T, U>.
+ /// This makes it so that by default you don't need to provide these.
+ /// Note that the default hash functions may not be what you want, though.
+ ///
+ /// Example usage. Instead of this:
+ /// hash_set<string> hashSet;
+ /// hashSet.find("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ /// You can use this:
+ /// hash_set<string> hashSet;
+ /// hashtable_find(hashSet, "hello");
+ ///
+ template <typename H, typename U>
+ inline typename H::iterator intrusive_hashtable_find(H& hashTable, const U& u)
+ { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+ template <typename H, typename U>
+ inline typename H::const_iterator intrusive_hashtable_find(const H& hashTable, const U& u)
+ { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find_as(const U& other)
+ { return eastl::intrusive_hashtable_find(*this, other); }
+ // VC++ doesn't appear to like the following, though it seems correct to me.
+ // So we implement the workaround above until we can straighten this out.
+ //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find_as(const U& other) const
+ { return eastl::intrusive_hashtable_find(*this, other); }
+ // VC++ doesn't appear to like the following, though it seems correct to me.
+ // So we implement the workaround above until we can straighten this out.
+ //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::size_type
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::count(const key_type& k) const
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ size_type result = 0;
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ // To do: Make a specialization for bU (unique keys) == true and take
+ // advantage of the fact that the count will always be zero or one in that case.
+ for(node_type* pNode = mBucketArray[n]; pNode; pNode = static_cast<node_type*>(pNode->mpNext))
+ {
+ if(mEqual(k, extractKey(*pNode)))
+ ++result;
+ }
+ return result;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ eastl::pair<typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator,
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator>
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::equal_range(const key_type& k)
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ node_type** head = mBucketArray + n;
+ node_type* pNode = DoFindNode(*head, k);
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ if(pNode)
+ {
+ node_type* p1 = static_cast<node_type*>(pNode->mpNext);
+
+ for(; p1; p1 = static_cast<node_type*>(p1->mpNext))
+ {
+ if(!mEqual(k, extractKey(*p1)))
+ break;
+ }
+
+ iterator first(pNode, head);
+ iterator last(p1, head);
+
+ if(!p1)
+ last.increment_bucket();
+
+ return eastl::pair<iterator, iterator>(first, last);
+ }
+
+ return eastl::pair<iterator, iterator>(iterator(mBucketArray + kBucketCount),
+ iterator(mBucketArray + kBucketCount));
+ }
+
+
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ eastl::pair<typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator,
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator>
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::equal_range(const key_type& k) const
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ node_type** head = const_cast<node_type**>(mBucketArray + n);
+ node_type* pNode = DoFindNode(*head, k);
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ if(pNode)
+ {
+ node_type* p1 = static_cast<node_type*>(pNode->mpNext);
+
+ for(; p1; p1 = static_cast<node_type*>(p1->mpNext))
+ {
+ if(!mEqual(k, extractKey(*p1)))
+ break;
+ }
+
+ const_iterator first(pNode, head);
+ const_iterator last(p1, head);
+
+ if(!p1)
+ last.increment_bucket();
+
+ return eastl::pair<const_iterator, const_iterator>(first, last);
+ }
+
+ return eastl::pair<const_iterator, const_iterator>(const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount),
+ const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount));
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::node_type*
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::DoFindNode(node_type* pNode, const key_type& k) const
+ {
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ for(; pNode; pNode = static_cast<node_type*>(pNode->mpNext))
+ {
+ if(mEqual(k, extractKey(*pNode)))
+ return pNode;
+ }
+ return NULL;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U, typename BinaryPredicate>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::node_type*
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::DoFindNode(node_type* pNode, const U& other, BinaryPredicate predicate) const
+ {
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ for(; pNode; pNode = static_cast<node_type*>(pNode->mpNext))
+ {
+ if(predicate(extractKey(*pNode), other)) // Intentionally compare with key as first arg and other as second arg.
+ return pNode;
+ }
+ return NULL;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ eastl::pair<typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator, bool>
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::DoInsertValue(value_type& value, true_type) // true_type means bUniqueKeys is true.
+ {
+ // For sets (as opposed to maps), one could argue that all insertions are successful,
+ // as all elements are unique. However, the equal function might not think so.
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+ const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], extractKey(value));
+
+ if(pNode == NULL)
+ {
+ value.mpNext = mBucketArray[n];
+ mBucketArray[n] = &value;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(&value, mBucketArray + n), true);
+ }
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mBucketArray + n), false);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::DoInsertValue(value_type& value, false_type) // false_type means bUniqueKeys is false.
+ {
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+ const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount);
+ node_type* const pNodePrev = DoFindNode(mBucketArray[n], extractKey(value));
+
+ if(pNodePrev == NULL)
+ {
+ value.mpNext = mBucketArray[n];
+ mBucketArray[n] = &value;
+ }
+ else
+ {
+ value.mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = &value;
+ }
+
+ ++mnElementCount;
+
+ return iterator(&value, mBucketArray + n);
+ }
+
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename InputIterator>
+ inline void intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::insert(InputIterator first, InputIterator last)
+ {
+ for(; first != last; ++first)
+ insert(*first);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::erase(const_iterator i)
+ {
+ iterator iNext(i.mpNode, i.mpBucket);
+ ++iNext;
+
+ node_type* pNode = i.mpNode;
+ node_type* pNodeCurrent = *i.mpBucket;
+
+ if(pNodeCurrent == pNode)
+ *i.mpBucket = static_cast<node_type*>(pNodeCurrent->mpNext);
+ else
+ {
+ // We have a singly-linked list, so we have no choice but to
+ // walk down it till we find the node before the node at 'i'.
+ node_type* pNodeNext = static_cast<node_type*>(pNodeCurrent->mpNext);
+
+ while(pNodeNext != pNode)
+ {
+ pNodeCurrent = pNodeNext;
+ pNodeNext = static_cast<node_type*>(pNodeCurrent->mpNext);
+ }
+
+ pNodeCurrent->mpNext = static_cast<node_type*>(pNodeNext->mpNext);
+ }
+
+ // To consider: In debug builds set the node mpNext to NULL.
+ --mnElementCount;
+
+ return iNext;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::erase(const_iterator first, const_iterator last)
+ {
+ while(first != last)
+ first = erase(first);
+ return iterator(first.mpNode, first.mpBucket);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::size_type
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::erase(const key_type& k)
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ const size_type nElementCountSaved = mnElementCount;
+ node_type*& pNodeBase = mBucketArray[n];
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ // Note by Paul Pedriana:
+ // We have two loops here, and I'm not finding any easy way to having just one
+ // loop without changing the requirements of the hashtable node definition.
+ // It's a problem of taking an address of a variable and converting it to the
+ // address of another type without knowing what that type is. Perhaps I'm a
+ // little overly tired, so if there is a simple solution I am probably missing it.
+
+ while(pNodeBase && mEqual(k, extractKey(*pNodeBase)))
+ {
+ pNodeBase = static_cast<node_type*>(pNodeBase->mpNext);
+ --mnElementCount;
+ }
+
+ node_type* pNodePrev = pNodeBase;
+
+ if(pNodePrev)
+ {
+ node_type* pNodeCur;
+
+ while((pNodeCur = static_cast<node_type*>(pNodePrev->mpNext)) != NULL)
+ {
+ if(mEqual(k, extractKey(*pNodeCur)))
+ {
+ pNodePrev->mpNext = static_cast<node_type*>(pNodeCur->mpNext);
+ --mnElementCount; // To consider: In debug builds set the node mpNext to NULL.
+ }
+ else
+ pNodePrev = static_cast<node_type*>(pNodePrev->mpNext);
+ }
+ }
+
+ return nElementCountSaved - mnElementCount;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::remove(value_type& value)
+ {
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+ const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount);
+
+ return erase(iterator(&value, &mBucketArray[n]));
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline void intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::clear()
+ {
+ // To consider: In debug builds set the node mpNext to NULL.
+ memset(mBucketArray, 0, kBucketCount * sizeof(mBucketArray[0]));
+ mnElementCount = 0;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::validate() const
+ {
+ // Verify that the element count matches mnElementCount.
+ size_type nElementCount = 0;
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ ++nElementCount;
+
+ if(nElementCount != mnElementCount)
+ return false;
+
+ // To do: Verify that individual elements are in the expected buckets.
+
+ return true;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ int intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator==(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator!=(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return !(a == b);
+ }
+
+
+ // Comparing hash tables for less-ness is an odd thing to do. We provide it for
+ // completeness, though the user is advised to be wary of how they use this.
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator<(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ // This requires hash table elements to support operator<. Since the hash table
+ // doesn't compare elements via less (it does so via equals), we must use the
+ // globally defined operator less for the elements.
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator>(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator<=(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator>=(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline void swap(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/mem_fn.h b/EASTL/include/EASTL/internal/mem_fn.h
new file mode 100644
index 0000000..1d3e7b3
--- /dev/null
+++ b/EASTL/include/EASTL/internal/mem_fn.h
@@ -0,0 +1,304 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_MEM_FN_H
+#define EASTL_INTERNAL_MEM_FN_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+#pragma once
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+// The code in this file is a modification of the libcxx implementation. We copy
+// the license information here as required.
+//
+// We implement only enough of mem_fn to implement eastl::function.
+////////////////////////////////////////////////////////////////////////////////
+
+//===------------------------ functional ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+namespace eastl
+{
+ //
+ // apply_cv
+ //
+ template <class T, class U,
+ bool = is_const<typename remove_reference<T>::type>::value,
+ bool = is_volatile<typename remove_reference<T>::type>::value>
+ struct apply_cv { typedef U type; };
+
+ template <class T, class U> struct apply_cv<T, U, true, false> { typedef const U type; };
+ template <class T, class U> struct apply_cv<T, U, false, true> { typedef volatile U type; };
+ template <class T, class U> struct apply_cv<T, U, true, true> { typedef const volatile U type; };
+ template <class T, class U> struct apply_cv<T&, U, false, false> { typedef U& type; };
+ template <class T, class U> struct apply_cv<T&, U, true, false> { typedef const U& type; };
+ template <class T, class U> struct apply_cv<T&, U, false, true> { typedef volatile U& type; };
+ template <class T, class U> struct apply_cv<T&, U, true, true> { typedef const volatile U& type; };
+
+
+
+ //
+ // has_result_type
+ //
+ template <class T>
+ struct has_result_type
+ {
+ private:
+ template <class U>
+ static eastl::no_type test(...);
+
+ template <class U>
+ static eastl::yes_type test(typename U::result_type* = 0);
+
+ public:
+ static const bool value = sizeof(test<T>(0)) == sizeof(eastl::yes_type);
+ };
+
+
+
+ //
+ // derives_from_unary_function
+ // derives_from_binary_function
+ //
+ template <class T>
+ struct derives_from_unary_function
+ {
+ private:
+ static eastl::no_type test(...);
+
+ template <class A, class R>
+ static unary_function<A, R> test(const volatile unary_function<A, R>*);
+
+ public:
+ static const bool value = !is_same<decltype(test((T*)0)), eastl::no_type>::value;
+ typedef decltype(test((T*)0)) type;
+ };
+
+ template <class T>
+ struct derives_from_binary_function
+ {
+ private:
+ static eastl::no_type test(...);
+ template <class A1, class A2, class R>
+ static binary_function<A1, A2, R> test(const volatile binary_function<A1, A2, R>*);
+
+ public:
+ static const bool value = !is_same<decltype(test((T*)0)), eastl::no_type>::value;
+ typedef decltype(test((T*)0)) type;
+ };
+
+
+
+ //
+ // maybe_derives_from_unary_function
+ // maybe_derives_from_binary_function
+ //
+ template <class T, bool = derives_from_unary_function<T>::value>
+ struct maybe_derive_from_unary_function // bool is true
+ : public derives_from_unary_function<T>::type { };
+
+ template <class T>
+ struct maybe_derive_from_unary_function<T, false> { };
+
+ template <class T, bool = derives_from_binary_function<T>::value>
+ struct maybe_derive_from_binary_function // bool is true
+ : public derives_from_binary_function<T>::type { };
+
+ template <class T>
+ struct maybe_derive_from_binary_function<T, false> { };
+
+
+
+ //
+ // weak_result_type_imp
+ //
+ template <class T, bool = has_result_type<T>::value>
+ struct weak_result_type_imp // bool is true
+ : public maybe_derive_from_unary_function<T>,
+ public maybe_derive_from_binary_function<T>
+ {
+ typedef typename T::result_type result_type;
+ };
+
+ template <class T>
+ struct weak_result_type_imp<T, false> : public maybe_derive_from_unary_function<T>,
+ public maybe_derive_from_binary_function<T> { };
+
+
+
+ //
+ // weak_result_type
+ //
+ template <class T>
+ struct weak_result_type : public weak_result_type_imp<T> { };
+
+ // 0 argument case
+ template <class R> struct weak_result_type<R()> { typedef R result_type; };
+ template <class R> struct weak_result_type<R(&)()> { typedef R result_type; };
+ template <class R> struct weak_result_type<R (*)()> { typedef R result_type; };
+
+ // 1 argument case
+ template <class R, class A1> struct weak_result_type<R(A1)> : public unary_function<A1, R> { };
+ template <class R, class A1> struct weak_result_type<R(&)(A1)> : public unary_function<A1, R> { };
+ template <class R, class A1> struct weak_result_type<R (*)(A1)> : public unary_function<A1, R> { };
+ template <class R, class C> struct weak_result_type<R (C::*)()> : public unary_function<C*, R> { };
+ template <class R, class C> struct weak_result_type<R (C::*)() const> : public unary_function<const C*, R> { };
+ template <class R, class C> struct weak_result_type<R (C::*)() volatile> : public unary_function<volatile C*, R> { };
+ template <class R, class C> struct weak_result_type<R (C::*)() const volatile> : public unary_function<const volatile C*, R> { };
+
+ // 2 argument case
+ template <class R, class A1, class A2> struct weak_result_type<R(A1, A2)> : public binary_function<A1, A2, R> { };
+ template <class R, class A1, class A2> struct weak_result_type<R (*)(A1, A2)> : public binary_function<A1, A2, R> { };
+ template <class R, class A1, class A2> struct weak_result_type<R(&)(A1, A2)> : public binary_function<A1, A2, R> { };
+ template <class R, class C, class A1> struct weak_result_type<R (C::*)(A1)> : public binary_function<C*, A1, R> { };
+ template <class R, class C, class A1> struct weak_result_type<R (C::*)(A1) const> : public binary_function<const C*, A1, R> { };
+ template <class R, class C, class A1> struct weak_result_type<R (C::*)(A1) volatile> : public binary_function<volatile C*, A1, R> { };
+ template <class R, class C, class A1> struct weak_result_type<R (C::*)(A1) const volatile> : public binary_function<const volatile C*, A1, R> { };
+
+ // 3 or more arguments
+#if EASTL_VARIADIC_TEMPLATES_ENABLED
+ template <class R, class A1, class A2, class A3, class... A4> struct weak_result_type<R(A1, A2, A3, A4...)> { typedef R result_type; };
+ template <class R, class A1, class A2, class A3, class... A4> struct weak_result_type<R(&)(A1, A2, A3, A4...)> { typedef R result_type; };
+ template <class R, class A1, class A2, class A3, class... A4> struct weak_result_type<R (*)(A1, A2, A3, A4...)> { typedef R result_type; };
+ template <class R, class C, class A1, class A2, class... A3> struct weak_result_type<R (C::*)(A1, A2, A3...)> { typedef R result_type; };
+ template <class R, class C, class A1, class A2, class... A3> struct weak_result_type<R (C::*)(A1, A2, A3...) const> { typedef R result_type; };
+ template <class R, class C, class A1, class A2, class... A3> struct weak_result_type<R (C::*)(A1, A2, A3...) volatile> { typedef R result_type; };
+ template <class R, class C, class A1, class A2, class... A3> struct weak_result_type<R (C::*)(A1, A2, A3...) const volatile> { typedef R result_type; };
+#endif
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // mem_fn_impl
+ //
+ template <class T>
+ class mem_fn_impl
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later
+ // Due to a (seemingly random) internal compiler error on VS2013 we disable eastl::unary_function and
+ // binary_function support for eastl::mem_fn as its not widely (if at all) used. If you require this support
+ // on VS2013 or below please contact us.
+ : public weak_result_type<T>
+#endif
+ {
+ public:
+ typedef T type;
+
+ private:
+ type func;
+
+ public:
+ EASTL_FORCE_INLINE mem_fn_impl(type _func) : func(_func) {}
+
+#if EASTL_VARIADIC_TEMPLATES_ENABLED
+ template <class... ArgTypes>
+ typename invoke_result<type, ArgTypes...>::type operator()(ArgTypes&&... args) const
+ {
+ return invoke(func, eastl::forward<ArgTypes>(args)...);
+ }
+#else
+ typename invoke_result<type>::type operator()() const { return invoke_impl(func); }
+
+ template <class A0>
+ typename invoke_result0<type, A0>::type operator()(A0& a0) const
+ {
+ return invoke(func, a0);
+ }
+
+ template <class A0, class A1>
+ typename invoke_result1<type, A0, A1>::type operator()(A0& a0, A1& a1) const
+ {
+ return invoke(func, a0, a1);
+ }
+
+ template <class A0, class A1, class A2>
+ typename invoke_result2<type, A0, A1, A2>::type operator()(A0& a0, A1& a1, A2& a2) const
+ {
+ return invoke(func, a0, a1, a2);
+ }
+#endif
+ }; // mem_fn_impl
+
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // mem_fn -> mem_fn_impl adapters
+ //
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R T::*> mem_fn(R T::*pm)
+ { return mem_fn_impl<R T::*>(pm); }
+
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)()> mem_fn(R (T::*pm)())
+ { return mem_fn_impl<R (T::*)()>(pm); }
+
+ template <class R, class T, class A0>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0)> mem_fn(R (T::*pm)(A0))
+ { return mem_fn_impl<R (T::*)(A0)>(pm); }
+
+ template <class R, class T, class A0, class A1>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1)> mem_fn(R (T::*pm)(A0, A1))
+ { return mem_fn_impl<R (T::*)(A0, A1)>(pm); }
+
+ template <class R, class T, class A0, class A1, class A2>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1, A2)> mem_fn(R (T::*pm)(A0, A1, A2))
+ { return mem_fn_impl<R (T::*)(A0, A1, A2)>(pm); }
+
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)() const> mem_fn(R (T::*pm)() const)
+ { return mem_fn_impl<R (T::*)() const>(pm); }
+
+ template <class R, class T, class A0>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0) const> mem_fn(R (T::*pm)(A0) const)
+ { return mem_fn_impl<R (T::*)(A0) const>(pm); }
+
+ template <class R, class T, class A0, class A1>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1) const> mem_fn(R (T::*pm)(A0, A1) const)
+ { return mem_fn_impl<R (T::*)(A0, A1) const>(pm); }
+
+ template <class R, class T, class A0, class A1, class A2>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1, A2) const> mem_fn(R (T::*pm)(A0, A1, A2) const)
+ { return mem_fn_impl<R (T::*)(A0, A1, A2) const>(pm); }
+
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)() volatile> mem_fn(R (T::*pm)() volatile)
+ { return mem_fn_impl<R (T::*)() volatile>(pm); }
+
+ template <class R, class T, class A0>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0) volatile> mem_fn(R (T::*pm)(A0) volatile)
+ { return mem_fn_impl<R (T::*)(A0) volatile>(pm); }
+
+ template <class R, class T, class A0, class A1>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1) volatile> mem_fn(R (T::*pm)(A0, A1) volatile)
+ { return mem_fn_impl<R (T::*)(A0, A1) volatile>(pm); }
+
+ template <class R, class T, class A0, class A1, class A2>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1, A2) volatile> mem_fn(R (T::*pm)(A0, A1, A2) volatile)
+ { return mem_fn_impl<R (T::*)(A0, A1, A2) volatile>(pm); }
+
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)() const volatile> mem_fn(R (T::*pm)() const volatile)
+ { return mem_fn_impl<R (T::*)() const volatile>(pm); }
+
+ template <class R, class T, class A0>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0) const volatile> mem_fn(R (T::*pm)(A0) const volatile)
+ { return mem_fn_impl<R (T::*)(A0) const volatile>(pm); }
+
+ template <class R, class T, class A0, class A1>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1) const volatile> mem_fn(R (T::*pm)(A0, A1) const volatile)
+ { return mem_fn_impl<R (T::*)(A0, A1) const volatile>(pm); }
+
+ template <class R, class T, class A0, class A1, class A2>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1, A2) const volatile> mem_fn(R (T::*pm)(A0, A1, A2) const volatile)
+ { return mem_fn_impl<R (T::*)(A0, A1, A2) const volatile>(pm); }
+
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_MEM_FN_H
diff --git a/EASTL/include/EASTL/internal/memory_base.h b/EASTL/include/EASTL/internal/memory_base.h
new file mode 100644
index 0000000..b1c3490
--- /dev/null
+++ b/EASTL/include/EASTL/internal/memory_base.h
@@ -0,0 +1,37 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTERNAL_MEMORY_BASE_H
+#define EASTL_INTERNAL_MEMORY_BASE_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This file contains basic functionality found in the standard library 'memory' header that
+// have limited or no dependencies. This allows us to utilize these utilize these functions
+// in other EASTL code while avoid circular dependencies.
+////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace eastl
+{
+ /// addressof
+ ///
+ /// From the C++11 Standard, section 20.6.12.1
+ /// Returns the actual address of the object or function referenced by r, even in the presence of an overloaded operator&.
+ ///
+ template<typename T>
+ T* addressof(T& value) EA_NOEXCEPT
+ {
+ return reinterpret_cast<T*>(&const_cast<char&>(reinterpret_cast<const volatile char&>(value)));
+ }
+
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_MEMORY_BASE_H
+
diff --git a/EASTL/include/EASTL/internal/move_help.h b/EASTL/include/EASTL/internal/move_help.h
new file mode 100644
index 0000000..97990df
--- /dev/null
+++ b/EASTL/include/EASTL/internal/move_help.h
@@ -0,0 +1,162 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_MOVE_HELP_H
+#define EASTL_INTERNAL_MOVE_HELP_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+
+// C++11's rvalue references aren't supported by earlier versions of C++.
+// It turns out that in a number of cases under earlier C++ versions we can
+// write code that uses rvalues similar to lvalues. We have macros below for
+// such cases. For example, eastl::move (same as std::move) can be treated
+// as a no-op under C++03, though with the consequence that move functionality
+// isn't taken advantage of.
+
+
+/// EASTL_MOVE
+/// Acts like eastl::move when possible. Same as C++11 std::move.
+///
+/// EASTL_MOVE_INLINE
+/// Acts like eastl::move but is implemented inline instead of a function call.
+/// This allows code to be faster in debug builds in particular.
+/// Depends on C++ compiler decltype support or a similar extension.
+///
+/// EASTL_FORWARD
+/// Acts like eastl::forward when possible. Same as C++11 std::forward.
+///
+/// EASTL_FORWARD_INLINE
+/// Acts like eastl::forward but is implemented inline instead of a function call.
+/// This allows code to be faster in debug builds in particular.
+///
+#define EASTL_MOVE(x) eastl::move(x)
+#if !defined(EA_COMPILER_NO_DECLTYPE)
+ #define EASTL_MOVE_INLINE(x) static_cast<typename eastl::remove_reference<decltype(x)>::type&&>(x)
+#elif defined(__GNUC__)
+ #define EASTL_MOVE_INLINE(x) static_cast<typename eastl::remove_reference<__typeof__(x)>::type&&>(x)
+#else
+ #define EASTL_MOVE_INLINE(x) eastl::move(x)
+#endif
+
+#define EASTL_FORWARD(T, x) eastl::forward<T>(x)
+#define EASTL_FORWARD_INLINE(T, x) eastl::forward<T>(x) // Need to investigate how to properly make a macro for this. (eastl::is_reference<T>::value ? static_cast<T&&>(static_cast<T&>(x)) : static_cast<T&&>(x))
+
+
+
+
+/// EASTL_MOVE_RANGE
+/// Acts like the eastl::move algorithm when possible. Same as C++11 std::move.
+/// Note to be confused with the single argument move: (typename remove_reference<T>::type&& move(T&& x))
+/// http://en.cppreference.com/w/cpp/algorithm/move
+/// http://en.cppreference.com/w/cpp/algorithm/move_backward
+///
+#define EASTL_MOVE_RANGE(first, last, result) eastl::move(first, last, result)
+#define EASTL_MOVE_BACKWARD_RANGE(first, last, resultEnd) eastl::move_backward(first, last, resultEnd)
+
+
+namespace eastl
+{
+ // forward
+ //
+ // forwards the argument to another function exactly as it was passed to the calling function.
+ // Not to be confused with move, this is specifically for echoing templated argument types
+ // to another function. move is specifically about making a type be an rvalue reference (i.e. movable) type.
+ //
+ // Example usage:
+ // template <class T>
+ // void WrapperFunction(T&& arg)
+ // { foo(eastl::forward<T>(arg)); }
+ //
+ // template <class... Args>
+ // void WrapperFunction(Args&&... args)
+ // { foo(eastl::forward<Args>(args)...); }
+ //
+ // See the C++ Standard, section 20.2.3
+ // http://en.cppreference.com/w/cpp/utility/forward
+ //
+ template <typename T>
+ EA_CPP14_CONSTEXPR T&& forward(typename eastl::remove_reference<T>::type& x) EA_NOEXCEPT
+ {
+ return static_cast<T&&>(x);
+ }
+
+
+ template <typename T>
+ EA_CPP14_CONSTEXPR T&& forward(typename eastl::remove_reference<T>::type&& x) EA_NOEXCEPT
+ {
+ static_assert(!is_lvalue_reference<T>::value, "forward T isn't lvalue reference");
+ return static_cast<T&&>(x);
+ }
+
+
+ // move
+ //
+ // move obtains an rvalue reference to its argument and converts it to an xvalue.
+ // Returns, by definition: static_cast<typename remove_reference<T>::type&&>(t).
+ // The primary use of this is to pass a move'd type to a function which takes T&&,
+ // and thus select that function instead of (e.g.) a function which takes T or T&.
+ // See the C++ Standard, section 20.2.3
+ // http://en.cppreference.com/w/cpp/utility/move
+ //
+ template <typename T>
+ EA_CPP14_CONSTEXPR typename eastl::remove_reference<T>::type&&
+ move(T&& x) EA_NOEXCEPT
+ {
+ return static_cast<typename eastl::remove_reference<T>::type&&>(x);
+ }
+
+
+ // move_if_noexcept
+ //
+ // Returns T&& if move-constructing T throws no exceptions. Instead returns const T& if
+ // move-constructing T throws exceptions or has no accessible copy constructor.
+ // The purpose of this is to use automatically use copy construction instead of move
+ // construction when the move may possible throw an exception.
+ // See the C++ Standard, section 20.2.3
+ // http://en.cppreference.com/w/cpp/utility/move_if_noexcept
+ //
+ #if EASTL_EXCEPTIONS_ENABLED
+ template <typename T>
+ EA_CPP14_CONSTEXPR typename eastl::conditional<!eastl::is_nothrow_move_constructible<T>::value &&
+ eastl::is_copy_constructible<T>::value, const T&, T&&>::type
+ move_if_noexcept(T& x) EA_NOEXCEPT
+ {
+ return eastl::move(x);
+ }
+ #else
+ template <typename T>
+ EA_CPP14_CONSTEXPR T&&
+ move_if_noexcept(T& x) EA_NOEXCEPT
+ {
+ return eastl::move(x);
+ }
+ #endif
+
+} // namespace eastl
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/pair_fwd_decls.h b/EASTL/include/EASTL/internal/pair_fwd_decls.h
new file mode 100644
index 0000000..a716482
--- /dev/null
+++ b/EASTL/include/EASTL/internal/pair_fwd_decls.h
@@ -0,0 +1,16 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_PAIR_FWD_DECLS_H
+#define EASTL_PAIR_FWD_DECLS_H
+
+#include <EASTL/internal/config.h>
+
+namespace eastl
+{
+ template <typename T1, typename T2>
+ struct pair;
+}
+
+#endif // EASTL_PAIR_FWD_DECLS_H
diff --git a/EASTL/include/EASTL/internal/piecewise_construct_t.h b/EASTL/include/EASTL/internal/piecewise_construct_t.h
new file mode 100644
index 0000000..d853f0e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/piecewise_construct_t.h
@@ -0,0 +1,46 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_PIECEWISE_CONSTRUCT_T_H
+#define EASTL_INTERNAL_PIECEWISE_CONSTRUCT_T_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////////
+ /// piecewise_construct_t
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/piecewise_construct_t
+ ///
+ struct piecewise_construct_t
+ {
+ explicit piecewise_construct_t() = default;
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// piecewise_construct
+ ///
+ /// A tag type used to disambiguate between function overloads that take two tuple arguments.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/piecewise_construct
+ ///
+ EA_CONSTEXPR piecewise_construct_t piecewise_construct = eastl::piecewise_construct_t();
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/red_black_tree.h b/EASTL/include/EASTL/internal/red_black_tree.h
new file mode 100644
index 0000000..5b29b7c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/red_black_tree.h
@@ -0,0 +1,2366 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_RED_BLACK_TREE_H
+#define EASTL_RED_BLACK_TREE_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/tuple.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <new>
+#include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+// 4512 - 'class' : assignment operator could not be generated
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4512 4530 4571);
+
+
+namespace eastl
+{
+
+ /// EASTL_RBTREE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_RBTREE_DEFAULT_NAME
+ #define EASTL_RBTREE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " rbtree" // Unless the user overrides something, this is "EASTL rbtree".
+ #endif
+
+
+ /// EASTL_RBTREE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_RBTREE_DEFAULT_ALLOCATOR
+ #define EASTL_RBTREE_DEFAULT_ALLOCATOR allocator_type(EASTL_RBTREE_DEFAULT_NAME)
+ #endif
+
+
+ /// EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ ///
+ #ifndef EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ #define EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR 0
+ #endif
+
+
+ /// RBTreeColor
+ ///
+ enum RBTreeColor
+ {
+ kRBTreeColorRed,
+ kRBTreeColorBlack
+ };
+
+
+
+ /// RBTreeColor
+ ///
+ enum RBTreeSide
+ {
+ kRBTreeSideLeft,
+ kRBTreeSideRight
+ };
+
+
+
+ /// rbtree_node_base
+ ///
+ /// We define a rbtree_node_base separately from rbtree_node (below), because it
+ /// allows us to have non-templated operations, and it makes it so that the
+ /// rbtree anchor node doesn't carry a T with it, which would waste space and
+ /// possibly lead to surprising the user due to extra Ts existing that the user
+ /// didn't explicitly create. The downside to all of this is that it makes debug
+ /// viewing of an rbtree harder, given that the node pointers are of type
+ /// rbtree_node_base and not rbtree_node.
+ ///
+ struct rbtree_node_base
+ {
+ typedef rbtree_node_base this_type;
+
+ public:
+ this_type* mpNodeRight; // Declared first because it is used most often.
+ this_type* mpNodeLeft;
+ this_type* mpNodeParent;
+ char mColor; // We only need one bit here, would be nice if we could stuff that bit somewhere else.
+ };
+
+
+ /// rbtree_node
+ ///
+ template <typename Value>
+ struct rbtree_node : public rbtree_node_base
+ {
+ Value mValue; // For set and multiset, this is the user's value, for map and multimap, this is a pair of key/value.
+
+ // This type is never constructed, so to avoid a MSVC warning we "delete" the copy constructor.
+ //
+ // Potentially we could provide a constructor that would satisfy the compiler and change the code to use this constructor
+ // instead of constructing mValue in place within an unconstructed rbtree_node.
+ #if defined(_MSC_VER)
+ rbtree_node(const rbtree_node&) = delete;
+ #endif
+ };
+
+
+
+
+ // rbtree_node_base functions
+ //
+ // These are the fundamental functions that we use to maintain the
+ // tree. The bulk of the work of the tree maintenance is done in
+ // these functions.
+ //
+ EASTL_API rbtree_node_base* RBTreeIncrement (const rbtree_node_base* pNode);
+ EASTL_API rbtree_node_base* RBTreeDecrement (const rbtree_node_base* pNode);
+ EASTL_API rbtree_node_base* RBTreeGetMinChild (const rbtree_node_base* pNode);
+ EASTL_API rbtree_node_base* RBTreeGetMaxChild (const rbtree_node_base* pNode);
+ EASTL_API size_t RBTreeGetBlackCount(const rbtree_node_base* pNodeTop,
+ const rbtree_node_base* pNodeBottom);
+ EASTL_API void RBTreeInsert ( rbtree_node_base* pNode,
+ rbtree_node_base* pNodeParent,
+ rbtree_node_base* pNodeAnchor,
+ RBTreeSide insertionSide);
+ EASTL_API void RBTreeErase ( rbtree_node_base* pNode,
+ rbtree_node_base* pNodeAnchor);
+
+
+
+
+
+
+
+ /// rbtree_iterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct rbtree_iterator
+ {
+ typedef rbtree_iterator<T, Pointer, Reference> this_type;
+ typedef rbtree_iterator<T, T*, T&> iterator;
+ typedef rbtree_iterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef rbtree_node_base base_node_type;
+ typedef rbtree_node<T> node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ rbtree_iterator();
+ explicit rbtree_iterator(const node_type* pNode);
+ rbtree_iterator(const iterator& x);
+ rbtree_iterator& operator=(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ rbtree_iterator& operator++();
+ rbtree_iterator operator++(int);
+
+ rbtree_iterator& operator--();
+ rbtree_iterator operator--(int);
+
+ }; // rbtree_iterator
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // rb_base_compare_ebo
+ //
+ // Utilizes the "empty base-class optimization" to reduce the size of the rbtree
+ // when its Compare template argument is an empty class.
+ ///////////////////////////////////////////////////////////////////////////////
+
+ template <typename Compare, bool /*isEmpty*/ = is_empty<Compare>::value>
+ struct rb_base_compare_ebo
+ {
+ protected:
+ rb_base_compare_ebo() : mCompare() {}
+ rb_base_compare_ebo(const Compare& compare) : mCompare(compare) {}
+
+ Compare& get_compare() { return mCompare; }
+ const Compare& get_compare() const { return mCompare; }
+
+ template <typename T>
+ bool compare(const T& lhs, const T& rhs)
+ {
+ return mCompare(lhs, rhs);
+ }
+
+ template <typename T>
+ bool compare(const T& lhs, const T& rhs) const
+ {
+ return mCompare(lhs, rhs);
+ }
+
+ private:
+ Compare mCompare;
+ };
+
+ template <typename Compare>
+ struct rb_base_compare_ebo<Compare, true> : private Compare
+ {
+ protected:
+ rb_base_compare_ebo() {}
+ rb_base_compare_ebo(const Compare& compare) : Compare(compare) {}
+
+ Compare& get_compare() { return *this; }
+ const Compare& get_compare() const { return *this; }
+
+ template <typename T>
+ bool compare(const T& lhs, const T& rhs)
+ {
+ return Compare::operator()(lhs, rhs);
+ }
+
+ template <typename T>
+ bool compare(const T& lhs, const T& rhs) const
+ {
+ return Compare::operator()(lhs, rhs);
+ }
+ };
+
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // rb_base
+ //
+ // This class allows us to use a generic rbtree as the basis of map, multimap,
+ // set, and multiset transparently. The vital template parameters for this are
+ // the ExtractKey and the bUniqueKeys parameters.
+ //
+ // If the rbtree has a value type of the form pair<T1, T2> (i.e. it is a map or
+ // multimap and not a set or multiset) and a key extraction policy that returns
+ // the first part of the pair, the rbtree gets a mapped_type typedef.
+ // If it satisfies those criteria and also has unique keys, then it also gets an
+ // operator[] (which only map and set have and multimap and multiset don't have).
+ //
+ ///////////////////////////////////////////////////////////////////////////////
+
+
+
+ /// rb_base
+ /// This specialization is used for 'set'. In this case, Key and Value
+ /// will be the same as each other and ExtractKey will be eastl::use_self.
+ ///
+ template <typename Key, typename Value, typename Compare, typename ExtractKey, bool bUniqueKeys, typename RBTree>
+ struct rb_base : public rb_base_compare_ebo<Compare>
+ {
+ typedef ExtractKey extract_key;
+
+ protected:
+ using rb_base_compare_ebo<Compare>::compare;
+ using rb_base_compare_ebo<Compare>::get_compare;
+
+ public:
+ rb_base() {}
+ rb_base(const Compare& compare) : rb_base_compare_ebo<Compare>(compare) {}
+ };
+
+
+ /// rb_base
+ /// This class is used for 'multiset'.
+ /// In this case, Key and Value will be the same as each
+ /// other and ExtractKey will be eastl::use_self.
+ ///
+ template <typename Key, typename Value, typename Compare, typename ExtractKey, typename RBTree>
+ struct rb_base<Key, Value, Compare, ExtractKey, false, RBTree> : public rb_base_compare_ebo<Compare>
+ {
+ typedef ExtractKey extract_key;
+
+ protected:
+ using rb_base_compare_ebo<Compare>::compare;
+ using rb_base_compare_ebo<Compare>::get_compare;
+
+ public:
+ rb_base() {}
+ rb_base(const Compare& compare) : rb_base_compare_ebo<Compare>(compare) {}
+ };
+
+
+ /// rb_base
+ /// This specialization is used for 'map'.
+ ///
+ template <typename Key, typename Pair, typename Compare, typename RBTree>
+ struct rb_base<Key, Pair, Compare, eastl::use_first<Pair>, true, RBTree> : public rb_base_compare_ebo<Compare>
+ {
+ typedef eastl::use_first<Pair> extract_key;
+
+ using rb_base_compare_ebo<Compare>::compare;
+ using rb_base_compare_ebo<Compare>::get_compare;
+
+ public:
+ rb_base() {}
+ rb_base(const Compare& compare) : rb_base_compare_ebo<Compare>(compare) {}
+ };
+
+
+ /// rb_base
+ /// This specialization is used for 'multimap'.
+ ///
+ template <typename Key, typename Pair, typename Compare, typename RBTree>
+ struct rb_base<Key, Pair, Compare, eastl::use_first<Pair>, false, RBTree> : public rb_base_compare_ebo<Compare>
+ {
+ typedef eastl::use_first<Pair> extract_key;
+
+ using rb_base_compare_ebo<Compare>::compare;
+ using rb_base_compare_ebo<Compare>::get_compare;
+
+ public:
+ rb_base() {}
+ rb_base(const Compare& compare) : rb_base_compare_ebo<Compare>(compare) {}
+ };
+
+
+ /// rbtree
+ ///
+ /// rbtree is the red-black tree basis for the map, multimap, set, and multiset
+ /// containers. Just about all the work of those containers is done here, and
+ /// they are merely a shell which sets template policies that govern the code
+ /// generation for this rbtree.
+ ///
+ /// This rbtree implementation is pretty much the same as all other modern
+ /// rbtree implementations, as the topic is well known and researched. We may
+ /// choose to implement a "relaxed balancing" option at some point in the
+ /// future if it is deemed worthwhile. Most rbtree implementations don't do this.
+ ///
+ /// The primary rbtree member variable is mAnchor, which is a node_type and
+ /// acts as the end node. However, like any other node, it has mpNodeLeft,
+ /// mpNodeRight, and mpNodeParent members. We do the conventional trick of
+ /// assigning begin() (left-most rbtree node) to mpNodeLeft, assigning
+ /// 'end() - 1' (a.k.a. rbegin()) to mpNodeRight, and assigning the tree root
+ /// node to mpNodeParent.
+ ///
+ /// Compare (functor): This is a comparison class which defaults to 'less'.
+ /// It is a common STL thing which takes two arguments and returns true if
+ /// the first is less than the second.
+ ///
+ /// ExtractKey (functor): This is a class which gets the key from a stored
+ /// node. With map and set, the node is a pair, whereas with set and multiset
+ /// the node is just the value. ExtractKey will be either eastl::use_first (map and multimap)
+ /// or eastl::use_self (set and multiset).
+ ///
+ /// bMutableIterators (bool): true if rbtree::iterator is a mutable
+ /// iterator, false if iterator and const_iterator are both const iterators.
+ /// It will be true for map and multimap and false for set and multiset.
+ ///
+ /// bUniqueKeys (bool): true if the keys are to be unique, and false if there
+ /// can be multiple instances of a given key. It will be true for set and map
+ /// and false for multiset and multimap.
+ ///
+ /// To consider: Add an option for relaxed tree balancing. This could result
+ /// in performance improvements but would require a more complicated implementation.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// find_as
+ /// In order to support the ability to have a tree of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the tree's key type. See the find_as function
+ /// for more documentation on this.
+ ///
+ template <typename Key, typename Value, typename Compare, typename Allocator,
+ typename ExtractKey, bool bMutableIterators, bool bUniqueKeys>
+ class rbtree
+ : public rb_base<Key, Value, Compare, ExtractKey, bUniqueKeys,
+ rbtree<Key, Value, Compare, Allocator, ExtractKey, bMutableIterators, bUniqueKeys> >
+ {
+ public:
+ typedef ptrdiff_t difference_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef Key key_type;
+ typedef Value value_type;
+ typedef rbtree_node<value_type> node_type;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+
+ typedef typename type_select<bMutableIterators,
+ rbtree_iterator<value_type, value_type*, value_type&>,
+ rbtree_iterator<value_type, const value_type*, const value_type&> >::type iterator;
+ typedef rbtree_iterator<value_type, const value_type*, const value_type&> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ typedef Allocator allocator_type;
+ typedef Compare key_compare;
+ typedef typename type_select<bUniqueKeys, eastl::pair<iterator, bool>, iterator>::type insert_return_type; // map/set::insert return a pair, multimap/multiset::iterator return an iterator.
+ typedef rbtree<Key, Value, Compare, Allocator,
+ ExtractKey, bMutableIterators, bUniqueKeys> this_type;
+ typedef rb_base<Key, Value, Compare, ExtractKey, bUniqueKeys, this_type> base_type;
+ typedef integral_constant<bool, bUniqueKeys> has_unique_keys_type;
+ typedef typename base_type::extract_key extract_key;
+
+ protected:
+ using base_type::compare;
+ using base_type::get_compare;
+
+ public:
+ rbtree_node_base mAnchor; /// This node acts as end() and its mpLeft points to begin(), and mpRight points to rbegin() (the last node on the right).
+ size_type mnSize; /// Stores the count of nodes in the tree (not counting the anchor node).
+ allocator_type mAllocator; // To do: Use base class optimization to make this go away.
+
+ public:
+ // ctor/dtor
+ rbtree();
+ rbtree(const allocator_type& allocator);
+ rbtree(const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR);
+ rbtree(const this_type& x);
+ rbtree(this_type&& x);
+ rbtree(this_type&& x, const allocator_type& allocator);
+
+ template <typename InputIterator>
+ rbtree(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR);
+
+ ~rbtree();
+
+ public:
+ // properties
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ const key_compare& key_comp() const { return get_compare(); }
+ key_compare& key_comp() { return get_compare(); }
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ public:
+ // iterators
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ public:
+ bool empty() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+
+ template <class... Args>
+ insert_return_type emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args);
+
+ // Standard conversion overload to avoid the overhead of mismatched 'pair<const Key, Value>' types.
+ template <class P, class = typename eastl::enable_if<eastl::is_constructible<value_type, P&&>::value>::type>
+ insert_return_type insert(P&& otherValue);
+
+ // Currently limited to value_type instead of P because it collides with insert(InputIterator, InputIterator).
+ // To allow this to work with templated P we need to implement a compile-time specialization for the
+ // case that P&& is const_iterator and have that specialization handle insert(InputIterator, InputIterator)
+ // instead of insert(InputIterator, InputIterator). Curiously, neither libstdc++ nor libc++
+ // implement this function either, which suggests they ran into the same problem I did here
+ // and haven't yet resolved it (at least as of March 2014, GCC 4.8.1).
+ iterator insert(const_iterator hint, value_type&& value);
+
+ /// map::insert and set::insert return a pair, while multimap::insert and
+ /// multiset::insert return an iterator.
+ insert_return_type insert(const value_type& value);
+
+ // C++ standard: inserts value if and only if there is no element with
+ // key equivalent to the key of t in containers with unique keys; always
+ // inserts value in containers with equivalent keys. Always returns the
+ // iterator pointing to the element with key equivalent to the key of value.
+ // iterator position is a hint pointing to where the insert should start
+ // to search. However, there is a potential defect/improvement report on this behaviour:
+ // LWG issue #233 (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1780.html)
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ iterator insert(const_iterator position, const value_type& value);
+
+ void insert(std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ // TODO(rparolin):
+ // insert_return_type insert(node_type&& nh);
+ // iterator insert(const_iterator hint, node_type&& nh);
+
+ template <class M> pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj);
+ template <class M> pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj);
+ template <class M> iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj);
+ template <class M> iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ // For some reason, multiple STL versions make a specialization
+ // for erasing an array of key_types. I'm pretty sure we don't
+ // need this, but just to be safe we will follow suit.
+ // The implementation is trivial. Returns void because the values
+ // could well be randomly distributed throughout the tree and thus
+ // a return value would be nearly meaningless.
+ void erase(const key_type* first, const key_type* last);
+
+ void clear();
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+ /// Implements a find whereby the user supplies a comparison of a different type
+ /// than the tree's value_type. A useful case of this is one whereby you have
+ /// a container of string objects but want to do searches via passing in char pointers.
+ /// The problem is that without this kind of find, you need to do the expensive operation
+ /// of converting the char pointer to a string so it can be used as the argument to the
+ /// find function.
+ ///
+ /// Example usage (note that the compare uses string as first type and char* as second):
+ /// set<string> strings;
+ /// strings.find_as("hello", less_2<string, const char*>());
+ ///
+ template <typename U, typename Compare2> iterator find_as(const U& u, Compare2 compare2);
+ template <typename U, typename Compare2> const_iterator find_as(const U& u, Compare2 compare2) const;
+
+ iterator lower_bound(const key_type& key);
+ const_iterator lower_bound(const key_type& key) const;
+
+ iterator upper_bound(const key_type& key);
+ const_iterator upper_bound(const key_type& key) const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ node_type* DoAllocateNode();
+ void DoFreeNode(node_type* pNode);
+
+ node_type* DoCreateNodeFromKey(const key_type& key);
+
+ template<class... Args>
+ node_type* DoCreateNode(Args&&... args);
+ node_type* DoCreateNode(const value_type& value);
+ node_type* DoCreateNode(value_type&& value);
+ node_type* DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent);
+
+ node_type* DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest);
+ void DoNukeSubtree(node_type* pNode);
+
+ template <class... Args>
+ eastl::pair<iterator, bool> DoInsertValue(true_type, Args&&... args);
+
+ template <class... Args>
+ iterator DoInsertValue(false_type, Args&&... args);
+
+ eastl::pair<iterator, bool> DoInsertValue(true_type, value_type&& value);
+ iterator DoInsertValue(false_type, value_type&& value);
+
+ template <class... Args>
+ iterator DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, Args&&... args);
+ iterator DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, node_type* pNodeNew);
+
+ eastl::pair<iterator, bool> DoInsertKey(true_type, const key_type& key);
+ iterator DoInsertKey(false_type, const key_type& key);
+
+ template <class... Args>
+ iterator DoInsertValueHint(true_type, const_iterator position, Args&&... args);
+
+ template <class... Args>
+ iterator DoInsertValueHint(false_type, const_iterator position, Args&&... args);
+
+ iterator DoInsertValueHint(true_type, const_iterator position, value_type&& value);
+ iterator DoInsertValueHint(false_type, const_iterator position, value_type&& value);
+
+ iterator DoInsertKey(true_type, const_iterator position, const key_type& key); // By design we return iterator and not a pair.
+ iterator DoInsertKey(false_type, const_iterator position, const key_type& key);
+ iterator DoInsertKeyImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key);
+
+ node_type* DoGetKeyInsertionPositionUniqueKeys(bool& canInsert, const key_type& key);
+ node_type* DoGetKeyInsertionPositionNonuniqueKeys(const key_type& key);
+
+ node_type* DoGetKeyInsertionPositionUniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key);
+ node_type* DoGetKeyInsertionPositionNonuniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key);
+
+ }; // rbtree
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree_node_base functions
+ ///////////////////////////////////////////////////////////////////////
+
+ EASTL_API inline rbtree_node_base* RBTreeGetMinChild(const rbtree_node_base* pNodeBase)
+ {
+ while(pNodeBase->mpNodeLeft)
+ pNodeBase = pNodeBase->mpNodeLeft;
+ return const_cast<rbtree_node_base*>(pNodeBase);
+ }
+
+ EASTL_API inline rbtree_node_base* RBTreeGetMaxChild(const rbtree_node_base* pNodeBase)
+ {
+ while(pNodeBase->mpNodeRight)
+ pNodeBase = pNodeBase->mpNodeRight;
+ return const_cast<rbtree_node_base*>(pNodeBase);
+ }
+
+ // The rest of the functions are non-trivial and are found in
+ // the corresponding .cpp file to this file.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree_iterator functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator()
+ : mpNode(NULL) { }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator(const node_type* pNode)
+ : mpNode(static_cast<node_type*>(const_cast<node_type*>(pNode))) { }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator(const iterator& x)
+ : mpNode(x.mpNode) { }
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type&
+ rbtree_iterator<T, Pointer, Reference>::operator=(const iterator& x)
+ {
+ mpNode = x.mpNode;
+ return *this;
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::reference
+ rbtree_iterator<T, Pointer, Reference>::operator*() const
+ { return mpNode->mValue; }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::pointer
+ rbtree_iterator<T, Pointer, Reference>::operator->() const
+ { return &mpNode->mValue; }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type&
+ rbtree_iterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(RBTreeIncrement(mpNode));
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type
+ rbtree_iterator<T, Pointer, Reference>::operator++(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(RBTreeIncrement(mpNode));
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type&
+ rbtree_iterator<T, Pointer, Reference>::operator--()
+ {
+ mpNode = static_cast<node_type*>(RBTreeDecrement(mpNode));
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type
+ rbtree_iterator<T, Pointer, Reference>::operator--(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(RBTreeDecrement(mpNode));
+ return temp;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template paremeters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const rbtree_iterator<T, PointerA, ReferenceA>& a,
+ const rbtree_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const rbtree_iterator<T, PointerA, ReferenceA>& a,
+ const rbtree_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const rbtree_iterator<T, Pointer, Reference>& a,
+ const rbtree_iterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree()
+ : mAnchor(),
+ mnSize(0),
+ mAllocator(EASTL_RBTREE_DEFAULT_NAME)
+ {
+ reset_lose_memory();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const allocator_type& allocator)
+ : mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset_lose_memory();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const C& compare, const allocator_type& allocator)
+ : base_type(compare),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset_lose_memory();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const this_type& x)
+ : base_type(x.get_compare()),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(x.mAllocator)
+ {
+ reset_lose_memory();
+
+ if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node.
+ {
+ mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor);
+ mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent);
+ mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent);
+ mnSize = x.mnSize;
+ }
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(this_type&& x)
+ : base_type(x.get_compare()),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(x.mAllocator)
+ {
+ reset_lose_memory();
+ swap(x);
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(this_type&& x, const allocator_type& allocator)
+ : base_type(x.get_compare()),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset_lose_memory();
+ swap(x); // swap will directly or indirectly handle the possibility that mAllocator != x.mAllocator.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename InputIterator>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(InputIterator first, InputIterator last, const C& compare, const allocator_type& allocator)
+ : base_type(compare),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset_lose_memory();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; first != last; ++first)
+ insert(*first);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ clear();
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::~rbtree()
+ {
+ // Erase the entire tree. DoNukeSubtree is not a
+ // conventional erase function, as it does no rebalancing.
+ DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline const typename rbtree<K, V, C, A, E, bM, bU>::allocator_type&
+ rbtree<K, V, C, A, E, bM, bU>::get_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::allocator_type&
+ rbtree<K, V, C, A, E, bM, bU>::get_allocator() EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::set_allocator(const allocator_type& allocator)
+ {
+ mAllocator = allocator;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::size_type
+ rbtree<K, V, C, A, E, bM, bU>::size() const EA_NOEXCEPT
+ { return mnSize; }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline bool rbtree<K, V, C, A, E, bM, bU>::empty() const EA_NOEXCEPT
+ { return (mnSize == 0); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::begin() EA_NOEXCEPT
+ { return iterator(static_cast<node_type*>(mAnchor.mpNodeLeft)); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::begin() const EA_NOEXCEPT
+ { return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(mAnchor.mpNodeLeft))); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::cbegin() const EA_NOEXCEPT
+ { return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(mAnchor.mpNodeLeft))); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::end() EA_NOEXCEPT
+ { return iterator(static_cast<node_type*>(&mAnchor)); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::end() const EA_NOEXCEPT
+ { return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(&mAnchor))); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::cend() const EA_NOEXCEPT
+ { return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(&mAnchor))); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rbegin() EA_NOEXCEPT
+ { return reverse_iterator(end()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rbegin() const EA_NOEXCEPT
+ { return const_reverse_iterator(end()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::crbegin() const EA_NOEXCEPT
+ { return const_reverse_iterator(end()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rend() EA_NOEXCEPT
+ { return reverse_iterator(begin()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rend() const EA_NOEXCEPT
+ { return const_reverse_iterator(begin()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::crend() const EA_NOEXCEPT
+ { return const_reverse_iterator(begin()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::this_type&
+ rbtree<K, V, C, A, E, bM, bU>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mAllocator = x.mAllocator;
+ #endif
+
+ get_compare() = x.get_compare();
+
+ if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node.
+ {
+ mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor);
+ mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent);
+ mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent);
+ mnSize = x.mnSize;
+ }
+ }
+ return *this;
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::this_type&
+ rbtree<K, V, C, A, E, bM, bU>::operator=(this_type&& x)
+ {
+ if(this != &x)
+ {
+ clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor.
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+ return *this;
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::this_type&
+ rbtree<K, V, C, A, E, bM, bU>::operator=(std::initializer_list<value_type> ilist)
+ {
+ // The simplest means of doing this is to clear and insert. There probably isn't a generic
+ // solution that's any more efficient without having prior knowledge of the ilist contents.
+ clear();
+
+ for(typename std::initializer_list<value_type>::iterator it = ilist.begin(), itEnd = ilist.end(); it != itEnd; ++it)
+ DoInsertValue(has_unique_keys_type(), eastl::move(*it));
+
+ return *this;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ void rbtree<K, V, C, A, E, bM, bU>::swap(this_type& x)
+ {
+ #if EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ if(mAllocator == x.mAllocator) // If allocators are equivalent...
+ #endif
+ {
+ // Most of our members can be exchaged by a basic swap:
+ // We leave mAllocator as-is.
+ eastl::swap(mnSize, x.mnSize);
+ eastl::swap(get_compare(), x.get_compare());
+ #if !EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ eastl::swap(mAllocator, x.mAllocator);
+ #endif
+
+
+ // However, because our anchor node is a part of our class instance and not
+ // dynamically allocated, we can't do a swap of it but must do a more elaborate
+ // procedure. This is the downside to having the mAnchor be like this, but
+ // otherwise we consider it a good idea to avoid allocating memory for a
+ // nominal container instance.
+
+ // We optimize for the expected most common case: both pointers being non-null.
+ if(mAnchor.mpNodeParent && x.mAnchor.mpNodeParent) // If both pointers are non-null...
+ {
+ eastl::swap(mAnchor.mpNodeRight, x.mAnchor.mpNodeRight);
+ eastl::swap(mAnchor.mpNodeLeft, x.mAnchor.mpNodeLeft);
+ eastl::swap(mAnchor.mpNodeParent, x.mAnchor.mpNodeParent);
+
+ // We need to fix up the anchors to point to themselves (we can't just swap them).
+ mAnchor.mpNodeParent->mpNodeParent = &mAnchor;
+ x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor;
+ }
+ else if(mAnchor.mpNodeParent)
+ {
+ x.mAnchor.mpNodeRight = mAnchor.mpNodeRight;
+ x.mAnchor.mpNodeLeft = mAnchor.mpNodeLeft;
+ x.mAnchor.mpNodeParent = mAnchor.mpNodeParent;
+ x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor;
+
+ // We need to fix up our anchor to point it itself (we can't have it swap with x).
+ mAnchor.mpNodeRight = &mAnchor;
+ mAnchor.mpNodeLeft = &mAnchor;
+ mAnchor.mpNodeParent = NULL;
+ }
+ else if(x.mAnchor.mpNodeParent)
+ {
+ mAnchor.mpNodeRight = x.mAnchor.mpNodeRight;
+ mAnchor.mpNodeLeft = x.mAnchor.mpNodeLeft;
+ mAnchor.mpNodeParent = x.mAnchor.mpNodeParent;
+ mAnchor.mpNodeParent->mpNodeParent = &mAnchor;
+
+ // We need to fix up x's anchor to point it itself (we can't have it swap with us).
+ x.mAnchor.mpNodeRight = &x.mAnchor;
+ x.mAnchor.mpNodeLeft = &x.mAnchor;
+ x.mAnchor.mpNodeParent = NULL;
+ } // Else both are NULL and there is nothing to do.
+ }
+ #if EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ else
+ {
+ const this_type temp(*this); // Can't call eastl::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator.
+ rbtree<K, V, C, A, E, bM, bU>::emplace(Args&&... args)
+ {
+ return DoInsertValue(has_unique_keys_type(), eastl::forward<Args>(args)...);
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::emplace_hint(const_iterator position, Args&&... args)
+ {
+ return DoInsertValueHint(has_unique_keys_type(), position, eastl::forward<Args>(args)...);
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class P, class>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator.
+ rbtree<K, V, C, A, E, bM, bU>::insert(P&& otherValue)
+ {
+ // Need to use forward instead of move because P&& is a "universal reference" instead of an rvalue reference.
+ return emplace(eastl::forward<P>(otherValue));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert(const_iterator position, value_type&& value)
+ {
+ return DoInsertValueHint(has_unique_keys_type(), position, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator.
+ rbtree<K, V, C, A, E, bM, bU>::insert(const value_type& value)
+ {
+ return DoInsertValue(has_unique_keys_type(), value);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert(const_iterator position, const value_type& value)
+ {
+ return DoInsertValueHint(has_unique_keys_type(), position, value);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class M>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::insert_or_assign(const key_type& k, M&& obj)
+ {
+ auto iter = find(k);
+
+ if(iter == end())
+ {
+ return insert(value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return {iter, false};
+ }
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class M>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::insert_or_assign(key_type&& k, M&& obj)
+ {
+ auto iter = find(k);
+
+ if(iter == end())
+ {
+ return insert(value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return {iter, false};
+ }
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class M>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert_or_assign(const_iterator hint, const key_type& k, M&& obj)
+ {
+ auto iter = find(k);
+
+ if(iter == end())
+ {
+ return insert(hint, value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return iter;
+ }
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class M>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert_or_assign(const_iterator hint, key_type&& k, M&& obj)
+ {
+ auto iter = find(k);
+
+ if(iter == end())
+ {
+ return insert(hint, value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return iter;
+ }
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoGetKeyInsertionPositionUniqueKeys(bool& canInsert, const key_type& key)
+ {
+ // This code is essentially a slightly modified copy of the the rbtree::insert
+ // function whereby this version takes a key and not a full value_type.
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pLowerBound = (node_type*)&mAnchor; // Set it to the container end for now.
+ node_type* pParent; // This will be where we insert the new node.
+
+ bool bValueLessThanNode = true; // If the tree is empty, this will result in an insertion at the front.
+
+ // Find insertion position of the value. This will either be a position which
+ // already contains the value, a position which is greater than the value or
+ // end(), which we treat like a position which is greater than the value.
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ bValueLessThanNode = compare(key, extractKey(pCurrent->mValue));
+ pLowerBound = pCurrent;
+
+ if(bValueLessThanNode)
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ pParent = pLowerBound; // pLowerBound is actually upper bound right now (i.e. it is > value instead of <=), but we will make it the lower bound below.
+
+ if(bValueLessThanNode) // If we ended up on the left side of the last parent node...
+ {
+ if(EASTL_LIKELY(pLowerBound != (node_type*)mAnchor.mpNodeLeft)) // If the tree was empty or if we otherwise need to insert at the very front of the tree...
+ {
+ // At this point, pLowerBound points to a node which is > than value.
+ // Move it back by one, so that it points to a node which is <= value.
+ pLowerBound = (node_type*)RBTreeDecrement(pLowerBound);
+ }
+ else
+ {
+ canInsert = true;
+ return pLowerBound;
+ }
+ }
+
+ // Since here we require values to be unique, we will do nothing if the value already exists.
+ if(compare(extractKey(pLowerBound->mValue), key)) // If the node is < the value (i.e. if value is >= the node)...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pLowerBound->mValue))); // Validate that the compare function is sane.
+ canInsert = true;
+ return pParent;
+ }
+
+ // The item already exists (as found by the compare directly above), so return false.
+ canInsert = false;
+ return pLowerBound;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoGetKeyInsertionPositionNonuniqueKeys(const key_type& key)
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+ extract_key extractKey;
+
+ while(pCurrent)
+ {
+ pRangeEnd = pCurrent;
+
+ if(compare(key, extractKey(pCurrent->mValue)))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ return pRangeEnd;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(true_type, value_type&& value)
+ {
+ extract_key extractKey;
+ key_type key(extractKey(value));
+ bool canInsert;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+
+ if(canInsert)
+ {
+ const iterator itResult(DoInsertValueImpl(pPosition, false, key, eastl::move(value)));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ return pair<iterator, bool>(iterator(pPosition), false);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(false_type, value_type&& value)
+ {
+ extract_key extractKey;
+ key_type key(extractKey(value));
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key);
+
+ return DoInsertValueImpl(pPosition, false, key, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(true_type, Args&&... args) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ // Note that we return a pair and not an iterator. This is because the C++ standard for map
+ // and set is to return a pair and not just an iterator.
+
+ node_type* pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ const key_type& key = extract_key{}(pNodeNew->mValue);
+
+ bool canInsert;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+
+ if(canInsert)
+ {
+ iterator itResult(DoInsertValueImpl(pPosition, false, key, pNodeNew));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ DoFreeNode(pNodeNew);
+ return pair<iterator, bool>(iterator(pPosition), false);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(false_type, Args&&... args) // false_type means keys are not unique.
+ {
+ // We have a problem here if sizeof(value_type) is too big for the stack. We may want to consider having a specialization for large value_types.
+ // To do: Change this so that we call DoCreateNode(eastl::forward<Args>(args)...) here and use the value from the resulting pNode to get the
+ // key, and make DoInsertValueImpl take that node as an argument. That way there is no value created on the stack.
+
+ node_type* const pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ const key_type& key = extract_key{}(pNodeNew->mValue);
+
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key);
+
+ return DoInsertValueImpl(pPosition, false, key, pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, Args&&... args)
+ {
+ node_type* const pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+
+ return DoInsertValueImpl(pNodeParent, bForceToLeft, key, pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, node_type* pNodeNew)
+ {
+ EASTL_ASSERT_MSG(pNodeNew != nullptr, "node to insert to the rbtree must not be null");
+
+ RBTreeSide side;
+ extract_key extractKey;
+
+ // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+ // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+ // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+ if(bForceToLeft || (pNodeParent == &mAnchor) || compare(key, extractKey(pNodeParent->mValue)))
+ side = kRBTreeSideLeft;
+ else
+ side = kRBTreeSideRight;
+
+ RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+ mnSize++;
+
+ return iterator(pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(true_type, const key_type& key) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ // Note that we return a pair and not an iterator. This is because the C++ standard for map
+ // and set is to return a pair and not just an iterator.
+ bool canInsert;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+
+ if(canInsert)
+ {
+ const iterator itResult(DoInsertKeyImpl(pPosition, false, key));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ return pair<iterator, bool>(iterator(pPosition), false);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(false_type, const key_type& key) // false_type means keys are not unique.
+ {
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key);
+
+ return DoInsertKeyImpl(pPosition, false, key);
+ }
+
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoGetKeyInsertionPositionUniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key)
+ {
+ extract_key extractKey;
+
+ if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+ {
+ iterator itNext(position.mpNode);
+ ++itNext;
+
+ // To consider: Change this so that 'position' specifies the position after
+ // where the insertion goes and not the position before where the insertion goes.
+ // Doing so would make this more in line with user expectations and with LWG #233.
+ const bool bPositionLessThanValue = compare(extractKey(position.mpNode->mValue), key);
+
+ if(bPositionLessThanValue) // If (value > *position)...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(position.mpNode->mValue))); // Validate that the compare function is sane.
+
+ const bool bValueLessThanNext = compare(key, extractKey(itNext.mpNode->mValue));
+
+ if(bValueLessThanNext) // If value < *itNext...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(itNext.mpNode->mValue), key)); // Validate that the compare function is sane.
+
+ if(position.mpNode->mpNodeRight)
+ {
+ bForceToLeft = true; // Specifically insert in front of (to the left of) itNext (and thus after 'position').
+ return itNext.mpNode;
+ }
+
+ bForceToLeft = false;
+ return position.mpNode;
+ }
+ }
+
+ bForceToLeft = false;
+ return NULL; // The above specified hint was not useful, then we do a regular insertion.
+ }
+
+ if(mnSize && compare(extractKey(((node_type*)mAnchor.mpNodeRight)->mValue), key))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))); // Validate that the compare function is sane.
+ bForceToLeft = false;
+ return (node_type*)mAnchor.mpNodeRight;
+ }
+
+ bForceToLeft = false;
+ return NULL; // The caller can do a default insert.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoGetKeyInsertionPositionNonuniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key)
+ {
+ extract_key extractKey;
+
+ if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+ {
+ iterator itNext(position.mpNode);
+ ++itNext;
+
+ // To consider: Change this so that 'position' specifies the position after
+ // where the insertion goes and not the position before where the insertion goes.
+ // Doing so would make this more in line with user expectations and with LWG #233.
+ if(!compare(key, extractKey(position.mpNode->mValue)) && // If value >= *position &&
+ !compare(extractKey(itNext.mpNode->mValue), key)) // if value <= *itNext...
+ {
+ if(position.mpNode->mpNodeRight) // If there are any nodes to the right... [this expression will always be true as long as we aren't at the end()]
+ {
+ bForceToLeft = true; // Specifically insert in front of (to the left of) itNext (and thus after 'position').
+ return itNext.mpNode;
+ }
+
+ bForceToLeft = false;
+ return position.mpNode;
+ }
+
+ bForceToLeft = false;
+ return NULL; // The above specified hint was not useful, then we do a regular insertion.
+ }
+
+ // This pathway shouldn't be commonly executed, as the user shouldn't be calling
+ // this hinted version of insert if the user isn't providing a useful hint.
+ if(mnSize && !compare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))) // If we are non-empty and the value is >= the last node...
+ {
+ bForceToLeft =false;
+ return (node_type*)mAnchor.mpNodeRight;
+ }
+
+ bForceToLeft = false;
+ return NULL;
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(true_type, const_iterator position, Args&&... args) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+
+ node_type* pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ const key_type& key(extract_key{}(pNodeNew->mValue));
+
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key);
+
+ if (!pPosition)
+ {
+ bool canInsert;
+ pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+
+ if (!canInsert)
+ {
+ DoFreeNode(pNodeNew);
+ return iterator(pPosition);
+ }
+
+ bForceToLeft = false;
+ }
+
+ return DoInsertValueImpl(pPosition, bForceToLeft, key, pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(false_type, const_iterator position, Args&&... args) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+
+ node_type* pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ const key_type& key(extract_key{}(pNodeNew->mValue));
+
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key);
+
+ if (!pPosition)
+ {
+ pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key);
+ bForceToLeft = false;
+ }
+
+ return DoInsertValueImpl(pPosition, bForceToLeft, key, pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(true_type, const_iterator position, value_type&& value) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+
+ extract_key extractKey;
+ key_type key(extractKey(value));
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key);
+
+ if(pPosition)
+ return DoInsertValueImpl(pPosition, bForceToLeft, key, eastl::move(value));
+ else
+ return DoInsertValue(has_unique_keys_type(), eastl::move(value)).first;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(false_type, const_iterator position, value_type&& value) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ extract_key extractKey;
+ key_type key(extractKey(value));
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key);
+
+ if(pPosition)
+ return DoInsertValueImpl(pPosition, bForceToLeft, key, eastl::move(value));
+ else
+ return DoInsertValue(has_unique_keys_type(), eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(true_type, const_iterator position, const key_type& key) // true_type means keys are unique.
+ {
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key);
+
+ if(pPosition)
+ return DoInsertKeyImpl(pPosition, bForceToLeft, key);
+ else
+ return DoInsertKey(has_unique_keys_type(), key).first;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(false_type, const_iterator position, const key_type& key) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key);
+
+ if(pPosition)
+ return DoInsertKeyImpl(pPosition, bForceToLeft, key);
+ else
+ return DoInsertKey(has_unique_keys_type(), key); // We are empty or we are inserting at the end.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKeyImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key)
+ {
+ RBTreeSide side;
+ extract_key extractKey;
+
+ // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+ // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+ // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+ if(bForceToLeft || (pNodeParent == &mAnchor) || compare(key, extractKey(pNodeParent->mValue)))
+ side = kRBTreeSideLeft;
+ else
+ side = kRBTreeSideRight;
+
+ node_type* const pNodeNew = DoCreateNodeFromKey(key); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+ mnSize++;
+
+ return iterator(pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ void rbtree<K, V, C, A, E, bM, bU>::insert(std::initializer_list<value_type> ilist)
+ {
+ for(typename std::initializer_list<value_type>::iterator it = ilist.begin(), itEnd = ilist.end(); it != itEnd; ++it)
+ DoInsertValue(has_unique_keys_type(), eastl::move(*it));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename InputIterator>
+ void rbtree<K, V, C, A, E, bM, bU>::insert(InputIterator first, InputIterator last)
+ {
+ for( ; first != last; ++first)
+ DoInsertValue(has_unique_keys_type(), *first); // Or maybe we should call 'insert(end(), *first)' instead. If the first-last range was sorted then this might make some sense.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::clear()
+ {
+ // Erase the entire tree. DoNukeSubtree is not a
+ // conventional erase function, as it does no rebalancing.
+ DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+ reset_lose_memory();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::reset_lose_memory()
+ {
+ // The reset_lose_memory function is a special extension function which unilaterally
+ // resets the container to an empty state without freeing the memory of
+ // the contained objects. This is useful for very quickly tearing down a
+ // container built into scratch memory.
+ mAnchor.mpNodeRight = &mAnchor;
+ mAnchor.mpNodeLeft = &mAnchor;
+ mAnchor.mpNodeParent = NULL;
+ mAnchor.mColor = kRBTreeColorRed;
+ mnSize = 0;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(const_iterator position)
+ {
+ const iterator iErase(position.mpNode);
+ --mnSize; // Interleave this between the two references to itNext. We expect no exceptions to occur during the code below.
+ ++position;
+ RBTreeErase(iErase.mpNode, &mAnchor);
+ DoFreeNode(iErase.mpNode);
+ return iterator(position.mpNode);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(const_iterator first, const_iterator last)
+ {
+ // We expect that if the user means to clear the container, they will call clear.
+ if(EASTL_LIKELY((first.mpNode != mAnchor.mpNodeLeft) || (last.mpNode != &mAnchor))) // If (first != begin or last != end) ...
+ {
+ // Basic implementation:
+ while(first != last)
+ first = erase(first);
+ return iterator(first.mpNode);
+
+ // Inlined implementation:
+ //size_type n = 0;
+ //while(first != last)
+ //{
+ // const iterator itErase(first);
+ // ++n;
+ // ++first;
+ // RBTreeErase(itErase.mpNode, &mAnchor);
+ // DoFreeNode(itErase.mpNode);
+ //}
+ //mnSize -= n;
+ //return first;
+ }
+
+ clear();
+ return iterator((node_type*)&mAnchor); // Same as: return end();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::erase(const key_type* first, const key_type* last)
+ {
+ // We have no choice but to run a loop like this, as the first/last range could
+ // have values that are discontiguously located in the tree. And some may not
+ // even be in the tree.
+ while(first != last)
+ erase(*first++);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key)
+ {
+ // To consider: Implement this instead via calling lower_bound and
+ // inspecting the result. The following is an implementation of this:
+ // const iterator it(lower_bound(key));
+ // return ((it.mpNode == &mAnchor) || compare(key, extractKey(it.mpNode->mValue))) ? iterator(&mAnchor) : it;
+ // We don't currently implement the above because in practice people tend to call
+ // find a lot with trees, but very uncommonly call lower_bound.
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!compare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare(key, extractKey(pRangeEnd->mValue))))
+ return iterator(pRangeEnd);
+ return iterator((node_type*)&mAnchor);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->find(key));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename U, typename Compare2>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!compare2(extractKey(pCurrent->mValue), u))) // If pCurrent is >= u...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!compare2(u, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare2(u, extractKey(pRangeEnd->mValue))))
+ return iterator(pRangeEnd);
+ return iterator((node_type*)&mAnchor);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename U, typename Compare2>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->find_as(u, compare2));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!compare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ return iterator(pRangeEnd);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->lower_bound(key));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(compare(key, extractKey(pCurrent->mValue)))) // If key is < pCurrent...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ return iterator(pRangeEnd);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->upper_bound(key));
+ }
+
+
+ // To do: Move this validate function entirely to a template-less implementation.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ bool rbtree<K, V, C, A, E, bM, bU>::validate() const
+ {
+ // Red-black trees have the following canonical properties which we validate here:
+ // 1 Every node is either red or black.
+ // 2 Every leaf (NULL) is black by defintion. Any number of black nodes may appear in a sequence.
+ // 3 If a node is red, then both its children are black. Thus, on any path from
+ // the root to a leaf, red nodes must not be adjacent.
+ // 4 Every simple path from a node to a descendant leaf contains the same number of black nodes.
+ // 5 The mnSize member of the tree must equal the number of nodes in the tree.
+ // 6 The tree is sorted as per a conventional binary tree.
+ // 7 The comparison function is sane; it obeys strict weak ordering. If compare(a,b) is true, then compare(b,a) must be false. Both cannot be true.
+
+ extract_key extractKey;
+
+ if(mnSize)
+ {
+ // Verify basic integrity.
+ //if(!mAnchor.mpNodeParent || (mAnchor.mpNodeLeft == mAnchor.mpNodeRight))
+ // return false; // Fix this for case of empty tree.
+
+ if(mAnchor.mpNodeLeft != RBTreeGetMinChild(mAnchor.mpNodeParent))
+ return false;
+
+ if(mAnchor.mpNodeRight != RBTreeGetMaxChild(mAnchor.mpNodeParent))
+ return false;
+
+ const size_t nBlackCount = RBTreeGetBlackCount(mAnchor.mpNodeParent, mAnchor.mpNodeLeft);
+ size_type nIteratedSize = 0;
+
+ for(const_iterator it = begin(); it != end(); ++it, ++nIteratedSize)
+ {
+ const node_type* const pNode = (const node_type*)it.mpNode;
+ const node_type* const pNodeRight = (const node_type*)pNode->mpNodeRight;
+ const node_type* const pNodeLeft = (const node_type*)pNode->mpNodeLeft;
+
+ // Verify #7 above.
+ if(pNodeRight && compare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)) && compare(extractKey(pNode->mValue), extractKey(pNodeRight->mValue))) // Validate that the compare function is sane.
+ return false;
+
+ // Verify #7 above.
+ if(pNodeLeft && compare(extractKey(pNodeLeft->mValue), extractKey(pNode->mValue)) && compare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue))) // Validate that the compare function is sane.
+ return false;
+
+ // Verify item #1 above.
+ if((pNode->mColor != kRBTreeColorRed) && (pNode->mColor != kRBTreeColorBlack))
+ return false;
+
+ // Verify item #3 above.
+ if(pNode->mColor == kRBTreeColorRed)
+ {
+ if((pNodeRight && (pNodeRight->mColor == kRBTreeColorRed)) ||
+ (pNodeLeft && (pNodeLeft->mColor == kRBTreeColorRed)))
+ return false;
+ }
+
+ // Verify item #6 above.
+ if(pNodeRight && compare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)))
+ return false;
+
+ if(pNodeLeft && compare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue)))
+ return false;
+
+ if(!pNodeRight && !pNodeLeft) // If we are at a bottom node of the tree...
+ {
+ // Verify item #4 above.
+ if(RBTreeGetBlackCount(mAnchor.mpNodeParent, pNode) != nBlackCount)
+ return false;
+ }
+ }
+
+ // Verify item #5 above.
+ if(nIteratedSize != mnSize)
+ return false;
+
+ return true;
+ }
+ else
+ {
+ if((mAnchor.mpNodeLeft != &mAnchor) || (mAnchor.mpNodeRight != &mAnchor))
+ return false;
+ }
+
+ return true;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline int rbtree<K, V, C, A, E, bM, bU>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoAllocateNode()
+ {
+ auto* pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::DoFreeNode(node_type* pNode)
+ {
+ pNode->~node_type();
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNodeFromKey(const key_type& key)
+ {
+ // Note that this function intentionally leaves the node pointers uninitialized.
+ // The caller would otherwise just turn right around and modify them, so there's
+ // no point in us initializing them to anything (except in a debug build).
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new (eastl::addressof(pNode->mValue)) value_type(pair_first_construct, key);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #endif
+
+ #if EASTL_DEBUG
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = NULL;
+ pNode->mColor = kRBTreeColorBlack;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const value_type& value)
+ {
+ // Note that this function intentionally leaves the node pointers uninitialized.
+ // The caller would otherwise just turn right around and modify them, so there's
+ // no point in us initializing them to anything (except in a debug build).
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #endif
+
+ #if EASTL_DEBUG
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = NULL;
+ pNode->mColor = kRBTreeColorBlack;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(value_type&& value)
+ {
+ // Note that this function intentionally leaves the node pointers uninitialized.
+ // The caller would otherwise just turn right around and modify them, so there's
+ // no point in us initializing them to anything (except in a debug build).
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(eastl::move(value));
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #endif
+
+ #if EASTL_DEBUG
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = NULL;
+ pNode->mColor = kRBTreeColorBlack;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template<class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(Args&&... args)
+ {
+ // Note that this function intentionally leaves the node pointers uninitialized.
+ // The caller would otherwise just turn right around and modify them, so there's
+ // no point in us initializing them to anything (except in a debug build).
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(eastl::forward<Args>(args)...);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #endif
+
+ #if EASTL_DEBUG
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = NULL;
+ pNode->mColor = kRBTreeColorBlack;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent)
+ {
+ node_type* const pNode = DoCreateNode(pNodeSource->mValue);
+
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = pNodeParent;
+ pNode->mColor = pNodeSource->mColor;
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest)
+ {
+ node_type* const pNewNodeRoot = DoCreateNode(pNodeSource, pNodeDest);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // Copy the right side of the tree recursively.
+ if(pNodeSource->mpNodeRight)
+ pNewNodeRoot->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeRoot);
+
+ node_type* pNewNodeLeft;
+
+ for(pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeRoot;
+ pNodeSource;
+ pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeLeft)
+ {
+ pNewNodeLeft = DoCreateNode(pNodeSource, pNodeDest);
+
+ pNodeDest->mpNodeLeft = pNewNodeLeft;
+
+ // Copy the right side of the tree recursively.
+ if(pNodeSource->mpNodeRight)
+ pNewNodeLeft->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeLeft);
+ }
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoNukeSubtree(pNewNodeRoot);
+ throw;
+ }
+ #endif
+
+ return pNewNodeRoot;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ void rbtree<K, V, C, A, E, bM, bU>::DoNukeSubtree(node_type* pNode)
+ {
+ while(pNode) // Recursively traverse the tree and destroy items as we go.
+ {
+ DoNukeSubtree((node_type*)pNode->mpNodeRight);
+
+ node_type* const pNodeLeft = (node_type*)pNode->mpNodeLeft;
+ DoFreeNode(pNode);
+ pNode = pNodeLeft;
+ }
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator==(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
+ }
+
+
+ // Note that in operator< we do comparisons based on the tree value_type with operator<() of the
+ // value_type instead of the tree's Compare function. For set/multiset, the value_type is T, while
+ // for map/multimap the value_type is a pair<Key, T>. operator< for pair can be seen by looking
+ // utility.h, but it basically is uses the operator< for pair.first and pair.second. The C++ standard
+ // appears to require this behaviour, whether intentionally or not. If anything, a good reason to do
+ // this is for consistency. A map and a vector that contain the same items should compare the same.
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator<(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator!=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator>(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator<=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator>=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline void swap(rbtree<K, V, C, A, E, bM, bU>& a, rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/smart_ptr.h b/EASTL/include/EASTL/internal/smart_ptr.h
new file mode 100644
index 0000000..8a37950
--- /dev/null
+++ b/EASTL/include/EASTL/internal/smart_ptr.h
@@ -0,0 +1,267 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_SMART_PTR_H
+#define EASTL_INTERNAL_SMART_PTR_H
+
+
+#include <EABase/eabase.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/memory.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+namespace eastl
+{
+
+ namespace Internal
+ {
+ // Tells if the Deleter type has a typedef for pointer to T. If so then return it,
+ // else return T*. The large majority of the time the pointer type will be T*.
+ // The C++11 Standard requires that scoped_ptr let the deleter define the pointer type.
+ //
+ // Example usage:
+ // typedef typename unique_pointer_type<int, SomeDeleter>::type pointer
+ //
+ template <typename T, typename Deleter>
+ class unique_pointer_type
+ {
+ template <typename U>
+ static typename U::pointer test(typename U::pointer*);
+
+ template <typename U>
+ static T* test(...);
+
+ public:
+ typedef decltype(test<typename eastl::remove_reference<Deleter>::type>(0)) type;
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array_cv_convertible
+ //
+ // Tells if the array pointer P1 is cv-convertible to array pointer P2.
+ // The two types have two be equivalent pointer types and be convertible
+ // when you consider const/volatile properties of them.
+ //
+ // Example usage:
+ // is_array_cv_convertible<int, Base*>::value => false
+ // is_array_cv_convertible<Base, Base*>::value => false
+ // is_array_cv_convertible<double*, bool*>::value => false
+ // is_array_cv_convertible<Subclass*, Base*>::value => false
+ // is_array_cv_convertible<const Base*, Base*>::value => false
+ // is_array_cv_convertible<Base*, Base*>::value => true
+ // is_array_cv_convertible<Base*, const Base*>::value => true
+ // is_array_cv_convertible<Base*, volatile Base*>::value => true
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_array_cv_convertible_CONFORMANCE 1
+
+ template <typename P1, typename P2, bool = eastl::is_same_v<eastl::remove_cv_t<typename pointer_traits<P1>::element_type>,
+ eastl::remove_cv_t<typename pointer_traits<P2>::element_type>>>
+ struct is_array_cv_convertible_impl
+ : public eastl::is_convertible<P1, P2> {}; // Return true if P1 is convertible to P2.
+
+ template <typename P1, typename P2>
+ struct is_array_cv_convertible_impl<P1, P2, false>
+ : public eastl::false_type {}; // P1's underlying type is not the same as P2's, so it can't be converted, even if P2 refers to a subclass of P1. Parent == Child, but Parent[] != Child[]
+
+ template <typename P1, typename P2, bool = eastl::is_scalar_v<P1> && !eastl::is_pointer_v<P1>>
+ struct is_array_cv_convertible
+ : public is_array_cv_convertible_impl<P1, P2> {};
+
+ template <typename P1, typename P2>
+ struct is_array_cv_convertible<P1, P2, true>
+ : public eastl::false_type {}; // P1 is scalar not a pointer, so it can't be converted to a pointer.
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_derived
+ //
+ // Given two (possibly identical) types Base and Derived, is_base_of<Base, Derived>::value == true
+ // if and only if Base is a direct or indirect base class of Derived. This is like is_base_of<Base, Derived>
+ // but returns false if Derived is the same as Base. So is_derived is true only if Derived is actually a subclass
+ // of Base and not Base itself.
+ //
+ // is_derived may only be applied to complete types.
+ //
+ // Example usage:
+ // is_derived<int, int>::value => false
+ // is_derived<int, bool>::value => false
+ // is_derived<Parent, Child>::value => true
+ // is_derived<Child, Parent>::value => false
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE
+ #define EASTL_TYPE_TRAIT_is_derived_CONFORMANCE 1
+
+ template <typename Base, typename Derived>
+ struct is_derived : public eastl::integral_constant<bool, eastl::is_base_of<Base, Derived>::value && !eastl::is_same<typename eastl::remove_cv<Base>::type, typename eastl::remove_cv<Derived>::type>::value> {};
+ #else
+ #define EASTL_TYPE_TRAIT_is_derived_CONFORMANCE 0
+
+ template <typename Base, typename Derived> // This returns true if Derived is unrelated to Base. That's a wrong answer, but is better for us than returning false for compilers that don't support is_base_of.
+ struct is_derived : public eastl::integral_constant<bool, !eastl::is_same<typename eastl::remove_cv<Base>::type, typename eastl::remove_cv<Derived>::type>::value> {};
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_safe_array_conversion
+ //
+ // Say you have two array types: T* t and U* u. You want to assign the u to t but only if
+ // that's a safe thing to do. As shown in the logic below, the array conversion
+ // is safe if U* and T* are convertible, if U is an array, and if either U or T is not
+ // a pointer or U is not derived from T.
+ //
+ // Note: Usage of this class could be replaced with is_array_cv_convertible usage.
+ // To do: Do this replacement and test it.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename T_pointer, typename U, typename U_pointer>
+ struct is_safe_array_conversion : public eastl::integral_constant<bool, eastl::is_convertible<U_pointer, T_pointer>::value &&
+ eastl::is_array<U>::value &&
+ (!eastl::is_pointer<U_pointer>::value || !is_pointer<T_pointer>::value || !Internal::is_derived<T, typename eastl::remove_extent<U>::type>::value)> {};
+
+ } // namespace Internal
+
+
+
+
+
+
+
+ /// default_delete
+ ///
+ /// C++11 smart pointer default delete function class.
+ ///
+ /// Provides a default way to delete an object. This default is simply to call delete on the
+ /// object pointer. You can provide an alternative to this class or you can override this on
+ /// a class-by-class basis like the following:
+ /// template <>
+ /// struct smart_ptr_deleter<MyClass>
+ /// {
+ /// void operator()(MyClass* p) const
+ /// { SomeCustomFunction(p); }
+ /// };
+ ///
+ template <typename T>
+ struct default_delete
+ {
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION <= 4006) // GCC prior to 4.7 has a bug with noexcept here.
+ EA_CONSTEXPR default_delete() = default;
+ #else
+ EA_CONSTEXPR default_delete() EA_NOEXCEPT = default;
+ #endif
+
+ template <typename U> // Enable if T* can be constructed with U* (i.e. U* is convertible to T*).
+ default_delete(const default_delete<U>&, typename eastl::enable_if<is_convertible<U*, T*>::value>::type* = 0) EA_NOEXCEPT {}
+
+ void operator()(T* p) const EA_NOEXCEPT
+ {
+ static_assert(eastl::internal::is_complete_type_v<T>, "Attempting to call the destructor of an incomplete type");
+ delete p;
+ }
+ };
+
+
+ template <typename T>
+ struct default_delete<T[]> // Specialization for arrays.
+ {
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION <= 4006) // GCC prior to 4.7 has a bug with noexcept here.
+ EA_CONSTEXPR default_delete() = default;
+ #else
+ EA_CONSTEXPR default_delete() EA_NOEXCEPT = default;
+ #endif
+
+ template <typename U> // This ctor is enabled if T is equal to or a base of U, and if U is less or equal const/volatile-qualified than T.
+ default_delete(const default_delete<U[]>&, typename eastl::enable_if<Internal::is_array_cv_convertible<U*, T*>::value>::type* = 0) EA_NOEXCEPT {}
+
+ void operator()(T* p) const EA_NOEXCEPT
+ { delete[] p; }
+ };
+
+
+
+
+ /// smart_ptr_deleter
+ ///
+ /// Deprecated in favor of the C++11 name: default_delete
+ ///
+ template <typename T>
+ struct smart_ptr_deleter
+ {
+ typedef T value_type;
+
+ void operator()(const value_type* p) const // We use a const argument type in order to be most flexible with what types we accept.
+ { delete const_cast<value_type*>(p); }
+ };
+
+ template <>
+ struct smart_ptr_deleter<void>
+ {
+ typedef void value_type;
+
+ void operator()(const void* p) const
+ { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type.
+ };
+
+ template <>
+ struct smart_ptr_deleter<const void>
+ {
+ typedef void value_type;
+
+ void operator()(const void* p) const
+ { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type.
+ };
+
+
+
+ /// smart_array_deleter
+ ///
+ /// Deprecated in favor of the C++11 name: default_delete
+ ///
+ template <typename T>
+ struct smart_array_deleter
+ {
+ typedef T value_type;
+
+ void operator()(const value_type* p) const // We use a const argument type in order to be most flexible with what types we accept.
+ { delete[] const_cast<value_type*>(p); }
+ };
+
+ template <>
+ struct smart_array_deleter<void>
+ {
+ typedef void value_type;
+
+ void operator()(const void* p) const
+ { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type.
+ };
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/thread_support.h b/EASTL/include/EASTL/internal/thread_support.h
new file mode 100644
index 0000000..49856c0
--- /dev/null
+++ b/EASTL/include/EASTL/internal/thread_support.h
@@ -0,0 +1,160 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_THREAD_SUPPORT_H
+#define EASTL_INTERNAL_THREAD_SUPPORT_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+#include <EASTL/internal/config.h>
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+// NOTE(rparolin): We need a fallback mutex implementation because the Microsoft implementation
+// of std::mutex can not be included in managed-cpp code.
+//
+// fatal error C1189: <mutex> is not supported when compiling with /clr or /clr:pure
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_CPP11_MUTEX_ENABLED)
+ #if defined(EA_HAVE_CPP11_MUTEX) && !defined(EA_COMPILER_MANAGED_CPP)
+ #define EASTL_CPP11_MUTEX_ENABLED 1
+ #else
+ #define EASTL_CPP11_MUTEX_ENABLED 0
+ #endif
+#endif
+
+#if EASTL_CPP11_MUTEX_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <mutex>
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+#if defined(EA_PLATFORM_MICROSOFT)
+ // Cannot include Windows headers in our headers, as they kill builds with their #defines.
+#elif defined(EA_PLATFORM_POSIX)
+ #include <pthread.h>
+#endif
+
+// copy constructor could not be generated because a base class copy constructor is inaccessible or deleted.
+// assignment operator could not be generated because a base class assignment operator is inaccessible or deleted.
+// non dll-interface class used as base for DLL-interface classkey 'identifier'.
+EA_DISABLE_VC_WARNING(4625 4626 4275);
+
+
+#if defined(EA_PLATFORM_MICROSOFT)
+ #if defined(EA_PROCESSOR_POWERPC)
+ extern "C" long __stdcall _InterlockedIncrement(long volatile* Addend);
+ #pragma intrinsic (_InterlockedIncrement)
+
+ extern "C" long __stdcall _InterlockedDecrement(long volatile* Addend);
+ #pragma intrinsic (_InterlockedDecrement)
+
+ extern "C" long __stdcall _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp);
+ #pragma intrinsic (_InterlockedCompareExchange)
+ #else
+ extern "C" long _InterlockedIncrement(long volatile* Addend);
+ #pragma intrinsic (_InterlockedIncrement)
+
+ extern "C" long _InterlockedDecrement(long volatile* Addend);
+ #pragma intrinsic (_InterlockedDecrement)
+
+ extern "C" long _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp);
+ #pragma intrinsic (_InterlockedCompareExchange)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_THREAD_SUPPORT_AVAILABLE
+//
+// Defined as 0 or 1, based on existing support.
+// Identifies if thread support (e.g. atomics, mutexes) is available for use.
+// The large majority of EASTL doesn't use thread support, but a few parts
+// of it (e.g. shared_ptr) do.
+///////////////////////////////////////////////////////////////////////////////
+
+#if !defined(EASTL_THREAD_SUPPORT_AVAILABLE)
+ #if defined(__clang__) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003))
+ #define EASTL_THREAD_SUPPORT_AVAILABLE 1
+ #elif defined(EA_COMPILER_MSVC)
+ #define EASTL_THREAD_SUPPORT_AVAILABLE 1
+ #else
+ #define EASTL_THREAD_SUPPORT_AVAILABLE 0
+ #endif
+#endif
+
+
+namespace eastl
+{
+ namespace Internal
+ {
+ // mutex
+ #if EASTL_CPP11_MUTEX_ENABLED
+ using std::mutex;
+ #else
+ class EASTL_API mutex
+ {
+ public:
+ mutex();
+ ~mutex();
+
+ void lock();
+ void unlock();
+
+ protected:
+ #if defined(EA_PLATFORM_MICROSOFT)
+ #if defined(_WIN64)
+ uint64_t mMutexBuffer[40 / sizeof(uint64_t)]; // CRITICAL_SECTION is 40 bytes on Win64.
+ #elif defined(_WIN32)
+ uint32_t mMutexBuffer[24 / sizeof(uint32_t)]; // CRITICAL_SECTION is 24 bytes on Win32.
+ #endif
+ #elif defined(EA_PLATFORM_POSIX)
+ pthread_mutex_t mMutex;
+ #endif
+ };
+ #endif
+
+
+ // auto_mutex
+ class EASTL_API auto_mutex
+ {
+ public:
+ EA_FORCE_INLINE auto_mutex(mutex& mutex) : pMutex(&mutex)
+ { pMutex->lock(); }
+
+ EA_FORCE_INLINE ~auto_mutex()
+ { pMutex->unlock(); }
+
+ protected:
+ mutex* pMutex;
+
+ auto_mutex(const auto_mutex&) = delete;
+ void operator=(const auto_mutex&) = delete;
+ };
+
+
+ // shared_ptr_auto_mutex
+ class EASTL_API shared_ptr_auto_mutex : public auto_mutex
+ {
+ public:
+ shared_ptr_auto_mutex(const void* pSharedPtr);
+
+ shared_ptr_auto_mutex(const shared_ptr_auto_mutex&) = delete;
+ void operator=(shared_ptr_auto_mutex&&) = delete;
+ };
+
+
+ } // namespace Internal
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/tuple_fwd_decls.h b/EASTL/include/EASTL/internal/tuple_fwd_decls.h
new file mode 100644
index 0000000..a2c773c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/tuple_fwd_decls.h
@@ -0,0 +1,56 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_TUPLE_FWD_DECLS_H
+#define EASTL_TUPLE_FWD_DECLS_H
+
+#include <EASTL/internal/config.h>
+
+#if EASTL_TUPLE_ENABLED
+
+namespace eastl
+{
+ template <typename... T>
+ class tuple;
+
+ template <typename Tuple>
+ class tuple_size;
+
+ template <size_t I, typename Tuple>
+ class tuple_element;
+
+ template <size_t I, typename Tuple>
+ using tuple_element_t = typename tuple_element<I, Tuple>::type;
+
+ // const typename for tuple_element_t, for when tuple or TupleImpl cannot itself be const
+ template <size_t I, typename Tuple>
+ using const_tuple_element_t = typename conditional<
+ is_lvalue_reference<tuple_element_t<I, Tuple>>::value,
+ add_lvalue_reference_t<const remove_reference_t<tuple_element_t<I, Tuple>>>,
+ const tuple_element_t<I, Tuple>
+ >::type;
+
+ // get
+ template <size_t I, typename... Ts_>
+ tuple_element_t<I, tuple<Ts_...>>& get(tuple<Ts_...>& t);
+
+ template <size_t I, typename... Ts_>
+ const_tuple_element_t<I, tuple<Ts_...>>& get(const tuple<Ts_...>& t);
+
+ template <size_t I, typename... Ts_>
+ tuple_element_t<I, tuple<Ts_...>>&& get(tuple<Ts_...>&& t);
+
+ template <typename T, typename... ts_>
+ T& get(tuple<ts_...>& t);
+
+ template <typename T, typename... ts_>
+ const T& get(const tuple<ts_...>& t);
+
+ template <typename T, typename... ts_>
+ T&& get(tuple<ts_...>&& t);
+}
+
+#endif // EASTL_VARIADIC_TEMPLATES_ENABLED
+
+#endif // EASTL_TUPLE_FWD_DECLS_H
diff --git a/EASTL/include/EASTL/internal/type_compound.h b/EASTL/include/EASTL/internal/type_compound.h
new file mode 100644
index 0000000..339dc8e
--- /dev/null
+++ b/EASTL/include/EASTL/internal/type_compound.h
@@ -0,0 +1,715 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_COMPOUND_H
+#define EASTL_INTERNAL_TYPE_COMPOUND_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+// Until we revise the code below to handle EDG warnings, we don't have much choice but to disable them.
+#if defined(__EDG_VERSION__)
+ #pragma diag_suppress=1931 // operand of sizeof is not a type, variable, or dereferenced pointer expression
+#endif
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // extent
+ //
+ // extent<T, I>::value is an integral type representing the number of
+ // elements in the Ith dimension of array type T.
+ //
+ // For a given array type T[N], extent<T[N]>::value == N.
+ // For a given multi-dimensional array type T[M][N], extent<T[M][N], 0>::value == N.
+ // For a given multi-dimensional array type T[M][N], extent<T[M][N], 1>::value == M.
+ // For a given array type T and a given dimension I where I >= rank<T>::value, extent<T, I>::value == 0.
+ // For a given array type of unknown extent T[], extent<T[], 0>::value == 0.
+ // For a given non-array type T and an arbitrary dimension I, extent<T, I>::value == 0.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_extent_CONFORMANCE 1 // extent is conforming.
+
+ template<typename T, unsigned N>
+ struct extent_help : public eastl::integral_constant<size_t, 0> {};
+
+ template<typename T, unsigned I>
+ struct extent_help<T[I], 0> : public eastl::integral_constant<size_t, I> {};
+
+ template<typename T, unsigned N, unsigned I>
+ struct extent_help<T[I], N> : public eastl::extent_help<T, N - 1> { };
+
+ template<typename T, unsigned N>
+ struct extent_help<T[], N> : public eastl::extent_help<T, N - 1> {};
+
+ template<typename T, unsigned N = 0> // extent uses unsigned instead of size_t.
+ struct extent : public eastl::extent_help<T, N> { };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T, unsigned N = 0>
+ EA_CONSTEXPR auto extent_v = extent<T, N>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array
+ //
+ // is_array<T>::value == true if and only if T is an array type,
+ // including unbounded array types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_array_CONFORMANCE 1 // is_array is conforming; doesn't make mistakes.
+
+ template<typename T>
+ struct is_array : public eastl::false_type {};
+
+ template<typename T>
+ struct is_array<T[]> : public eastl::true_type {};
+
+ template<typename T, size_t N>
+ struct is_array<T[N]> : public eastl::true_type {};
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template<typename T>
+ EA_CONSTEXPR bool is_array_v = is_array<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array_of_known_bounds
+ //
+ // Not part of the C++11 Standard.
+ // is_array_of_known_bounds<T>::value is true if T is an array and is
+ // of known bounds. is_array_of_unknown_bounds<int[3]>::value == true,
+ // while is_array_of_unknown_bounds<int[]>::value = false.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template<typename T>
+ struct is_array_of_known_bounds
+ : public eastl::integral_constant<bool, eastl::extent<T>::value != 0> {};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array_of_unknown_bounds
+ //
+ // Not part of the C++11 Standard.
+ // is_array_of_unknown_bounds<T>::value is true if T is an array but is
+ // of unknown bounds. is_array_of_unknown_bounds<int[3]>::value == false,
+ // while is_array_of_unknown_bounds<int[]>::value = true.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template<typename T>
+ struct is_array_of_unknown_bounds
+ : public eastl::integral_constant<bool, eastl::is_array<T>::value && (eastl::extent<T>::value == 0)> {};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_member_function_pointer
+ //
+ // is_member_function_pointer<T>::value == true if and only if T is a
+ // pointer to member function type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // We detect member functions with 0 to N arguments. We can extend this
+ // for additional arguments if necessary.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_member_function_pointer_CONFORMANCE 1 // is_member_function_pointer is conforming; doesn't make mistakes.
+
+ namespace internal
+ {
+ template<typename T>
+ struct is_member_function_pointer_helper : false_type {};
+
+ template<typename T, typename U>
+ struct is_member_function_pointer_helper<T U::*> : is_function<T> {};
+ }
+
+ template<typename T>
+ struct is_member_function_pointer
+ : internal::is_member_function_pointer_helper<typename remove_cv<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_member_function_pointer_v = is_member_function_pointer<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_member_pointer
+ //
+ // is_member_pointer<T>::value == true if and only if:
+ // is_member_object_pointer<T>::value == true, or
+ // is_member_function_pointer<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_member_pointer_CONFORMANCE 1 // is_member_pointer is conforming; doesn't make mistakes.
+
+ namespace internal {
+ template <typename T>
+ struct is_member_pointer_helper
+ : public eastl::false_type {};
+
+ template <typename T, typename U>
+ struct is_member_pointer_helper<U T::*>
+ : public eastl::true_type {};
+ }
+
+ template<typename T>
+ struct is_member_pointer
+ : public internal::is_member_pointer_helper<typename remove_cv<T>::type>::type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_member_pointer_v = is_member_pointer<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_member_object_pointer
+ //
+ // is_member_object_pointer<T>::value == true if and only if T is a
+ // pointer to data member type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_member_object_pointer_CONFORMANCE 1 // is_member_object_pointer is conforming; doesn't make mistakes.
+
+ template<typename T>
+ struct is_member_object_pointer : public eastl::integral_constant<bool,
+ eastl::is_member_pointer<T>::value &&
+ !eastl::is_member_function_pointer<T>::value
+ > {};
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_member_object_pointer_v = is_member_object_pointer<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_pointer
+ //
+ // is_pointer<T>::value == true if and only if T is a pointer type.
+ // This category includes function pointer types, but not pointer to
+ // member types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_pointer_CONFORMANCE 1 // is_pointer is conforming; doesn't make mistakes.
+
+ template <typename T> struct is_pointer_helper : public false_type{};
+
+ template <typename T> struct is_pointer_helper<T*> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* const> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* volatile> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* const volatile> : public true_type{};
+
+ template <typename T>
+ struct is_pointer_value : public type_and<is_pointer_helper<T>::value, type_not<is_member_pointer<T>::value>::value> {};
+
+ template <typename T>
+ struct is_pointer : public integral_constant<bool, is_pointer_value<T>::value>{};
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template<typename T>
+ EA_CONSTEXPR bool is_pointer_v = is_pointer<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_convertible
+ //
+ // Given two (possible identical) types From and To, is_convertible<From, To>::value == true
+ // if and only if an lvalue of type From can be implicitly converted to type To,
+ // or is_void<To>::value == true
+ //
+ // An instance of the type predicate holds true if the expression To to = from;, where from is an object of type From, is well-formed.
+ //
+ // is_convertible may only be applied to complete types.
+ // Type To may not be an abstract type.
+ // If the conversion is ambiguous, the program is ill-formed.
+ // If either or both of From and To are class types, and the conversion would invoke
+ // non-public member functions of either From or To (such as a private constructor of To,
+ // or a private conversion operator of From), the program is ill-formed.
+ //
+ // Note that without compiler help, both is_convertible and is_base
+ // can produce compiler errors if the conversion is ambiguous.
+ // Example:
+ // struct A {};
+ // struct B : A {};
+ // struct C : A {};
+ // struct D : B, C {};
+ // is_convertible<D*, A*>::value; // Generates compiler error.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_convertible_to)))
+ #define EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE 1 // is_convertible is conforming.
+
+ // Problem: VC++ reports that int is convertible to short, yet if you construct a short from an int then VC++ generates a warning:
+ // warning C4242: 'initializing' : conversion from 'int' to 'short', possible loss of data. We can deal with this by making
+ // is_convertible be false for conversions that could result in loss of data. Or we could make another trait called is_lossless_convertible
+ // and use that appropriately in our code. Or we could put the onus on the user to work around such warnings.
+ template <typename From, typename To>
+ struct is_convertible : public integral_constant<bool, __is_convertible_to(From, To)>{};
+
+ #else
+ #define EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE 1
+
+ template<typename From, typename To, bool = eastl::is_void<From>::value || eastl::is_function<To>::value || eastl::is_array<To>::value >
+ struct is_convertible_helper // Anything is convertible to void. Nothing is convertible to a function or an array.
+ { static const bool value = eastl::is_void<To>::value; };
+
+ template<typename From, typename To>
+ class is_convertible_helper<From, To, false>
+ {
+ template<typename To1>
+ static void ToFunction(To1); // We try to call this function with an instance of From. It is valid if From can be converted to To.
+
+ template<typename /*From1*/, typename /*To1*/>
+ static eastl::no_type is(...);
+
+ template<typename From1, typename To1>
+ static decltype(ToFunction<To1>(eastl::declval<From1>()), eastl::yes_type()) is(int);
+
+ public:
+ static const bool value = sizeof(is<From, To>(0)) == 1;
+ };
+
+ template<typename From, typename To>
+ struct is_convertible
+ : public integral_constant<bool, is_convertible_helper<From, To>::value> {};
+
+ #endif
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template<typename From, typename To>
+ EA_CONSTEXPR bool is_convertible_v = is_convertible<From, To>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_convertible
+ //
+ // https://en.cppreference.com/w/cpp/types/is_convertible
+ //
+ // template<typename From, typename To>
+ // struct is_explicitly_convertible
+ // : public is_constructible<To, From> {};
+ ///////////////////////////////////////////////////////////////////////
+ // TODO(rparolin): implement type-trait
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_explicitly_convertible
+ //
+ // This sometime-seen extension trait is the same as is_constructible
+ // and so we don't define it.
+ //
+ // template<typename From, typename To>
+ // struct is_explicitly_convertible
+ // : public is_constructible<To, From> {};
+ ///////////////////////////////////////////////////////////////////////
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_union
+ //
+ // is_union<T>::value == true if and only if T is a union type.
+ //
+ // There is no way to tell if a type is a union without compiler help.
+ // As of this writing, only Metrowerks v8+ supports such functionality
+ // via 'msl::is_union<T>::value'. The user can force something to be
+ // evaluated as a union via EASTL_DECLARE_UNION.
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_union)))
+ #define EASTL_TYPE_TRAIT_is_union_CONFORMANCE 1 // is_union is conforming.
+
+ template <typename T>
+ struct is_union : public integral_constant<bool, __is_union(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_union_CONFORMANCE 0 // is_union is not fully conforming.
+
+ template <typename T> struct is_union : public false_type{};
+ #endif
+
+ #define EASTL_DECLARE_UNION(T) namespace eastl{ template <> struct is_union<T> : public true_type{}; template <> struct is_union<const T> : public true_type{}; }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_union_v = is_union<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_class
+ //
+ // is_class<T>::value == true if and only if T is a class or struct
+ // type (and not a union type).
+ //
+ // Without specific compiler help, it is not possible to
+ // distinguish between unions and classes. As a result, is_class
+ // will erroneously evaluate to true for union types.
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_class)))
+ #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE 1 // is_class is conforming.
+
+ template <typename T>
+ struct is_class : public integral_constant<bool, __is_class(T)>{};
+ #elif defined(__EDG__)
+ #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE EASTL_TYPE_TRAIT_is_union_CONFORMANCE
+
+ typedef char yes_array_type[1];
+ typedef char no_array_type[2];
+ template <typename U> static yes_array_type& is_class_helper(void (U::*)());
+ template <typename U> static no_array_type& is_class_helper(...);
+
+ template <typename T>
+ struct is_class : public integral_constant<bool,
+ sizeof(is_class_helper<T>(0)) == sizeof(yes_array_type) && !is_union<T>::value
+ >{};
+ #elif !defined(__GNUC__) || (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) // Not GCC or GCC 3.4+
+ #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE EASTL_TYPE_TRAIT_is_union_CONFORMANCE
+
+ template <typename U> static yes_type is_class_helper(void (U::*)());
+ template <typename U> static no_type is_class_helper(...);
+
+ template <typename T>
+ struct is_class : public integral_constant<bool,
+ sizeof(is_class_helper<T>(0)) == sizeof(yes_type) && !is_union<T>::value
+ >{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE 0 // is_class is not fully conforming.
+
+ // GCC 2.x version, due to GCC being broken.
+ template <typename T>
+ struct is_class : public false_type{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_class_v = is_class<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_polymorphic
+ //
+ // is_polymorphic<T>::value == true if and only if T is a class or struct
+ // that declares or inherits a virtual function. is_polymorphic may only
+ // be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_polymorphic)))
+ #define EASTL_TYPE_TRAIT_is_polymorphic_CONFORMANCE 1 // is_polymorphic is conforming.
+
+ template <typename T>
+ struct is_polymorphic : public integral_constant<bool, __is_polymorphic(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_polymorphic_CONFORMANCE 1 // is_polymorphic is conforming.
+
+ template <typename T>
+ struct is_polymorphic_imp1
+ {
+ typedef typename remove_cv<T>::type t;
+
+ struct helper_1 : public t
+ {
+ helper_1();
+ ~helper_1() throw();
+ char pad[64];
+ };
+
+ struct helper_2 : public t
+ {
+ helper_2();
+ virtual ~helper_2() throw();
+ #ifndef _MSC_VER
+ virtual void foo();
+ #endif
+ char pad[64];
+ };
+
+ static const bool value = (sizeof(helper_1) == sizeof(helper_2));
+ };
+
+ template <typename T>
+ struct is_polymorphic_imp2{ static const bool value = false; };
+
+ template <bool is_class>
+ struct is_polymorphic_selector{ template <typename T> struct rebind{ typedef is_polymorphic_imp2<T> type; }; };
+
+ template <>
+ struct is_polymorphic_selector<true>{ template <typename T> struct rebind{ typedef is_polymorphic_imp1<T> type; }; };
+
+ template <typename T>
+ struct is_polymorphic_value{
+ typedef is_polymorphic_selector<is_class<T>::value> selector;
+ typedef typename selector::template rebind<T> binder;
+ typedef typename binder::type imp_type;
+ static const bool value = imp_type::value;
+ };
+
+ template <typename T>
+ struct is_polymorphic : public integral_constant<bool, is_polymorphic_value<T>::value>{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_polymorphic_v = is_polymorphic<T>::value;
+ #endif
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_object
+ //
+ // is_object<T>::value == true if and only if:
+ // is_reference<T>::value == false, and
+ // is_function<T>::value == false, and
+ // is_void<T>::value == false
+ //
+ // The C++ standard, section 3.9p9, states: "An object type is a
+ // (possibly cv-qualified) type that is not a function type, not a
+ // reference type, and not incomplete (except for an incompletely
+ // defined object type).
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_object_CONFORMANCE (EASTL_TYPE_TRAIT_is_reference_CONFORMANCE && EASTL_TYPE_TRAIT_is_void_CONFORMANCE && EASTL_TYPE_TRAIT_is_function_CONFORMANCE)
+
+ template <typename T>
+ struct is_object : public integral_constant<bool,
+ !is_reference<T>::value && !is_void<T>::value && !is_function<T>::value
+ >{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_object_v = is_object<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_scalar
+ //
+ // is_scalar<T>::value == true if and only if:
+ // is_arithmetic<T>::value == true, or
+ // is_enum<T>::value == true, or
+ // is_pointer<T>::value == true, or
+ // is_member_pointer<T>::value == true, or
+ // is_null_pointer<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_scalar_CONFORMANCE 1 // is_scalar is conforming.
+
+ template <typename T>
+ struct is_scalar : public integral_constant<bool,
+ is_arithmetic<T>::value || is_enum<T>::value || is_pointer<T>::value ||
+ is_member_pointer<T>::value ||
+ is_null_pointer<T>::value> {};
+
+ template <typename T> struct is_scalar<T*> : public true_type {};
+ template <typename T> struct is_scalar<T* const> : public true_type {};
+ template <typename T> struct is_scalar<T* volatile> : public true_type {};
+ template <typename T> struct is_scalar<T* const volatile> : public true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_scalar_v = is_scalar<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_compound
+ //
+ // Compound means anything but fundamental. See C++ standard, section 3.9.2.
+ //
+ // is_compound<T>::value == true if and only if:
+ // is_fundamental<T>::value == false
+ //
+ // Thus, is_compound<T>::value == true if and only if:
+ // is_floating_point<T>::value == false, and
+ // is_integral<T>::value == false, and
+ // is_void<T>::value == false
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_compound_CONFORMANCE EASTL_TYPE_TRAIT_is_fundamental_CONFORMANCE
+
+ template <typename T>
+ struct is_compound : public integral_constant<bool, !is_fundamental<T>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_compound_v = is_compound<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // decay
+ //
+ // Converts the type T to its decayed equivalent. That means doing
+ // lvalue to rvalue, array to pointer, function to pointer conversions,
+ // and removal of const and volatile.
+ // This is the type conversion silently applied by the compiler to
+ // all function arguments when passed by value.
+
+ #define EASTL_TYPE_TRAIT_decay_CONFORMANCE 1 // decay is conforming.
+
+ template<typename T>
+ struct decay
+ {
+ typedef typename eastl::remove_reference<T>::type U;
+
+ typedef typename eastl::conditional<
+ eastl::is_array<U>::value,
+ typename eastl::remove_extent<U>::type*,
+ typename eastl::conditional<
+ eastl::is_function<U>::value,
+ typename eastl::add_pointer<U>::type,
+ typename eastl::remove_cv<U>::type
+ >::type
+ >::type type;
+ };
+
+
+ // decay_t is the C++14 using typedef for typename decay<T>::type, though
+ // it requires only C++11 compiler functionality to implement.
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_DECAY_T(T) typename decay<T>::type
+ #else
+ template<typename T>
+ using decay_t = typename decay<T>::type;
+ #define EASTL_DECAY_T(T) decay_t<T>
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // common_type
+ //
+ // Determines the common type among all types T..., that is the type all T...
+ // can be implicitly converted to.
+ //
+ // It is intended that this be specialized by the user for cases where it
+ // is useful to do so. Example specialization:
+ // template <typename Class1, typename Class2>
+ // struct common_type<MyClass1, MyClass2>{ typedef MyBaseClassB type; };
+ //
+ // The member typedef type shall be defined as set out in 20.9.7.6,p3. All types in
+ // the parameter pack T shall be complete or (possibly cv) void. A program may
+ // specialize this trait if at least one template parameter in the specialization
+ // is a user-defined type. Note: Such specializations are needed when only
+ // explicit conversions are desired among the template arguments.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_common_type_CONFORMANCE 1 // common_type is conforming.
+
+ template<typename... T>
+ struct common_type;
+
+ template<typename T>
+ struct common_type<T>
+ { typedef decay_t<T> type; }; // Question: Should we use T or decay_t<T> here? The C++11 Standard specifically (20.9.7.6,p3) specifies that it be without decay, but libc++ uses decay.
+
+ template<typename T, typename U>
+ struct common_type<T, U>
+ {
+ typedef decay_t<decltype(true ? declval<T>() : declval<U>())> type; // The type of a tertiary expression is set by the compiler to be the common type of the two result types.
+ };
+
+ template<typename T, typename U, typename... V>
+ struct common_type<T, U, V...>
+ { typedef typename common_type<typename common_type<T, U>::type, V...>::type type; };
+
+
+ // common_type_t is the C++14 using typedef for typename common_type<T...>::type.
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_COMMON_TYPE_T(...) typename common_type<__VA_ARGS__>::type
+ #else
+ template <typename... T>
+ using common_type_t = typename common_type<T...>::type;
+ #define EASTL_COMMON_TYPE_T(...) common_type_t<__VA_ARGS__>
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_final
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_IS_FINAL_AVAILABLE == 1
+ template <typename T>
+ struct is_final : public integral_constant<bool, __is_final(T)> {};
+ #else
+ // no compiler support so we always return false
+ template <typename T>
+ struct is_final : public false_type {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_final_v = is_final<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_aggregate
+ //
+ // https://en.cppreference.com/w/cpp/language/aggregate_initialization
+ //
+ // An aggregate is one of the following types:
+ // * array type
+ // * class type (typically, struct or union), that has
+ // * no private or protected non-static data members
+ // * no user-provided constructors (explicitly defaulted or deleted constructors are allowed)
+ // * no user-provided, inherited, or explicit constructors
+ // * (explicitly defaulted or deleted constructors are allowed)
+ // * no virtual, private, or protected (since C++17) base classes
+ // * no virtual member functions
+ // * no default member initializers
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_IS_AGGREGATE_AVAILABLE == 1
+ #define EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE 1
+
+ template <typename T>
+ struct is_aggregate : public integral_constant<bool, __is_aggregate(T)> {};
+ #else
+ #define EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE 0
+
+ // no compiler support so we always return false
+ template <typename T>
+ struct is_aggregate : public false_type {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename T>
+ EA_CONSTEXPR bool is_aggregate_v = is_aggregate<T>::value;
+ #endif
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/type_detected.h b/EASTL/include/EASTL/internal/type_detected.h
new file mode 100644
index 0000000..e368a6f
--- /dev/null
+++ b/EASTL/include/EASTL/internal/type_detected.h
@@ -0,0 +1,180 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_DETECTED_H
+#define EASTL_INTERNAL_TYPE_DETECTED_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+#pragma once
+#endif
+
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////
+ // nonesuch
+ //
+ // Type given as a result from detected_t if the supplied arguments does not respect the constraint.
+ //
+ // https://en.cppreference.com/w/cpp/experimental/nonesuch
+ //
+ ///////////////////////////////////////////////////////////////////////
+ struct nonesuch
+ {
+ ~nonesuch() = delete;
+ nonesuch(nonesuch const&) = delete;
+ void operator=(nonesuch const&) = delete;
+ };
+
+ namespace internal
+ {
+ template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
+ struct detector
+ {
+ using type = Default;
+ using value_t = false_type;
+ };
+
+ template <class Default, template <class...> class Op, class... Args>
+ struct detector<Default, void_t<Op<Args...>>, Op, Args...>
+ {
+ using type = Op<Args...>;
+ using value_t = true_type;
+ };
+ } // namespace internal
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_detected
+ //
+ // Checks if some supplied arguments (Args) respect a constraint (Op).
+ // is_detected expands to true_type if the arguments respect the constraint, false_type otherwise.
+ // This helper is convenient to use for compile time introspection.
+ //
+ // https://en.cppreference.com/w/cpp/experimental/is_detected
+ //
+ // Example:
+ // template <class T, class U>
+ // using detect_can_use_addition_operator = decltype(declval<T>() + declval<U>());
+ //
+ // template <class T, class U>
+ // void sum(const T& t, const U& u)
+ // {
+ // static_assert(is_detected<detect_can_use_addition_operator, T, U>::value, "Supplied types cannot be summedtogether.");
+ // // or...
+ // static_assert(is_detected_v<detect_can_use_addition_operator, T, U>, "Supplied types cannot be summedtogether.");
+ // return t + u;
+ // }
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <template <class...> class Op, class... Args>
+ using is_detected = typename internal::detector<nonesuch, void, Op, Args...>::value_t;
+
+#if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <template <class...> class Op, class... Args>
+ EA_CONSTEXPR bool is_detected_v = is_detected<Op, Args...>::value;
+#endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // detected_t
+ //
+ // Check which type we obtain after expanding some arguments (Args) over a constraint (Op).
+ // If the constraint cannot be applied, the result type will be nonesuch.
+ //
+ // https://en.cppreference.com/w/cpp/experimental/is_detected
+ //
+ // Example:
+ // template <class T, class U>
+ // using detect_can_use_addition_operator = decltype(declval<T>() + declval<U>());
+ //
+ // using result_type = detected_t<detect_can_use_addition_operator, int, int>;
+ // // result_type == int
+ // using failed_result_type = detected_t<detect_can_use_addition_operator, int, string>;
+ // // failed_result_type == nonesuch
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <template <class...> class Op, class... Args>
+ using detected_t = typename internal::detector<nonesuch, void, Op, Args...>::type;
+
+ ///////////////////////////////////////////////////////////////////////
+ // detected_or
+ //
+ // Checks if some supplied arguments (Args) respect a constraint (Op).
+ // Expand to a struct that contains two type aliases:
+ // - type: the type we obtain after expanding some arguments (Args) over a constraint (Op).
+ // If the constraint cannot be applied, the result type will be the suplied Default type.
+ // - value_t: true_type if the arguments respect the constraint, false_type otherwise.
+ //
+ // https://en.cppreference.com/w/cpp/experimental/is_detected
+ //
+ // Example:
+ // template <class T, class U>
+ // using detected_calling_foo = decltype(declval<T>().foo());
+ //
+ // using result = detected_or<bool, detected_calling_foo, std::string>; // std::string doesn't have foo member.
+ // function.
+ // // result::type == bool
+ // // result::value_t == false_type
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <class Default, template <class...> class Op, class... Args>
+ using detected_or = internal::detector<Default, void, Op, Args...>;
+
+ ///////////////////////////////////////////////////////////////////////
+ // detected_or_t
+ //
+ // Equivalent to detected_or<Default, Op, Args...>::type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <class Default, template <class...> class Op, class... Args>
+ using detected_or_t = typename detected_or<Default, Op, Args...>::type;
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_detected_exact
+ //
+ // Check that the type we obtain after expanding some arguments (Args) over a constraint (Op) is equivalent to
+ // Expected.
+ //
+ // template <class T, class U>
+ // using detected_calling_size = decltype(declval<T>().size());
+ //
+ // using result = is_detected_exact<int, detected_calling_size, std::string>;
+ // result == false_type // std::string::size returns eastl_size_t which is not the same as int.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <class Expected, template <class...> class Op, class... Args>
+ using is_detected_exact = is_same<Expected, detected_t<Op, Args...>>;
+
+#if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class Expected, template <class...> class Op, class... Args>
+ EA_CONSTEXPR bool is_detected_exact_v = is_detected_exact<Expected, Op, Args...>::value;
+#endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_detected_convertible
+ //
+ // Check that the type we obtain after expanding some arguments (Args) over a constraint (Op) is convertible to
+ // Expected.
+ //
+ // template <class T, class U>
+ // using detected_calling_size = decltype(declval<T>().size());
+ //
+ // using result = is_detected_convertible<int, detected_calling_size, std::string>;
+ // result == true_type // std::string::size returns eastl_size_t which is convertible to int.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <class To, template <class...> class Op, class... Args>
+ using is_detected_convertible = is_convertible<detected_t<Op, Args...>, To>;
+
+#if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class To, template <class...> class Op, class... Args>
+ EA_CONSTEXPR bool is_detected_convertible_v = is_detected_convertible<To, Op, Args...>::value;
+#endif
+
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_TYPE_DETECTED_H \ No newline at end of file
diff --git a/EASTL/include/EASTL/internal/type_fundamental.h b/EASTL/include/EASTL/internal/type_fundamental.h
new file mode 100644
index 0000000..c99b70c
--- /dev/null
+++ b/EASTL/include/EASTL/internal/type_fundamental.h
@@ -0,0 +1,346 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_FUNDAMENTAL_H
+#define EASTL_INTERNAL_TYPE_FUNDAMENTAL_H
+
+
+#include <EABase/eabase.h>
+#include <EABase/nullptr.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_void
+ //
+ // is_void<T>::value == true if and only if T is one of the following types:
+ // [const][volatile] void
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_void_CONFORMANCE 1 // is_void is conforming.
+
+ template <typename T> struct is_void : public false_type{};
+
+ template <> struct is_void<void> : public true_type{};
+ template <> struct is_void<void const> : public true_type{};
+ template <> struct is_void<void volatile> : public true_type{};
+ template <> struct is_void<void const volatile> : public true_type{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_void_v = is_void<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_void_arg
+ //
+ // utility which identifies if any of the given template arguments is void.
+ //
+ // TODO(rparolin): refactor with fold expressions when C++17 compilers are widely available.
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename ...Args>
+ struct has_void_arg;
+
+ template <>
+ struct has_void_arg<>
+ : public eastl::false_type {};
+
+ template <typename A0, typename ...Args>
+ struct has_void_arg<A0, Args...>
+ { static const bool value = (eastl::is_void<A0>::value || eastl::has_void_arg<Args...>::value); };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_null_pointer
+ //
+ // C++14 type trait. Refers only to nullptr_t and not NULL (0).
+ // eastl::is_null_pointer<nullptr>::value == true
+ // eastl::is_null_pointer<std::nullptr_t>::value == true
+ // eastl::is_null_pointer<void*>::value == false
+ // eastl::is_null_pointer<NULL>::value == [cannot compile]
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(EA_COMPILER_CPP11_ENABLED) && !defined(EA_COMPILER_NO_DECLTYPE) && !defined(_MSC_VER) // VC++'s handling of decltype(nullptr) is broken.
+ #define EASTL_TYPE_TRAIT_is_null_pointer_CONFORMANCE 1
+
+ template <typename T>
+ struct is_null_pointer : public eastl::is_same<typename eastl::remove_cv<T>::type, decltype(nullptr)> {}; // A C++11 compiler defines nullptr, but you need a C++11 standard library to declare std::nullptr_t. So it's safer to compare against decltype(nullptr) than to use std::nullptr_t, because we may have a C++11 compiler but C++98 library (happens with Apple frequently).
+ #else
+ #define EASTL_TYPE_TRAIT_is_null_pointer_CONFORMANCE 1
+
+ template <typename T>
+ struct is_null_pointer : public eastl::is_same<typename eastl::remove_cv<T>::type, std::nullptr_t> {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_null_pointer_v = is_null_pointer<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_integral
+ //
+ // is_integral<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] bool
+ // [const] [volatile] char
+ // [const] [volatile] signed char
+ // [const] [volatile] unsigned char
+ // [const] [volatile] wchar_t
+ // [const] [volatile] short
+ // [const] [volatile] int
+ // [const] [volatile] long
+ // [const] [volatile] long long
+ // [const] [volatile] unsigned short
+ // [const] [volatile] unsigned int
+ // [const] [volatile] unsigned long
+ // [const] [volatile] unsigned long long
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_integral_CONFORMANCE 1 // is_integral is conforming.
+
+ template <typename T> struct is_integral_helper : public false_type{};
+
+ template <> struct is_integral_helper<unsigned char> : public true_type{};
+ template <> struct is_integral_helper<unsigned short> : public true_type{};
+ template <> struct is_integral_helper<unsigned int> : public true_type{};
+ template <> struct is_integral_helper<unsigned long> : public true_type{};
+ template <> struct is_integral_helper<unsigned long long> : public true_type{};
+
+ template <> struct is_integral_helper<signed char> : public true_type{};
+ template <> struct is_integral_helper<signed short> : public true_type{};
+ template <> struct is_integral_helper<signed int> : public true_type{};
+ template <> struct is_integral_helper<signed long> : public true_type{};
+ template <> struct is_integral_helper<signed long long> : public true_type{};
+
+ template <> struct is_integral_helper<bool> : public true_type{};
+ template <> struct is_integral_helper<char> : public true_type{};
+
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ template <> struct is_integral_helper<char8_t> : public true_type{};
+ #endif
+ #if defined(EA_CHAR16_NATIVE) && EA_CHAR16_NATIVE
+ template <> struct is_integral_helper<char16_t> : public true_type{};
+ #endif
+ #if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE
+ template <> struct is_integral_helper<char32_t> : public true_type{};
+ #endif
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type which is already handled above...
+ template <> struct is_integral_helper<wchar_t> : public true_type{};
+ #endif
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ template <> struct is_integral_helper<__int128_t> : public true_type{};
+ template <> struct is_integral_helper<__uint128_t> : public true_type{};
+ #endif
+
+ template <typename T>
+ struct is_integral : public eastl::is_integral_helper<typename eastl::remove_cv<T>::type>{};
+
+ #define EASTL_DECLARE_INTEGRAL(T) \
+ namespace eastl{ \
+ template <> struct is_integral<T> : public true_type{}; \
+ template <> struct is_integral<const T> : public true_type{}; \
+ template <> struct is_integral<volatile T> : public true_type{}; \
+ template <> struct is_integral<const volatile T> : public true_type{}; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_integral_v = is_integral<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_floating_point
+ //
+ // is_floating_point<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] float
+ // [const] [volatile] double
+ // [const] [volatile] long double
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_floating_point_CONFORMANCE 1 // is_floating_point is conforming.
+
+ template <typename T> struct is_floating_point_helper : public false_type{};
+
+ template <> struct is_floating_point_helper<float> : public true_type{};
+ template <> struct is_floating_point_helper<double> : public true_type{};
+ template <> struct is_floating_point_helper<long double> : public true_type{};
+
+ template <typename T>
+ struct is_floating_point : public eastl::is_floating_point_helper<typename eastl::remove_cv<T>::type>{};
+
+ #define EASTL_DECLARE_FLOATING_POINT(T) \
+ namespace eastl{ \
+ template <> struct is_floating_point<T> : public true_type{}; \
+ template <> struct is_floating_point<const T> : public true_type{}; \
+ template <> struct is_floating_point<volatile T> : public true_type{}; \
+ template <> struct is_floating_point<const volatile T> : public true_type{}; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_floating_point_v = is_floating_point<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_arithmetic
+ //
+ // is_arithmetic<T>::value == true if and only if:
+ // is_floating_point<T>::value == true, or
+ // is_integral<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_arithmetic_CONFORMANCE 1 // is_arithmetic is conforming.
+
+ template <typename T>
+ struct is_arithmetic
+ : public integral_constant<bool, is_integral<T>::value || is_floating_point<T>::value> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_arithmetic_v = is_arithmetic<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_fundamental
+ //
+ // is_fundamental<T>::value == true if and only if:
+ // is_floating_point<T>::value == true, or
+ // is_integral<T>::value == true, or
+ // is_void<T>::value == true
+ // is_null_pointer<T>::value == true
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_fundamental_CONFORMANCE 1 // is_fundamental is conforming.
+
+ template <typename T>
+ struct is_fundamental
+ : public bool_constant<is_void_v<T> || is_integral_v<T> || is_floating_point_v<T> || is_null_pointer_v<T>> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_fundamental_v = is_fundamental<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_hat_type
+ //
+ // is_hat_type<T>::value == true if and only if:
+ // underlying type is a C++/CX '^' type such as: Foo^
+ // meaning the type is heap allocated and ref-counted
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T> struct is_hat_type_helper : public false_type {};
+
+ #if (EABASE_VERSION_N > 20607 && defined(EA_COMPILER_WINRTCX_ENABLED)) || defined(__cplusplus_winrt)
+ template <typename T> struct is_hat_type_helper<T^> : public true_type{};
+ #endif
+
+ template <typename T>
+ struct is_hat_type : public eastl::is_hat_type_helper<T> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_hat_type_v = is_hat_type<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_enum
+ //
+ // is_enum<T>::value == true if and only if T is an enumeration type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_enum)))
+ #define EASTL_TYPE_TRAIT_is_enum_CONFORMANCE 1 // is_enum is conforming.
+
+ template <typename T>
+ struct is_enum : public integral_constant<bool, __is_enum(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_enum_CONFORMANCE 1 // is_enum is conforming.
+
+ struct int_convertible{ int_convertible(int); };
+
+ template <bool is_arithmetic_or_reference>
+ struct is_enum_helper { template <typename T> struct nest : public is_convertible<T, int_convertible>{}; };
+
+ template <>
+ struct is_enum_helper<true> { template <typename T> struct nest : public false_type {}; };
+
+ template <typename T>
+ struct is_enum_helper2
+ {
+ typedef type_or<is_arithmetic<T>::value, is_reference<T>::value, is_class<T>::value> selector;
+ typedef is_enum_helper<selector::value> helper_t;
+ typedef typename add_reference<T>::type ref_t;
+ typedef typename helper_t::template nest<ref_t> result;
+ };
+
+ template <typename T>
+ struct is_enum : public integral_constant<bool, is_enum_helper2<T>::result::value>{};
+
+ template <> struct is_enum<void> : public false_type {};
+ template <> struct is_enum<void const> : public false_type {};
+ template <> struct is_enum<void volatile> : public false_type {};
+ template <> struct is_enum<void const volatile> : public false_type {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_enum_v = is_enum<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_ENUM(T) namespace eastl{ template <> struct is_enum<T> : public true_type{}; template <> struct is_enum<const T> : public true_type{}; }
+
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/type_pod.h b/EASTL/include/EASTL/internal/type_pod.h
new file mode 100644
index 0000000..fef5511
--- /dev/null
+++ b/EASTL/include/EASTL/internal/type_pod.h
@@ -0,0 +1,1948 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_POD_H
+#define EASTL_INTERNAL_TYPE_POD_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <limits.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////
+ // is_empty
+ //
+ // is_empty<T>::value == true if and only if T is an empty class or struct.
+ // is_empty may only be applied to complete types.
+ //
+ // is_empty cannot be used with union types until is_union can be made to work.
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_empty)))
+ #define EASTL_TYPE_TRAIT_is_empty_CONFORMANCE 1 // is_empty is conforming.
+
+ template <typename T>
+ struct is_empty : public integral_constant<bool, __is_empty(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_empty_CONFORMANCE 1 // is_empty is fully conforming.
+
+ template <typename T>
+ struct is_empty_helper_t1 : public T { char m[64]; };
+ struct is_empty_helper_t2 { char m[64]; };
+
+ // The inheritance in empty_helper_t1 will not work with non-class types
+ template <typename T, bool is_a_class = false>
+ struct is_empty_helper : public eastl::false_type{};
+
+ template <typename T>
+ struct is_empty_helper<T, true> : public eastl::integral_constant<bool,
+ sizeof(is_empty_helper_t1<T>) == sizeof(is_empty_helper_t2)
+ >{};
+
+ template <typename T>
+ struct is_empty_helper2
+ {
+ typedef typename eastl::remove_cv<T>::type _T;
+ typedef eastl::is_empty_helper<_T, eastl::is_class<_T>::value> type;
+ };
+
+ template <typename T>
+ struct is_empty : public eastl::is_empty_helper2<T>::type {};
+ #endif
+
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_empty_v = is_empty<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_pod
+ //
+ // is_pod<T>::value == true if and only if, for a given type T:
+ // - is_scalar<T>::value == true, or
+ // - T is a class or struct that has no user-defined copy assignment
+ // operator or destructor, and T has no non-static data members M for
+ // which is_pod<M>::value == false, and no members of reference type, or
+ // - T is the type of an array of objects E for which is_pod<E>::value == true
+ //
+ // is_pod may only be applied to complete types.
+ //
+ // Without some help from the compiler or user, is_pod will not report
+ // that a struct or class is a POD, but will correctly report that
+ // built-in types such as int are PODs. The user can help the compiler
+ // by using the EASTL_DECLARE_POD macro on a class.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(EA_COMPILER_MSVC)
+ #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 1 // is_pod is conforming. Actually as of VS2008 it is apparently not fully conforming, as it flags the following as a non-pod: struct Pod{ Pod(){} };
+
+ EA_DISABLE_VC_WARNING(4647)
+ template <typename T> // We check for has_trivial_constructor only because the VC++ is_pod does. Is it due to some compiler bug?
+ struct is_pod : public eastl::integral_constant<bool, (__has_trivial_constructor(T) && __is_pod(T) && !eastl::is_hat_type<T>::value) || eastl::is_void<T>::value || eastl::is_scalar<T>::value>{};
+ EA_RESTORE_VC_WARNING()
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_pod)))
+ #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 1 // is_pod is conforming.
+
+ template <typename T>
+ struct is_pod : public eastl::integral_constant<bool, __is_pod(T) || eastl::is_void<T>::value || eastl::is_scalar<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 0 // is_pod is not conforming. Can return false negatives.
+
+ template <typename T> // There's not much we can do here without some compiler extension.
+ struct is_pod : public eastl::integral_constant<bool, eastl::is_void<T>::value || eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value>{};
+ #endif
+
+ template <typename T, size_t N>
+ struct is_pod<T[N]> : public is_pod<T>{};
+
+ template <typename T>
+ struct is_POD : public is_pod<T>{}; // Backwards compatibility.
+
+ #define EASTL_DECLARE_IS_POD(T, isPod) \
+ namespace eastl { \
+ template <> struct is_pod<T> : public eastl::integral_constant<bool, isPod> { }; \
+ template <> struct is_pod<const T> : public eastl::integral_constant<bool, isPod> { }; \
+ template <> struct is_pod<volatile T> : public eastl::integral_constant<bool, isPod> { }; \
+ template <> struct is_pod<const volatile T> : public eastl::integral_constant<bool, isPod> { }; \
+ }
+
+ // Old style macro, for bacwards compatibility:
+ #define EASTL_DECLARE_POD(T) namespace eastl{ template <> struct is_pod<T> : public true_type{}; template <> struct is_pod<const T> : public true_type{}; }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_pod_v = is_pod<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_standard_layout
+ //
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(EA_COMPILER_MSVC) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_standard_layout)))
+ #define EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE 1 // is_standard_layout is conforming.
+
+ template <typename T>
+ struct is_standard_layout : public eastl::integral_constant<bool, __is_standard_layout(T) || eastl::is_void<T>::value || eastl::is_scalar<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE 0 // is_standard_layout is not conforming. Can return false negatives.
+
+ template <typename T> // There's not much we can do here without some compiler extension.
+ struct is_standard_layout : public eastl::integral_constant<bool, is_void<T>::value || is_scalar<T>::value>{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_standard_layout_v = is_standard_layout<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_IS_STANDARD_LAYOUT(T, isStandardLayout) \
+ namespace eastl { \
+ template <> struct is_standard_layout<T> : public eastl::integral_constant<bool, isStandardLayout> { }; \
+ template <> struct is_standard_layout<const T> : public eastl::integral_constant<bool, isStandardLayout> { }; \
+ template <> struct is_standard_layout<volatile T> : public eastl::integral_constant<bool, isStandardLayout> { }; \
+ template <> struct is_standard_layout<const volatile T> : public eastl::integral_constant<bool, isStandardLayout> { }; \
+ }
+
+ // Old style macro, for bacwards compatibility:
+ #define EASTL_DECLARE_STANDARD_LAYOUT(T) namespace eastl{ template <> struct is_standard_layout<T> : public true_type{}; template <> struct is_standard_layout<const T> : public true_type{}; }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_constructor
+ //
+ // has_trivial_constructor<T>::value == true if and only if T is a class
+ // or struct that has a trivial constructor. A constructor is trivial if
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_constructor<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_constructor<M>::value == true,
+ // where M is the type of the data member
+ //
+ // has_trivial_constructor may only be applied to complete types.
+ //
+ // Without from the compiler or user, has_trivial_constructor will not
+ // report that a class or struct has a trivial constructor.
+ // The user can use EASTL_DECLARE_TRIVIAL_CONSTRUCTOR to help the compiler.
+ //
+ // A default constructor for a class X is a constructor of class X that
+ // can be called without an argument.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600) && !defined(EA_COMPILER_CLANG_CL) // VS2010+
+ #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 1 // has_trivial_constructor is conforming.
+
+ template <typename T>
+ struct has_trivial_constructor : public eastl::integral_constant<bool, (__has_trivial_constructor(T) || eastl::is_pod<T>::value) && !eastl::is_hat_type<T>::value>{};
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 1 // has_trivial_constructor is conforming.
+
+ template <typename T>
+ struct has_trivial_constructor : public eastl::integral_constant<bool, __has_trivial_constructor(T) || eastl::is_pod<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 0 // has_trivial_constructor is not fully conforming. Can return false negatives.
+
+ // With current compilers, this is all we can do.
+ template <typename T>
+ struct has_trivial_constructor : public eastl::is_pod<T> {};
+ #endif
+
+ #define EASTL_DECLARE_HAS_TRIVIAL_CONSTRUCTOR(T, hasTrivialConstructor) \
+ namespace eastl { \
+ template <> struct has_trivial_constructor<T> : public eastl::integral_constant<bool, hasTrivialConstructor> { }; \
+ }
+
+ // Old style macro, for bacwards compatibility:
+ #define EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(T) namespace eastl{ template <> struct has_trivial_constructor<T> : public true_type{}; template <> struct has_trivial_constructor<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_copy
+ //
+ // has_trivial_copy<T>::value == true if and only if T is a class or
+ // struct that has a trivial copy constructor. A copy constructor is
+ // trivial if
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_copy<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_copy<M>::value == true, where M is the
+ // type of the data member
+ //
+ // has_trivial_copy may only be applied to complete types.
+ //
+ // Another way of looking at this is:
+ // A copy constructor for class X is trivial if it is implicitly
+ // declared and if all the following are true:
+ // - Class X has no virtual functions (10.3) and no virtual base classes (10.1).
+ // - Each direct base class of X has a trivial copy constructor.
+ // - For all the nonstatic data members of X that are of class type
+ // (or array thereof), each such class type has a trivial copy constructor;
+ // otherwise the copy constructor is nontrivial.
+ //
+ // Without help from the compiler or user, has_trivial_copy will not report
+ // that a class or struct has a trivial copy constructor. The user can
+ // use EASTL_DECLARE_TRIVIAL_COPY to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && !defined(EA_COMPILER_CLANG_CL)
+ #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 1 // has_trivial_copy is conforming.
+
+ template <typename T>
+ struct has_trivial_copy : public eastl::integral_constant<bool, (__has_trivial_copy(T) || eastl::is_pod<T>::value) && !eastl::is_volatile<T>::value && !eastl::is_hat_type<T>::value>{};
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 1 // has_trivial_copy is conforming.
+
+ template <typename T>
+ struct has_trivial_copy : public eastl::integral_constant<bool, (__has_trivial_copy(T) || eastl::is_pod<T>::value) && (!eastl::is_volatile<T>::value && !eastl::is_reference<T>::value)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 0 // has_trivial_copy is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_trivial_copy : public eastl::integral_constant<bool, eastl::is_pod<T>::value && !eastl::is_volatile<T>::value>{};
+ #endif
+
+ #define EASTL_DECLARE_HAS_TRIVIAL_COPY(T, hasTrivialCopy) \
+ namespace eastl { \
+ template <> struct has_trivial_copy<T> : public eastl::integral_constant<bool, hasTrivialCopy> { }; \
+ }
+
+ // Old style macro, for bacwards compatibility:
+ #define EASTL_DECLARE_TRIVIAL_COPY(T) namespace eastl{ template <> struct has_trivial_copy<T> : public true_type{}; template <> struct has_trivial_copy<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_assign
+ //
+ // has_trivial_assign<T>::value == true if and only if T is a class or
+ // struct that has a trivial copy assignment operator. A copy assignment
+ // operator is trivial if:
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_assign<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_assign<M>::value == true, where M is
+ // the type of the data member.
+ //
+ // has_trivial_assign may only be applied to complete types.
+ //
+ // Without from the compiler or user, has_trivial_assign will not
+ // report that a class or struct has trivial assignment. The user
+ // can use EASTL_DECLARE_TRIVIAL_ASSIGN to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600) && !defined(EA_COMPILER_CLANG_CL)
+ #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 1 // has_trivial_assign is conforming.
+
+ template <typename T>
+ struct has_trivial_assign : public integral_constant<bool, (__has_trivial_assign(T) || eastl::is_pod<T>::value) && !eastl::is_const<T>::value && !eastl::is_volatile<T>::value && !eastl::is_hat_type<T>::value>{};
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 1 // has_trivial_assign is conforming.
+
+ template <typename T>
+ struct has_trivial_assign : public integral_constant<bool, (__has_trivial_assign(T) || eastl::is_pod<T>::value) && !eastl::is_const<T>::value && !eastl::is_volatile<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 0 // is_pod is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_trivial_assign : public integral_constant<bool,
+ is_pod<T>::value && !is_const<T>::value && !is_volatile<T>::value
+ >{};
+ #endif
+
+ #define EASTL_DECLARE_HAS_TRIVIAL_ASSIGN(T, hasTrivialAssign) \
+ namespace eastl { \
+ template <> struct has_trivial_assign<T> : public eastl::integral_constant<bool, hasTrivialAssign> { }; \
+ }
+
+ // Old style macro, for bacwards compatibility:
+ #define EASTL_DECLARE_TRIVIAL_ASSIGN(T) namespace eastl{ template <> struct has_trivial_assign<T> : public true_type{}; template <> struct has_trivial_assign<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_destructor
+ //
+ // has_trivial_destructor<T>::value == true if and only if T is a class
+ // or struct that has a trivial destructor. A destructor is trivial if
+ // - it is implicitly defined by the compiler, and
+ // - for every direct base class of T, has_trivial_destructor<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or
+ // array of class type, has_trivial_destructor<M>::value == true,
+ // where M is the type of the data member
+ //
+ // has_trivial_destructor may only be applied to complete types.
+ //
+ // Without from the compiler or user, has_trivial_destructor will not
+ // report that a class or struct has a trivial destructor.
+ // The user can use EASTL_DECLARE_TRIVIAL_DESTRUCTOR to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600) && !defined(EA_COMPILER_CLANG_CL)
+ #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 1 // has_trivial_destructor is conforming.
+
+ template <typename T>
+ struct has_trivial_destructor : public eastl::integral_constant<bool, (__has_trivial_destructor(T) || eastl::is_pod<T>::value) && !eastl::is_hat_type<T>::value>{};
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 1 // has_trivial_destructor is conforming.
+
+ template <typename T>
+ struct has_trivial_destructor : public eastl::integral_constant<bool, __has_trivial_destructor(T) || eastl::is_pod<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 0 // is_pod is not fully conforming. Can return false negatives.
+
+ // With current compilers, this is all we can do.
+ template <typename T>
+ struct has_trivial_destructor : public eastl::is_pod<T>{};
+ #endif
+
+ #define EASTL_DECLARE_HAS_TRIVIAL_DESTRUCTOR(T, hasTrivialDestructor) \
+ namespace eastl { \
+ template <> struct has_trivial_destructor<T> : public eastl::integral_constant<bool, hasTrivialDestructor> { }; \
+ }
+
+ // Old style macro, for bacwards compatibility:
+ #define EASTL_DECLARE_TRIVIAL_DESTRUCTOR(T) namespace eastl{ template <> struct has_trivial_destructor<T> : public true_type{}; template <> struct has_trivial_destructor<const T> : public true_type{}; }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool has_trivial_destructor_v = has_trivial_destructor<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_relocate
+ //
+ // This is an EA extension to the type traits standard.
+ // This trait is deprecated under conforming C++11 compilers, as C++11
+ // move functionality supercedes this functionality and we want to
+ // migrate away from it in the future.
+ //
+ // A trivially relocatable object is one that can be safely memmove'd
+ // to uninitialized memory. construction, assignment, and destruction
+ // properties are not addressed by this trait. A type that has the
+ // is_fundamental trait would always have the has_trivial_relocate trait.
+ // A type that has the has_trivial_constructor, has_trivial_copy or
+ // has_trivial_assign traits would usally have the has_trivial_relocate
+ // trait, but this is not strictly guaranteed.
+ //
+ // The user can use EASTL_DECLARE_TRIVIAL_RELOCATE to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_has_trivial_relocate_CONFORMANCE 0 // is_pod is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_trivial_relocate : public eastl::bool_constant<eastl::is_pod_v<T> && !eastl::is_volatile_v<T>> {};
+
+ #define EASTL_DECLARE_TRIVIAL_RELOCATE(T) namespace eastl{ template <> struct has_trivial_relocate<T> : public true_type{}; template <> struct has_trivial_relocate<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_nothrow_constructor
+ //
+ // has_nothrow_constructor<T>::value == true if and only if T is a
+ // class or struct whose default constructor has an empty throw specification.
+ //
+ // has_nothrow_constructor may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 1
+
+ template <typename T>
+ struct has_nothrow_constructor
+ : public eastl::integral_constant<bool, __has_nothrow_constructor(T)>{};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER)
+ // Microsoft's implementation of __has_nothrow_constructor is crippled and returns true only if T is a class that has an explicit constructor.
+ // "Returns true if the default constructor has an empty exception specification."
+ #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 0
+
+ template <typename T> // This is mistakenly returning true for an unbounded array of scalar type.
+ struct has_nothrow_constructor : public eastl::integral_constant<bool, __has_nothrow_constructor(T) || eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value>{};
+
+ #else
+ #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 0 // has_nothrow_constructor is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_nothrow_constructor // To do: Improve this to include other types that can work.
+ { static const bool value = eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value; };
+ #endif
+
+ #define EASTL_DECLARE_HAS_NOTHROW_CONSTRUCTOR(T, hasNothrowConstructor) \
+ namespace eastl { \
+ template <> struct has_nothrow_constructor<T> : public eastl::integral_constant<bool, hasNothrowConstructor> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_nothrow_copy
+ //
+ // has_nothrow_copy<T>::value == true if and only if T is a class or
+ // struct whose copy constructor has an empty throw specification.
+ //
+ // has_nothrow_copy may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 1
+
+ template <typename T>
+ struct has_nothrow_copy : public eastl::integral_constant<bool, __has_nothrow_copy(T)>{};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER)
+ // Microsoft's implementation of __has_nothrow_copy is crippled and returns true only if T is a class that has a copy constructor.
+ // "Returns true if the copy constructor has an empty exception specification."
+ #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 0
+
+ template <typename T>
+ struct has_nothrow_copy : public eastl::integral_constant<bool, __has_nothrow_copy(T) || eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value>{};
+
+ #else
+ #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 0 // has_nothrow_copy is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_nothrow_copy // To do: Improve this to include other types that can work.
+ { static const bool value = eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value; };
+ #endif
+
+ #define EASTL_DECLARE_HAS_NOTHROW_COPY(T, hasNothrowCopy) \
+ namespace eastl { \
+ template <> struct has_nothrow_copy<T> : public eastl::integral_constant<bool, hasNothrowCopy> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_nothrow_assign
+ //
+ // has_nothrow_assign<T>::value == true if and only if T is a class or
+ // struct whose copy assignment operator has an empty throw specification.
+ //
+ // has_nothrow_assign may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 1
+
+ template <typename T>
+ struct has_nothrow_assign : public eastl::integral_constant<bool, __has_nothrow_assign(T)>{};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER)
+ // Microsoft's implementation of __has_nothrow_assign is crippled and returns true only if T is a class that has an assignment operator.
+ // "Returns true if a copy assignment operator has an empty exception specification."
+ #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 0
+
+ template <typename T> // This is mistakenly returning true for an unbounded array of scalar type.
+ struct has_nothrow_assign : public eastl::integral_constant<bool, __has_nothrow_assign(T) || eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 0 // has_nothrow_assign is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_nothrow_assign // To do: Improve this to include other types that can work.
+ { static const bool value = eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value; } ;
+ #endif
+
+ #define EASTL_DECLARE_HAS_NOTHROW_ASSIGN(T, hasNothrowAssign) \
+ namespace eastl { \
+ template <> struct has_nothrow_assign<T> : public eastl::integral_constant<bool, hasNothrowAssign> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_virtual_destructor
+ //
+ // has_virtual_destructor<T>::value == true if and only if T is a class
+ // or struct with a virtual destructor.
+ //
+ // has_virtual_destructor may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_has_virtual_destructor_CONFORMANCE 1
+
+ template <typename T>
+ struct has_virtual_destructor : public eastl::integral_constant<bool, __has_virtual_destructor(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_virtual_destructor_CONFORMANCE 0 // has_virtual_destructor is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_virtual_destructor : public eastl::false_type{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool has_virtual_destructor_v = has_virtual_destructor<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_HAS_VIRTUAL_DESTRUCTOR(T, hasVirtualDestructor) \
+ namespace eastl { \
+ template <> struct has_virtual_destructor<T> : public eastl::integral_constant<bool, hasVirtualDestructor> { }; \
+ template <> struct has_virtual_destructor<const T> : public eastl::integral_constant<bool, hasVirtualDestructor> { }; \
+ template <> struct has_virtual_destructor<volatile T> : public eastl::integral_constant<bool, hasVirtualDestructor> { }; \
+ template <> struct has_virtual_destructor<const volatile T> : public eastl::integral_constant<bool, hasVirtualDestructor> { }; \
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_literal_type
+ //
+ // See the C++11 Standard, section 2.9,p10.
+ // A type is a literal type if it is:
+ // - a scalar type; or
+ // - a reference type referring to a literal type; or
+ // - an array of literal type; or
+ // - a class type (Clause 9) that has all of the following properties:
+ // - it has a trivial destructor,
+ // - every constructor call and full-expression in the brace-or-equal-initializer s for non-static data members (if any) is a constant expression (5.19),
+ // - it is an aggregate type (8.5.1) or has at least one constexpr constructor or constructor template that is not a copy or move constructor, and
+ // - all of its non-static data members and base classes are of literal types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_literal))
+ #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 1
+
+ template <typename T>
+ struct is_literal_type : public eastl::integral_constant<bool, __is_literal(T)>{};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)) || (defined(_MSC_VER) && (_MSC_VER >= 1700))) // VS2012+
+ #if defined(EA_COMPILER_GNUC) && (!defined(EA_COMPILER_CPP11_ENABLED) || (EA_COMPILER_VERSION < 4007))
+ #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 0 // It seems that in this case GCC supports the compiler intrinsic but reports it as false when it's true.
+ #else
+ #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 1
+ #endif
+
+ template <typename T>
+ struct is_literal_type : public eastl::integral_constant<bool, __is_literal_type(T)>{};
+
+ #else
+ #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 0
+
+ // It's not clear if this trait can be fully implemented without explicit compiler support.
+ // For now we assume that it can't be but implement something that gets it right at least
+ // some of the time. Recall that partial positives and false negatives are OK (though not ideal),
+ // while false positives are not OK for us to generate.
+
+ template <typename T> // This is not a complete implementation and will be true for only some literal types (the basic ones).
+ struct is_literal_type : public eastl::integral_constant<bool, eastl::is_scalar<typename eastl::remove_reference<typename eastl::remove_all_extents<T>::type>::type>::value>{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_literal_type_v = is_literal_type<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_abstract
+ //
+ // is_abstract<T>::value == true if and only if T is a class or struct
+ // that has at least one pure virtual function. is_abstract may only
+ // be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_abstract)))
+ #define EASTL_TYPE_TRAIT_is_abstract_CONFORMANCE 1 // is_abstract is conforming.
+
+ template <typename T>
+ struct is_abstract : public integral_constant<bool, __is_abstract(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_abstract_CONFORMANCE 0
+
+ template<typename T, bool = !eastl::is_object<T>::value>
+ class is_abstract_helper
+ {
+ template<typename>
+ static eastl::yes_type test(...);
+
+ template<typename T1>
+ static eastl::no_type test(T1(*)[1]); // The following: 'typedef SomeAbstractClass (*SomeFunctionType)[1];' is invalid (can't have an array of abstract types) and thus doesn't choose this path.
+
+ public:
+ static const bool value = (sizeof(test<T>(NULL)) == sizeof(eastl::yes_type));
+ };
+
+ template <typename T>
+ struct is_abstract_helper<T, true>
+ { static const bool value = false; };
+
+ template <typename T>
+ struct is_abstract
+ : public integral_constant<bool, is_abstract_helper<T>::value> { };
+
+ #endif
+
+ #define EASTL_DECLARE_IS_ABSTRACT(T, isAbstract) \
+ namespace eastl { \
+ template <> struct is_abstract<T> : public eastl::integral_constant<bool, isAbstract> { }; \
+ template <> struct is_abstract<const T> : public eastl::integral_constant<bool, isAbstract> { }; \
+ template <> struct is_abstract<volatile T> : public eastl::integral_constant<bool, isAbstract> { }; \
+ template <> struct is_abstract<const volatile T> : public eastl::integral_constant<bool, isAbstract> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_abstract_v = is_abstract<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_copyable
+ //
+ // T is a trivially copyable type (3.9) T shall be a complete type,
+ // (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ // 3.9,p3: For any trivially copyable type T, if two pointers to T
+ // point to distinct T objects obj1 and obj2, where neither obj1 nor
+ // obj2 is a base-class subobject, if the underlying bytes making
+ // up obj1 are copied into obj2, obj2 shall subsequently hold the
+ // same value as obj1. In other words, you can memcpy/memmove it.
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(_MSC_VER) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 5003)) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_trivially_copyable)))
+ #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 1
+
+ // https://connect.microsoft.com/VisualStudio/feedback/details/808827/c-std-is-trivially-copyable-produces-wrong-result-for-arrays
+ //
+ // From Microsoft:
+ // We're working on fixing this. When overhauling <type_traits> in VC 2013, I incorrectly believed that is_trivially_copyable was a synonym
+ // for is_trivially_copy_constructible. I've asked the compiler team to provide a compiler hook with 100% accurate answers. (Currently, the
+ // compiler hook has incorrect answers for volatile scalars, volatile data members, and various scenarios for defaulted/deleted/private
+ // special member functions - I wrote an exhaustive test case to exercise the complicated Standardese.) When the compiler hook is fixed,
+ // I'll change <type_traits> to invoke it.
+ //
+ // Microsoft broken VS2013 STL implementation:
+ // template<class _Ty>
+ // struct is_trivially_copyable
+ // : is_trivially_copy_constructible<_Ty>::type
+ // { // determine whether _Ty has a trivial copy constructor
+ // };
+ //
+
+ template <typename T>
+ struct is_trivially_copyable : public bool_constant<__is_trivially_copyable(T)> {};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_GNUC))
+ #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 1
+
+ // Micrsoft (prior to VS2012) and GCC have __has_trivial_copy, but it may not be identical with the goals of this type trait.
+ template <typename T>
+ struct is_trivially_copyable : public integral_constant<bool, (__has_trivial_copy(T) || eastl::is_pod<typename eastl::remove_all_extents<T>::type>::value) && (!eastl::is_void<T>::value && !eastl::is_volatile<T>::value && !eastl::is_reference<T>::value)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 0 // Generates false negatives.
+
+ template <typename T>
+ struct is_trivially_copyable { static const bool value = eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value; };
+ #endif
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_COPYABLE(T, isTriviallyCopyable) \
+ namespace eastl { \
+ template <> struct is_trivially_copyable<T> : public eastl::integral_constant<bool, isTriviallyCopyable> { }; \
+ template <> struct is_trivially_copyable<const T> : public eastl::integral_constant<bool, isTriviallyCopyable> { }; \
+ template <> struct is_trivially_copyable<volatile T> : public eastl::integral_constant<bool, isTriviallyCopyable> { }; \
+ template <> struct is_trivially_copyable<const volatile T> : public eastl::integral_constant<bool, isTriviallyCopyable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_copyable_v = is_trivially_copyable<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_constructible
+ //
+ // See the C++11 Standard, section 20.9.4.3,p6.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE 1
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_constructible)))
+ template<typename T, typename... Args>
+ struct is_constructible : public bool_constant<__is_constructible(T, Args...) > {};
+ #else
+ // We implement a copy of move here has move_internal. We are currently stuck doing this because our move
+ // implementation is in <utility.h> and <utility.h> currently #includes us, and so we have a header
+ // chicken-and-egg problem. To do: Resolve this, probably by putting eastl::move somewhere else.
+ template <typename T>
+ inline typename eastl::remove_reference<T>::type&& move_internal(T&& x) EA_NOEXCEPT
+ { return ((typename eastl::remove_reference<T>::type&&)x); }
+
+ template <typename T, class ...Args>
+ typename first_type_select<eastl::true_type, decltype(eastl::move_internal(T(eastl::declval<Args>()...)))>::type is(T&&, Args&& ...);
+
+ template <typename T>
+ struct can_construct_scalar_helper
+ {
+ static eastl::true_type can(T);
+ static eastl::false_type can(...);
+ };
+
+ template <typename ...Args>
+ eastl::false_type is(argument_sink, Args&& ...);
+
+ // Except for scalars and references (handled below), check for constructibility via decltype.
+ template <bool, typename T, typename... Args>
+ struct is_constructible_helper_2 // argument_sink will catch all T that is not constructible from the Args and denote false_type
+ : public eastl::identity<decltype(is(eastl::declval<T>(), eastl::declval<Args>()...))>::type {};
+
+ template <typename T>
+ struct is_constructible_helper_2<true, T>
+ : public eastl::is_scalar<T> {};
+
+ template <typename T, typename Arg0> // We handle the case of multiple arguments below (by disallowing them).
+ struct is_constructible_helper_2<true, T, Arg0>
+ : public eastl::identity<decltype(can_construct_scalar_helper<T>::can(eastl::declval<Arg0>()))>::type {};
+
+ // Scalars and references can be constructed only with 0 or 1 argument. e.g the following is an invalid expression: int(17, 23)
+ template <typename T, typename Arg0, typename ...Args>
+ struct is_constructible_helper_2<true, T, Arg0, Args...>
+ : public eastl::false_type {};
+
+ template <bool, typename T, typename... Args>
+ struct is_constructible_helper_1
+ : public is_constructible_helper_2<eastl::is_scalar<T>::value || eastl::is_reference<T>::value, T, Args...> {};
+
+ // Unilaterally dismiss void, abstract, unknown bound arrays, and function types as not constructible.
+ template <typename T, typename... Args>
+ struct is_constructible_helper_1<true, T, Args...>
+ : public false_type {};
+
+ // is_constructible
+ template <typename T, typename... Args>
+ struct is_constructible
+ : public is_constructible_helper_1<(eastl::is_abstract<typename eastl::remove_all_extents<T>::type>::value ||
+ eastl::is_array_of_unknown_bounds<T>::value ||
+ eastl::is_function<typename eastl::remove_all_extents<T>::type>::value ||
+ eastl::has_void_arg<T, Args...>::value),
+ T, Args...> {};
+
+ // Array types are constructible if constructed with no arguments and if their element type is default-constructible
+ template <typename Array, size_t N>
+ struct is_constructible_helper_2<false, Array[N]>
+ : public eastl::is_constructible<typename eastl::remove_all_extents<Array>::type> {};
+
+ // Arrays with arguments are not constructible. e.g. the following is an invalid expression: int[3](37, 34, 12)
+ template <typename Array, size_t N, typename ...Args>
+ struct is_constructible_helper_2<false, Array[N], Args...>
+ : public eastl::false_type {};
+
+ #endif
+
+
+ // You need to manually declare const/volatile variants individually if you want them.
+ #define EASTL_DECLARE_IS_CONSTRUCTIBLE(T, U, isConstructible) \
+ namespace eastl { \
+ template <> struct is_constructible<T, U> : public eastl::integral_constant<bool, isConstructible> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class... Args>
+ EA_CONSTEXPR bool is_constructible_v = is_constructible<T, Args...>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_constructible
+ //
+ // is_constructible<T, Args...>::value is true and the variable definition
+ // for is_constructible, as defined below, is known to call no operation
+ // that is not trivial (3.9, 12). T and all types in the parameter pack
+ // Args shall be complete types, (possibly cv-qualified) void, or arrays
+ // of unknown bound.
+ //
+ // Note:
+ // C++11's is_trivially_constructible sounds the same as the pre-standard
+ // has_trivial_constructor type trait (which we also support here). However,
+ // the definition of has_trivial_constructor has never been formally standardized
+ // and so we can't just blindly equate the two to each other. Since we are
+ // moving forward with C++11 and deprecating the old type traits, we leave
+ // the old ones as-is, though we defer to them in cases where we don't seem
+ // to have a good alternative.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES)
+
+ #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 0
+
+ // In this version we allow only zero or one argument (Arg). We can add more arguments
+ // by creating a number of extra specializations. It's probably not possible to
+ // simplify the implementation with recursive templates because ctor argument
+ // presence is specific.
+ //
+ // To consider: we can fold the two implementations below by making a macro that's defined
+ // has __is_trivially_constructible(T) or eastl::has_trivial_copy<T>::value, depending on
+ // whether the __is_trivially_constructible compiler intrinsic is available.
+
+ // If the compiler has this trait built-in (which ideally all compilers would have since it's necessary for full conformance) use it.
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_trivially_constructible)) || defined(EA_COMPILER_MSVC))
+
+ template <typename T, typename Arg0 = eastl::unused>
+ struct is_trivially_constructible
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, eastl::unused>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T&>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const T&>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, volatile T&>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const volatile T&>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ #else
+
+ template <typename T, typename Arg0 = eastl::unused>
+ struct is_trivially_constructible
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, eastl::unused>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_constructor<typename eastl::remove_all_extents<T>::type>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, volatile T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const volatile T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ #endif
+
+ #else
+
+ // If the compiler has this trait built-in (which ideally all compilers would have since it's necessary for full conformance) use it.
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_trivially_constructible)) || defined(EA_COMPILER_MSVC))
+ #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 1
+
+ // We have a problem with clang here as of clang 3.4: __is_trivially_constructible(int[]) is false, yet I believe it should be true.
+ // Until it gets resolved, what we do is check for is_constructible along with __is_trivially_constructible().
+ template <typename T, typename... Args>
+ struct is_trivially_constructible
+ : public eastl::integral_constant<bool, eastl::is_constructible<T, Args...>::value && __is_trivially_constructible(T, Args...)> {};
+
+ #else
+
+ #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 0 // This is 0 but in fact it will work for most real-world cases due to the has_trivial_constructor specialization below.
+
+ template <typename T, typename... Args>
+ struct is_trivially_constructible
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_trivially_constructible<T>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_constructor<typename eastl::remove_all_extents<T>::type>::value> {};
+
+ // It's questionable whether we can use has_trivial_copy here, as it could theoretically create a false-positive.
+ template <typename T>
+ struct is_trivially_constructible<T, T>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T&&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, volatile T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const volatile T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ #endif
+
+ #endif
+
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_CONSTRUCTIBLE(T, isTriviallyConstructible) \
+ namespace eastl { \
+ template <> struct is_trivially_constructible<T> : public eastl::integral_constant<bool, isTriviallyConstructible> { }; \
+ template <> struct is_trivially_constructible<const T> : public eastl::integral_constant<bool, isTriviallyConstructible> { }; \
+ template <> struct is_trivially_constructible<volatile T> : public eastl::integral_constant<bool, isTriviallyConstructible> { }; \
+ template <> struct is_trivially_constructible<const volatile T> : public eastl::integral_constant<bool, isTriviallyConstructible> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_constructible_v = is_trivially_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_default_constructible
+ //
+ // is_trivially_constructible<T>::value is true.
+ // This is thus identical to is_trivially_constructible.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_default_constructible
+ : public eastl::is_trivially_constructible<T> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_default_constructible_v = is_trivially_default_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivial
+ //
+ // is_trivial<T>::value == true if T is a scalar type, a trivially copyable
+ // class with a trivial default constructor, or array of such type/class,
+ // possibly cv-qualified), provides the member constant value equal true.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivial_CONFORMANCE ((EASTL_TYPE_TRAIT_is_trivially_default_constructible_CONFORMANCE && EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE) ? 1 : 0)
+
+ #if defined(_MSC_VER) && _MSC_VER == 1800
+ template<bool, typename T>
+ struct is_trivial_helper
+ : public eastl::integral_constant<bool, eastl::is_trivially_copyable<T>::value && eastl::is_trivially_default_constructible<T>::value>{};
+
+ template<typename T>
+ struct is_trivial_helper<true, T>
+ : public false_type{};
+
+ template <typename T>
+ struct is_trivial
+ : public is_trivial_helper<(EA_ALIGN_OF(T) > EA_PLATFORM_MIN_MALLOC_ALIGNMENT), T>::type{};
+ #else
+ // All other compilers seem to be able to handle aligned types passed as value
+ template <typename T>
+ struct is_trivial
+ : public eastl::integral_constant<bool, eastl::is_trivially_copyable<T>::value && eastl::is_trivially_default_constructible<T>::value> {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivial_v = is_trivial<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_constructible
+ //
+ // is_constructible<T, Args...>::value is true and the variable definition
+ // for is_constructible, as defined below, is known not to throw any
+ // exceptions (5.3.7). T and all types in the parameter pack Args shall
+ // be complete types, (possibly cv-qualified) void, or arrays of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #if defined(EA_COMPILER_NO_NOEXCEPT)
+
+ #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE 0
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_constructor<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T, T>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_copy<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T, const T&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_copy<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T, T&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_copy<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T, T&&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_copy<T>::value> {};
+
+ #else
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4008)
+ #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE 0 // GCC up to v4.7's noexcept is broken and fails to generate true for the case of compiler-generated constructors.
+ #else
+ #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE
+ #endif
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // *_noexcept_wrapper implements a workaround for VS2015 preview. A standards conforming noexcept operator allows variadic template expansion.
+ // There appears to be an issue with VS2015 preview that prevents variadic template expansion into a noexcept operator that is passed directly
+ // to a template parameter.
+ //
+ // The fix hoists the noexcept expression into a separate struct and caches the result of the expression. This result is then passed to integral_constant.
+ //
+ // Example code from Clang libc++
+ // template <class _Tp, class... _Args>
+ // struct __libcpp_is_nothrow_constructible<[>is constructible*/true, /*is reference<]false, _Tp, _Args...>
+ // : public integral_constant<bool, noexcept(_Tp(declval<_Args>()...))> { };
+ //
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible_helper_noexcept_wrapper
+ { static const bool value = noexcept(T(eastl::declval<Args>()...)); };
+
+ template <bool, typename T, typename... Args>
+ struct is_nothrow_constructible_helper;
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible_helper<true, T, Args...>
+ : public eastl::integral_constant<bool, is_nothrow_constructible_helper_noexcept_wrapper<T, Args...>::value> {};
+
+ template<typename T, typename Arg>
+ struct is_nothrow_constructible_helper<true, T, Arg>
+ : public eastl::integral_constant<bool, noexcept(T(eastl::declval<Arg>()))> {};
+
+ template<typename T>
+ struct is_nothrow_constructible_helper<true, T>
+ : public eastl::integral_constant<bool, noexcept(T())> {};
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible_helper<false, T, Args...>
+ : public eastl::false_type {};
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible
+ : public eastl::is_nothrow_constructible_helper<eastl::is_constructible<T, Args...>::value, T, Args...> {};
+
+ template <typename T, size_t N>
+ struct is_nothrow_constructible<T[N]>
+ : public eastl::is_nothrow_constructible_helper<eastl::is_constructible<T>::value, T> {};
+ #endif
+
+ #define EASTL_DECLARE_IS_NOTHROW_CONSTRUCTIBLE(T, isNothrowConstructible) \
+ namespace eastl{ \
+ template <> struct is_nothrow_constructible<T> : public eastl::integral_constant<bool, isNothrowConstructible> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, typename... Args>
+ EA_CONSTEXPR bool is_nothrow_constructible_v = is_nothrow_constructible<T, Args...>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_default_constructible
+ //
+ // is_constructible<T>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_default_constructible
+ : public eastl::is_constructible<T> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_default_constructible_v = is_default_constructible<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_default_constructible
+ ///////////////////////////////////////////////////////////////////////
+ // TODO(rparolin): implement type-trait
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_copy_constructible
+ //
+ // is_constructible<T, const T&>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_copy_constructible
+ : public eastl::is_constructible<T, typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_copy_constructible_v = is_copy_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_copy_constructible
+ //
+ // is_trivially_constructible<T, const T&>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_copy_constructible
+ : public eastl::is_trivially_constructible<T, typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_copy_constructible_v = is_trivially_copy_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_copy_constructible
+ //
+ // is_nothrow_-constructible<T, const T&>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_nothrow_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_copy_constructible
+ : public is_nothrow_constructible<T, typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_copy_constructible_v = is_nothrow_copy_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_move_constructible
+ //
+ // is_constructible<T, T&&>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_move_constructible
+ : public eastl::is_constructible<T, typename eastl::add_rvalue_reference<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_move_constructible_v = is_move_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_move_constructible
+ //
+ // is_trivially_constructible<T, T&&>::value is true.
+ // T shall be a complete type, (possibly cv-qualified) void, or an
+ // array of unknown bound.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_move_constructible
+ : public eastl::is_trivially_constructible<T, typename eastl::add_rvalue_reference<T>::type> {};
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T, isTrivallyMoveConstructible) \
+ namespace eastl{ \
+ template <> struct is_trivially_move_constructible<T> : public eastl::integral_constant<bool, isTriviallyMoveConstructible> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_move_constructible_v = is_trivially_move_constructible<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_assignable
+ //
+ // The expression declval<T>() = declval<U>() is well-formed when treated as an unevaluated operand.
+ // Access checking is performed as if in a context unrelated to T and U. Only the validity of
+ // the immediate context of the assignment expression is considered. The compilation of the expression
+ // can result in side effects such as the instantiation of class template specializations and function
+ // template specializations, the generation of implicitly-defined functions, and so on. Such side
+ // effects are not in the "immediate context" and can result in the program being ill-formed.
+ //
+ // Note:
+ // This type trait has a misleading and counter-intuitive name. It does not indicate whether an instance
+ // of U can be assigned to an instance of T (e.g. t = u). Instead it indicates whether the assignment can be
+ // done after adding rvalue references to both, as in add_rvalue_reference<T>::type = add_rvalue_reference<U>::type.
+ // A counterintuitive result of this is that is_assignable<int, int>::value == false. The is_copy_assignable
+ // trait indicates if a type can be assigned to its own type, though there isn't a standard C++ way to tell
+ // if an arbitrary type is assignable to another type.
+ // http://stackoverflow.com/questions/19920213/why-is-stdis-assignable-counter-intuitive
+ //
+ // Note:
+ // A true is_assignable value doesn't guarantee that the expression is compile-able, the compiler checks
+ // only that the assignment matches before compilation. In particular, if you have templated operator=
+ // for a class, the compiler will always say is_assignable is true, regardless of what's being tested
+ // on the right hand side of the expression. It may actually turn out during compilation that the
+ // templated operator= fails to compile because in practice it doesn't accept every possible type for
+ // the right hand side of the expression.
+ //
+ // Expected results:
+ // is_assignable<void, void>::value == false
+ // is_assignable<int&, int>::value == true
+ // is_assignable<int, int>::value == false
+ // is_assignable<int, int&>::value == false
+ // is_assignable<bool, bool>::value == false
+ // is_assignable<int, float>::value == false
+ // is_assignable<int[], int[]>::value == false
+ // is_assignable<char*, int*>::value == false
+ // is_assignable<char*, const char*>::value == false
+ // is_assignable<const char*, char*>::value == false
+ // is_assignable<PodA, PodB*>::value == false
+ // is_assignable<Assignable, Assignable>::value == true
+ // is_assignable<Assignable, Unrelated>::value == false
+ //
+ // Note:
+ // Our implementation here yields different results than does the std::is_assignable from Dinkumware-based Standard
+ // Libraries, but yields similar results to the std::is_assignable from GCC's libstdc++ and clang's libc++. It may
+ // possibly be that the Dinkumware results are intentionally different for some practical purpose or because they
+ // represent the spirit or the Standard but not the letter of the Standard.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE 1
+
+ template<typename T, typename U>
+ struct is_assignable_helper
+ {
+ template<typename, typename>
+ static eastl::no_type is(...);
+
+ template<typename T1, typename U1>
+ static decltype(eastl::declval<T1>() = eastl::declval<U1>(), eastl::yes_type()) is(int);
+
+ static const bool value = (sizeof(is<T, U>(0)) == sizeof(eastl::yes_type));
+ };
+
+ template<typename T, typename U>
+ struct is_assignable :
+ public eastl::integral_constant<bool, eastl::is_assignable_helper<T, U>::value> {};
+
+ // The main purpose of this function is to help the non-conforming case above.
+ // Note: We don't handle const/volatile variations here, as we expect the user to
+ // manually specify any such variations via this macro.
+ // Example usage:
+ // EASTL_DECLARE_IS_ASSIGNABLE(int, int, false)
+ //
+ #define EASTL_DECLARE_IS_ASSIGNABLE(T, U, isAssignable) \
+ namespace eastl { \
+ template <> struct is_assignable<T, U> : public eastl::integral_constant<bool, isAssignable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_assignable_v = is_assignable<T, U>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_lvalue_assignable
+ //
+ // This is an EASTL extension function which is like is_assignable but
+ // works for arbitrary assignments and not just rvalue assignments.
+ // This function provides an intuitive assignability test, as opposed
+ // to is_assignable.
+ //
+ // Note: is_lvalue_assignable<T, T> === is_copy_assignable<T>
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_lvalue_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ template <typename T, typename U>
+ struct is_lvalue_assignable
+ : public eastl::is_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_lvalue_reference<typename eastl::add_const<U>::type>::type> {};
+
+ #define EASTL_DECLARE_IS_LVALUE_ASSIGNABLE(T, U, isLvalueAssignable) \
+ namespace eastl { \
+ template <> struct is_lvalue_assignable<T, U> : public eastl::integral_constant<bool, isLvalueAssignable> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_assignable
+ //
+ // is_assignable<T, U>::value is true and the assignment, as defined by
+ // is_assignable, is known to call no operation that is not trivial (3.9, 12).
+ // T and U shall be complete types, (possibly cv-qualified) void, or
+ // arrays of unknown bound
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_trivially_assignable))
+ #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE 1
+
+ template <typename T, typename U>
+ struct is_trivially_assignable
+ : eastl::integral_constant<bool, __is_trivially_assignable(T, U)> {};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) && (_MSC_VER >= 1800))
+ #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ // This code path is attempting to work around the issue with VS2013 __is_trivially_assignable compiler intrinsic documented in the link
+ // below. todo: Re-evaluate in VS2014.
+ //
+ // https://connect.microsoft.com/VisualStudio/feedback/details/806233/std-is-trivially-copyable-const-int-n-and-std-is-trivially-copyable-int-n-incorrect
+
+ template <bool A, typename T, typename U>
+ struct is_trivially_assignable_helper;
+
+ template <typename T, typename U>
+ struct is_trivially_assignable_helper<true, T, U> : eastl::integral_constant<bool, __is_trivially_assignable(T, U)>{};
+
+ template <typename T, typename U>
+ struct is_trivially_assignable_helper<false, T, U> : false_type{};
+
+ template <typename T, typename U>
+ struct is_trivially_assignable
+ : eastl::integral_constant<bool, is_trivially_assignable_helper< eastl::is_assignable<T, U>::value, T, U >::value> {};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_GNUC))
+ #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ // Micrsoft (up till at least VS2012) and GCC have __has_trivial_assign, but it may not be identical with the goals of this type trait.
+ // The Microsoft type trait headers suggest that a future version of VS will have a __is_trivially_assignable intrinsic, but we
+ // need to come up with something in the meantime. To do: Re-evalulate this for VS2013+ when it becomes available.
+ template <typename T, typename U>
+ struct is_trivially_assignable
+ : eastl::integral_constant<bool, eastl::is_assignable<T, U>::value &&
+ (eastl::is_pod<typename eastl::remove_reference<T>::type>::value || __has_trivial_assign(typename eastl::remove_reference<T>::type))> {};
+ #else
+
+ #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE 0 // Generates false negatives.
+
+ template <typename T, typename U>
+ struct is_trivially_assignable
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_trivially_assignable<T&, T>
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_assignable<T&, T&>
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_assignable<T&, const T&>
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_assignable<T&, T&&>
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value> {};
+
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_trivially_assignable_v = is_trivially_assignable<T, U>::value;
+ #endif
+
+ // The main purpose of this function is to help the non-conforming case above.
+ // Note: We don't handle const/volatile variations here, as we expect the user to
+ // manually specify any such variations via this macro.
+ // Example usage:
+ // EASTL_DECLARE_IS_TRIVIALLY_ASSIGNABLE(int, int, false)
+ //
+ #define EASTL_DECLARE_IS_TRIVIALLY_ASSIGNABLE(T, U, isTriviallyAssignable) \
+ namespace eastl { \
+ template <> struct is_trivially_assignable<T, U> : public eastl::integral_constant<bool, isTriviallyAssignable> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_assignable
+ //
+ // is_assignable<T, U>::value is true and the assignment is known
+ // not to throw any exceptions (5.3.7). T and U shall be complete
+ // types, (possibly cv-qualified) void, or arrays of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+
+ #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 1
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable
+ : eastl::integral_constant<bool, __is_nothrow_assignable(T, U)> {};
+
+ #elif defined(EA_COMPILER_NO_NOEXCEPT) || defined(__EDG_VERSION__) // EDG mis-compiles the conforming code below and so must be placed here.
+ #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 0
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable
+ : public false_type {};
+
+ // Note that the following are crippled in that they support only assignment of T types to other T types.
+ template <typename T>
+ struct is_nothrow_assignable<T&, T>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_assign<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_assignable<T&, T&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_assign<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_assignable<T&, const T&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_assign<T>::value> {};
+
+ #else
+ #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 1
+
+ template <bool, typename T, typename U>
+ struct is_nothrow_assignable_helper;
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable_helper<false, T, U>
+ : public false_type {};
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable_helper<true, T, U> // Set to true if the assignment (same as is_assignable) cannot generate an exception.
+ : public eastl::integral_constant<bool, noexcept(eastl::declval<T>() = eastl::declval<U>()) >
+ {
+ };
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable
+ : public eastl::is_nothrow_assignable_helper<eastl::is_assignable<T, U>::value, T, U>
+ {
+ };
+ #endif
+
+ #define EASTL_DECLARE_IS_NOTHROW_ASSIGNABLE(T, isNothrowAssignable) \
+ namespace eastl{ \
+ template <> struct is_nothrow_assignable<T> : public eastl::integral_constant<bool, isNothrowAssignable> { }; \
+ template <> struct is_nothrow_assignable<const T> : public eastl::integral_constant<bool, isNothrowAssignable> { }; \
+ template <> struct is_nothrow_assignable<volatile T> : public eastl::integral_constant<bool, isNothrowAssignable> { }; \
+ template <> struct is_nothrow_assignable<const volatile T> : public eastl::integral_constant<bool, isNothrowAssignable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_nothrow_assignable_v = is_nothrow_assignable<T, U>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_copy_assignable
+ //
+ // is_assignable<T&, const T&>::value is true. T shall be a complete type,
+ // (possibly cv -qualified) void, or an array of unknown bound.
+ //
+ // This (and not is_assignable) is the type trait you use to tell if you
+ // can do an arbitrary assignment. is_assignable tells if you can do an
+ // assignment specifically to an rvalue and not in general.
+ // http://stackoverflow.com/a/19921030/725009
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_copy_assignable
+ : public eastl::is_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_copy_assignable_v = is_copy_assignable<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_copy_assignable
+ //
+ // is_trivially_assignable<T&, const T&>::value is true. T shall be a
+ // complete type, (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE
+
+#if EASTL_TYPE_TRAIT_is_trivially_copy_assignable_CONFORMANCE
+ template <typename T>
+ struct is_trivially_copy_assignable
+ : public eastl::is_trivially_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+#else
+ template <typename T>
+ struct is_trivially_copy_assignable
+ : public integral_constant<bool,
+ eastl::is_scalar<T>::value || eastl::is_pod<T>::value || eastl::is_trivially_assignable<typename eastl::add_lvalue_reference<T>::type, typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type>::value
+ > {};
+#endif
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_COPY_ASSIGNABLE(T, isTriviallyCopyAssignable) \
+ namespace eastl { \
+ template <> struct is_trivially_copy_assignable<T> : public eastl::integral_constant<bool, isTriviallyCopyAssignable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_copy_assignable_v = is_trivially_copy_assignable<T>::value;
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_copy_assignable
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_nothrow_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_copy_assignable
+ : public eastl::is_nothrow_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_copy_assignable_v = is_nothrow_copy_assignable<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_move_assignable
+ //
+ // is_assignable<T&, T&&>::value is true. T shall be a complete type,
+ // (possibly cv -qualified) void, or an array of unknown bound.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_move_assignable
+ : public eastl::is_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_rvalue_reference<T>::type> {};
+
+ #define EASTL_DECLARE_IS_MOVE_ASSIGNABLE(T, isMoveAssignable) \
+ namespace eastl{ \
+ template <> struct is_move_assignable<T> : public eastl::integral_constant<bool, isMoveAssignable> { }; \
+ template <> struct is_move_assignable<const T> : public eastl::integral_constant<bool, isMoveAssignable> { }; \
+ template <> struct is_move_assignable<volatile T> : public eastl::integral_constant<bool, isMoveAssignable> { }; \
+ template <> struct is_move_assignable<const volatile T> : public eastl::integral_constant<bool, isMoveAssignable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_move_assignable_v = is_move_assignable<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_move_assignable
+ //
+ // is_trivially_-assignable<T&, T&&>::value is true. T shall be a complete type,
+ // (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_move_assignable
+ : public eastl::is_trivially_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_rvalue_reference<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_move_assignable_v = is_trivially_move_assignable<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_move_assignable
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_nothrow_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_move_assignable
+ : public eastl::is_nothrow_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_rvalue_reference<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_move_assignable_v = is_nothrow_move_assignable<T>::value;
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_destructible
+ //
+ // For a complete type T and given
+ // template <class U>
+ // struct test { U u; };
+ // test<T>::~test() is not deleted (C++11 "= delete").
+ // T shall be a complete type, (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1920)
+ #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 1
+
+ template <typename T>
+ struct is_destructible
+ : integral_constant<bool, __is_destructible(T)> {};
+
+ #elif defined(EA_COMPILER_NO_DECLTYPE) || defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS) || defined(_MSC_VER) || defined(__EDG_VERSION__) // VS2012 and EDG mis-compile the conforming code below and so must be placed here.
+ #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 0
+
+ // This implementation works for almost all cases, with the primary exception being the
+ // case that the user declared the destructor as deleted. To deal with that case the
+ // user needs to use EASTL_DECLARE_IS_NOT_DESTRUCTIBLE to cause is_destructible<T>::value
+ // to be false.
+
+ template <typename T>
+ struct is_destructible
+ : public eastl::integral_constant<bool, !eastl::is_array_of_unknown_bounds<T>::value &&
+ !eastl::is_void<T>::value &&
+ !eastl::is_function<T>::value> {};
+ #else
+ #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 1
+
+ template <typename>
+ eastl::false_type destructible_test_function(...);
+
+ template <typename T, typename U = typename eastl::remove_all_extents<T>::type, typename V = decltype(eastl::declval<U&>().~U())>
+ eastl::true_type destructible_test_function(int);
+
+ template <typename T, bool = eastl::is_array_of_unknown_bounds<T>::value || // Exclude these types from being considered destructible.
+ eastl::is_void<T>::value ||
+ eastl::is_function<T>::value>
+ struct is_destructible_helper
+ : public eastl::identity<decltype(eastl::destructible_test_function<T>(0))>::type {}; // Need to wrap decltype with identity because some compilers otherwise don't like the bare decltype usage.
+
+ template <typename T>
+ struct is_destructible_helper<T, true>
+ : public eastl::false_type {};
+
+ template <typename T, bool Whatever>
+ struct is_destructible_helper<T&, Whatever> // Reference are trivially destructible.
+ : public eastl::true_type {};
+
+ template <typename T, bool Whatever>
+ struct is_destructible_helper<T&&, Whatever> // Reference are trivially destructible.
+ : public eastl::true_type {};
+
+ template <typename T>
+ struct is_destructible
+ : public is_destructible_helper<T> {};
+
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_destructible_v = is_destructible<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_IS_DESTRUCTIBLE(T, isDestructible) \
+ namespace eastl{ \
+ template <> struct is_destructible<T> : public eastl::integral_constant<bool, isDestructible>{}; \
+ template <> struct is_destructible<const T> : public eastl::integral_constant<bool, isDestructible>{}; \
+ template <> struct is_destructible<volatile T> : public eastl::integral_constant<bool, isDestructible>{}; \
+ template <> struct is_destructible<const volatile T> : public eastl::integral_constant<bool, isDestructible>{}; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_destructible
+ //
+ // is_destructible<T>::value is true and the indicated destructor is
+ // known to be trivial. T shall be a complete type, (possibly cv-qualified)
+ // void, or an array of unknown bound.
+ //
+ // A destructor is trivial if it is not user-provided and if:
+ // - the destructor is not virtual,
+ // - all of the direct base classes of its class have trivial destructors, and
+ // - for all of the non-static data members of its class that are of
+ // class type (or array thereof), each such class has a trivial destructor.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1920)
+ #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE 1
+
+ template <typename T>
+ struct is_trivially_destructible
+ : integral_constant<bool, __is_trivially_destructible(T)> {};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(__clang__))
+ #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_destructible // Can't use just __has_trivial_destructor(T) because some compilers give it slightly different meaning, and are just plain broken, such as VC++'s __has_trivial_destructor, which says false for fundamental types.
+ : public integral_constant<bool, eastl::is_destructible<T>::value && ((__has_trivial_destructor(T) && !eastl::is_hat_type<T>::value)|| eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value)> {};
+
+ #else
+ #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE 0
+
+ template <typename T>
+ struct is_trivially_destructible_helper
+ : public integral_constant<bool, (eastl::is_pod<T>::value || eastl::is_scalar<T>::value || eastl::is_reference<T>::value) && !eastl::is_void<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_destructible
+ : public eastl::is_trivially_destructible_helper<typename eastl::remove_all_extents<T>::type> {};
+ #endif
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_DESTRUCTIBLE(T, isTriviallyDestructible) \
+ namespace eastl{ \
+ template <> struct is_trivially_destructible<T> : public eastl::integral_constant<bool, isTriviallyDestructible>{}; \
+ template <> struct is_trivially_destructible<const T> : public eastl::integral_constant<bool, isTriviallyDestructible>{}; \
+ template <> struct is_trivially_destructible<volatile T> : public eastl::integral_constant<bool, isTriviallyDestructible>{}; \
+ template <> struct is_trivially_destructible<const volatile T> : public eastl::integral_constant<bool, isTriviallyDestructible>{}; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_destructible_v = is_trivially_destructible<T>::value;
+ #endif
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_destructible
+ //
+ // is_destructible<T>::value is true and the indicated destructor is
+ // known not to throw any exceptions (5.3.7). T shall be a complete type,
+ // (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1920)
+ #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE ((_MSC_VER >= 1900) ? 1 : 0) // VS2013 (1800) doesn't support noexcept and so can't support all usage of this properly (in particular default exception specifications defined in [C++11 Standard, 15.4 paragraph 14].
+
+ template <typename T>
+ struct is_nothrow_destructible
+ : integral_constant<bool, __is_nothrow_destructible(T)> {};
+
+ #elif defined(EA_COMPILER_NO_NOEXCEPT)
+ #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE 0
+
+ template <typename T>
+ struct is_nothrow_destructible_helper
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value || eastl::is_reference<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_destructible
+ : public eastl::is_nothrow_destructible_helper<typename eastl::remove_all_extents<T>::type> {};
+
+ #else
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4008)
+ #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE 0 // GCC up to v4.7's noexcept is broken and fails to generate true for the case of compiler-generated destructors.
+ #else
+ #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE
+ #endif
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // *_noexcept_wrapper implements a workaround for VS2015. A standards conforming noexcept operator allows variadic template expansion.
+ // There appears to be an issue with VS2015 that prevents variadic template expansion into a noexcept operator that is passed directly
+ // to a template parameter.
+ //
+ // The fix hoists the noexcept expression into a separate struct and caches the result of the expression. This result is then passed to integral_constant.
+ //
+ // Example code from Clang libc++
+ // template <class _Tp, class... _Args>
+ // struct __libcpp_is_nothrow_constructible<[>is constructible*/true, /*is reference<]false, _Tp, _Args...>
+ // : public integral_constant<bool, noexcept(_Tp(declval<_Args>()...))> { };
+ //
+
+ template <typename T>
+ struct is_nothrow_destructible_helper_noexcept_wrapper
+ { static const bool value = noexcept(eastl::declval<T&>().~T()); };
+
+ template <typename T, bool>
+ struct is_nothrow_destructible_helper;
+
+ template <typename T>
+ struct is_nothrow_destructible_helper<T, false>
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_nothrow_destructible_helper<T, true> // If the expression T::~T is a noexcept expression then it's nothrow.
+ : public eastl::integral_constant<bool, is_nothrow_destructible_helper_noexcept_wrapper<T>::value > {};
+
+ template <typename T>
+ struct is_nothrow_destructible // A type needs to at least be destructible before it could be nothrow destructible.
+ : public eastl::is_nothrow_destructible_helper<T, eastl::is_destructible<T>::value> {};
+
+ template <typename T, size_t N> // An array is nothrow destructible if its element type is nothrow destructible.
+ struct is_nothrow_destructible<T[N]> // To consider: Replace this with a remove_all_extents pathway.
+ : public eastl::is_nothrow_destructible<T> {};
+
+ template <typename T>
+ struct is_nothrow_destructible<T&> // A reference type cannot throw while being destructed. It's just a reference.
+ : public eastl::true_type {};
+
+ template <typename T>
+ struct is_nothrow_destructible<T&&> // An rvalue reference type cannot throw while being destructed.
+ : public eastl::true_type {};
+
+ #endif
+
+ #define EASTL_DECLARE_IS_NOTHROW_DESTRUCTIBLE(T, isNoThrowDestructible) \
+ namespace eastl{ \
+ template <> struct is_nothrow_destructible<T> { static const bool value = isNoThrowDestructible; }; \
+ template <> struct is_nothrow_destructible<const T> { static const bool value = isNoThrowDestructible; }; \
+ template <> struct is_nothrow_destructible<volatile T> { static const bool value = isNoThrowDestructible; }; \
+ template <> struct is_nothrow_destructible<const volatile T> { static const bool value = isNoThrowDestructible; }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_destructible_v = is_nothrow_destructible<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_default_constructible
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_is_nothrow_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_default_constructible
+ : public eastl::is_nothrow_constructible<T> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_default_constructible_v = is_nothrow_default_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_move_constructible
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_is_nothrow_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_move_constructible
+ : public eastl::is_nothrow_constructible<T, typename eastl::add_rvalue_reference<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_move_constructible_v = is_nothrow_move_constructible<T>::value;
+ #endif
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/type_properties.h b/EASTL/include/EASTL/internal/type_properties.h
new file mode 100644
index 0000000..78bdfca
--- /dev/null
+++ b/EASTL/include/EASTL/internal/type_properties.h
@@ -0,0 +1,457 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_PROPERTIES_H
+#define EASTL_INTERNAL_TYPE_PROPERTIES_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <limits.h>
+#include <EASTL/internal/type_compound.h>
+
+
+namespace eastl
+{
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // underlying_type
+ //
+ // Defines a member typedef type of type that is the underlying type for the enumeration T.
+ // Requires explicit compiler support to implement.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(_MSC_VER) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)) || defined(__clang__)) // VS2012+
+ #define EASTL_TYPE_TRAIT_underlying_type_CONFORMANCE 1 // underlying_type is conforming.
+
+ template <typename T>
+ struct underlying_type{ typedef __underlying_type(T) type; };
+
+ #else
+ #define EASTL_TYPE_TRAIT_underlying_type_CONFORMANCE 0
+
+ template <typename T>
+ struct underlying_type{ typedef int type; }; // This is of course wrong, but we emulate libstdc++ and typedef it as int.
+ #endif
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename T>
+ using underlying_type_t = typename underlying_type<T>::type;
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // to_underlying
+ //
+ // Cast a enum value to its underlying type.
+ // For example:
+ //
+ // enum class MyEnum : uint8_t { Value = 0; }
+ // auto x = MyEnum::Value;
+ // std::cout << to_underlying(x); // equivalent to sts::cout << static_cast<uint8_t>(x);
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template<class T>
+ constexpr underlying_type_t<T> to_underlying(T value) noexcept
+ {
+ return static_cast<underlying_type_t<T>>(value);
+ }
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_unique_object_representations
+ //
+ // If T is TriviallyCopyable and if any two objects of type T with the same
+ // value have the same object representation, value is true. For any other
+ // type, value is false.
+ //
+ // http://en.cppreference.com/w/cpp/types/has_unique_object_representations
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE
+ #define EASTL_TYPE_TRAIT_has_unique_object_representations_CONFORMANCE 1
+
+ template <typename T>
+ struct has_unique_object_representations
+ : public integral_constant<bool, __has_unique_object_representations(remove_cv_t<remove_all_extents_t<T>>)>
+ {
+ };
+
+ #else
+ #define EASTL_TYPE_TRAIT_has_unique_object_representations_CONFORMANCE 0
+
+ template <typename T>
+ struct has_unique_object_representations
+ : public integral_constant<bool, is_integral_v<remove_cv_t<remove_all_extents_t<T>>>> // only integral types (floating point types excluded).
+ {
+ };
+
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR auto has_unique_object_representations_v = has_unique_object_representations<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_signed
+ //
+ // is_signed<T>::value == true if T is a (possibly cv-qualified) floating-point or signed integer type.
+ //
+ // Used to determine if a type is signed.
+ // Given that there are some user-made classes which emulate integral
+ // types, we provide the EASTL_DECLARE_SIGNED macro to allow you to
+ // set a given class to be identified as a signed type.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_signed_CONFORMANCE 1 // is_signed is conforming.
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable: 4296) // '<': expression is always false
+#endif
+ template<typename T, bool = is_arithmetic<T>::value>
+ struct is_signed_helper : bool_constant<T(-1) < T(0)> {};
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+ template<typename T>
+ struct is_signed_helper<T, false> : false_type {};
+
+ template <typename T>
+ struct is_signed : public eastl::is_signed_helper<T>::type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_signed_v = is_signed<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_SIGNED(T) \
+ namespace eastl{ \
+ template <> struct is_signed<T> : public true_type{}; \
+ template <> struct is_signed<const T> : public true_type{}; \
+ template <> struct is_signed<volatile T> : public true_type{}; \
+ template <> struct is_signed<const volatile T> : public true_type{}; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_unsigned
+ //
+ // is_unsigned<T>::value == true if T is a (possibly cv-qualified) bool or unsigned integer type.
+ //
+ // Used to determine if a type is unsigned.
+ // Given that there are some user-made classes which emulate integral
+ // types, we provide the EASTL_DECLARE_UNSIGNED macro to allow you to
+ // set a given class to be identified as an unsigned type.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_unsigned_CONFORMANCE 1 // is_unsigned is conforming.
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable: 4296) // '<': expression is always false
+#endif
+ template<typename T, bool = is_arithmetic<T>::value>
+ struct is_unsigned_helper : integral_constant<bool, T(0) < T(-1)> {};
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+ template<typename T>
+ struct is_unsigned_helper<T, false> : false_type {};
+
+ template <typename T>
+ struct is_unsigned : public eastl::is_unsigned_helper<T>::type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_unsigned_v = is_unsigned<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_UNSIGNED(T) \
+ namespace eastl{ \
+ template <> struct is_unsigned<T> : public true_type{}; \
+ template <> struct is_unsigned<const T> : public true_type{}; \
+ template <> struct is_unsigned<volatile T> : public true_type{}; \
+ template <> struct is_unsigned<const volatile T> : public true_type{}; \
+ }
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_bounded_array
+ //
+ // is_bounded_array<T>::value == true if T is an array type of known bound.
+ //
+ // is_bounded_array<int>::value is false.
+ // is_bounded_array<int[5]>::value is true.
+ // is_bounded_array<int[]>::value is false.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_bounded_array_CONFORMANCE 1 // is_bounded_array is conforming.
+
+ template<class T>
+ struct is_bounded_array: eastl::false_type {};
+
+ template<class T, size_t N>
+ struct is_bounded_array<T[N]> : eastl::true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_bounded_array_v = is_bounded_array<T>::value;
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_unbounded_array
+ //
+ // is_unbounded_array<T>::value == true if T is an array type of known bound.
+ //
+ // is_unbounded_array<int>::value is false.
+ // is_unbounded_array<int[5]>::value is false.
+ // is_unbounded_array<int[]>::value is true.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_unbounded_array_CONFORMANCE 1 // is_unbounded_array is conforming.
+
+ template<class T>
+ struct is_unbounded_array: eastl::false_type {};
+
+ template<class T>
+ struct is_unbounded_array<T[]> : eastl::true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_unbounded_array_v = is_unbounded_array<T>::value;
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // alignment_of
+ //
+ // alignment_of<T>::value is an integral value representing, in bytes,
+ // the memory alignment of objects of type T.
+ //
+ // alignment_of may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_alignment_of_CONFORMANCE 1 // alignment_of is conforming.
+
+ template <typename T>
+ struct alignment_of_value{ static const size_t value = EASTL_ALIGN_OF(T); };
+
+ template <typename T>
+ struct alignment_of : public integral_constant<size_t, alignment_of_value<T>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR size_t alignment_of_v = alignment_of<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_aligned
+ //
+ // Defined as true if the type has alignment requirements greater
+ // than default alignment, which is taken to be 8. This allows for
+ // doing specialized object allocation and placement for such types.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_aligned_CONFORMANCE 1 // is_aligned is conforming.
+
+ template <typename T>
+ struct is_aligned_value{ static const bool value = (EASTL_ALIGN_OF(T) > 8); };
+
+ template <typename T>
+ struct is_aligned : public integral_constant<bool, is_aligned_value<T>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR size_t is_aligned_v = is_aligned<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rank
+ //
+ // rank<T>::value is an integral value representing the number of
+ // dimensions possessed by an array type. For example, given a
+ // multi-dimensional array type T[M][N], std::tr1::rank<T[M][N]>::value == 2.
+ // For a given non-array type T, std::tr1::rank<T>::value == 0.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_rank_CONFORMANCE 1 // rank is conforming.
+
+ template<typename T>
+ struct rank : public eastl::integral_constant<size_t, 0> {};
+
+ template<typename T>
+ struct rank<T[]> : public eastl::integral_constant<size_t, rank<T>::value + 1> {};
+
+ template<typename T, size_t N>
+ struct rank<T[N]> : public eastl::integral_constant<size_t, rank<T>::value + 1> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR auto rank_v = rank<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_base_of
+ //
+ // Given two (possibly identical) types Base and Derived, is_base_of<Base, Derived>::value == true
+ // if and only if Base is a direct or indirect base class of Derived,
+ // or Base and Derived are the same type.
+ //
+ // is_base_of may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || ((defined(__clang__)) && EA_COMPILER_HAS_FEATURE(is_base_of)))
+ #define EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE 1 // is_base_of is conforming.
+
+ template <typename Base, typename Derived>
+ struct is_base_of : public eastl::integral_constant<bool, __is_base_of(Base, Derived) || eastl::is_same<Base, Derived>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename Base, typename Derived>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_base_of_v = is_base_of<Base, Derived>::value;
+ #endif
+ #else
+ // Not implemented yet.
+ // This appears to be implementable.
+ #define EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE 0
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_lvalue_reference
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_lvalue_reference_CONFORMANCE 1 // is_lvalue_reference is conforming.
+
+ template<typename T> struct is_lvalue_reference : public eastl::false_type {};
+ template<typename T> struct is_lvalue_reference<T&> : public eastl::true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_lvalue_reference_v = is_lvalue_reference<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_rvalue_reference
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_rvalue_reference_CONFORMANCE 1 // is_rvalue_reference is conforming.
+
+ template <typename T> struct is_rvalue_reference : public eastl::false_type {};
+ template <typename T> struct is_rvalue_reference<T&&> : public eastl::true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_rvalue_reference_v = is_rvalue_reference<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // result_of
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_result_of_CONFORMANCE 1 // result_of is conforming.
+
+ template<typename> struct result_of;
+
+ template<typename F, typename... ArgTypes>
+ struct result_of<F(ArgTypes...)>
+ { typedef decltype(eastl::declval<F>()(eastl::declval<ArgTypes>()...)) type; };
+
+
+ // result_of_t is the C++14 using typedef for typename result_of<T>::type.
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_RESULT_OF_T(T) typename result_of<T>::type
+ #else
+ template <typename T>
+ using result_of_t = typename result_of<T>::type;
+ #define EASTL_RESULT_OF_T(T) result_of_t<T>
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_equality
+ //
+ // Determines if the specified type can be tested for equality.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename, typename = eastl::void_t<>>
+ struct has_equality : eastl::false_type {};
+
+ template <typename T>
+ struct has_equality<T, eastl::void_t<decltype(eastl::declval<T>() == eastl::declval<T>())>> : eastl::true_type
+ {
+ };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR auto has_equality_v = has_equality<T>::value;
+ #endif
+
+ namespace internal
+ {
+ ///////////////////////////////////////////////////////////////////////
+ // is_complete_type
+ //
+ // Determines if the specified type is complete
+ //
+ // Warning: Be careful when using is_complete_type since the value is fixed at first instantiation.
+ // Consider the following:
+ //
+ // struct Foo;
+ // is_complete_type_v<Foo> // false
+ // struct Foo {};
+ // is_complete_type_v<Foo> // still false
+ ///////////////////////////////////////////////////////////////////////
+
+ template<typename T, typename = void>
+ struct is_complete_type : public false_type {};
+
+ template<typename T>
+ struct is_complete_type<T, eastl::void_t<decltype(sizeof(T) != 0)>> : public true_type {};
+
+ template<>
+ struct is_complete_type<const volatile void> : public false_type {};
+ template<>
+ struct is_complete_type<const void> : public false_type {};
+ template<>
+ struct is_complete_type<volatile void> : public false_type {};
+ template<>
+ struct is_complete_type<void> : public false_type {};
+
+ template<typename T>
+ struct is_complete_type<T, eastl::enable_if_t<eastl::is_function_v<T>>> : public true_type {};
+
+ template <typename T>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_complete_type_v = is_complete_type<T, void>::value;
+ }
+
+} // namespace eastl
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/internal/type_transformations.h b/EASTL/include/EASTL/internal/type_transformations.h
new file mode 100644
index 0000000..5454cfa
--- /dev/null
+++ b/EASTL/include/EASTL/internal/type_transformations.h
@@ -0,0 +1,792 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_TRANFORMATIONS_H
+#define EASTL_INTERNAL_TYPE_TRANFORMATIONS_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <limits.h>
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_const
+ //
+ // Add const to a type.
+ //
+ // Tor a given type T, add_const<T>::type is equivalent to T
+ // const if is_const<T>::value == false, and
+ // - is_void<T>::value == true, or
+ // - is_object<T>::value == true.
+ //
+ // Otherwise, add_const<T>::type is equivalent to T.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_const_CONFORMANCE 1 // add_const is conforming.
+
+ template <typename T, bool = eastl::is_const<T>::value || eastl::is_reference<T>::value || eastl::is_function<T>::value>
+ struct add_const_helper
+ { typedef T type; };
+
+ template <typename T>
+ struct add_const_helper<T, false>
+ { typedef const T type; };
+
+ template <typename T>
+ struct add_const
+ { typedef typename eastl::add_const_helper<T>::type type; };
+
+ // add_const_t is the C++17 using typedef for typename add_const<T>::type.
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_ADD_CONST_T(T) typename add_const<T>::type
+ #else
+ template <typename T>
+ using add_const_t = typename add_const<T>::type;
+ #define EASTL_ADD_CONST_T(T) add_const_t<T>
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_volatile
+ //
+ // Add volatile to a type.
+ //
+ // For a given type T, add_volatile<T>::type is equivalent to T volatile
+ // if is_volatile<T>::value == false, and
+ // - is_void<T>::value == true, or
+ // - is_object<T>::value == true.
+ //
+ // Otherwise, add_volatile<T>::type is equivalent to T.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_volatile_CONFORMANCE 1 // add_volatile is conforming.
+
+ template <typename T, bool = eastl::is_volatile<T>::value || eastl::is_reference<T>::value || eastl::is_function<T>::value>
+ struct add_volatile_helper
+ { typedef T type; };
+
+ template <typename T>
+ struct add_volatile_helper<T, false>
+ { typedef volatile T type; };
+
+ template <typename T> struct add_volatile
+ { typedef typename eastl::add_volatile_helper<T>::type type; };
+
+ template <class T> using add_volatile_t = typename add_volatile<T>::type;
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_cv
+ //
+ // The add_cv transformation trait adds const and volatile qualification
+ // to the type to which it is applied. For a given type T,
+ // add_volatile<T>::type is equivalent to add_const<add_volatile<T>::type>::type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_cv_CONFORMANCE 1 // add_cv is conforming.
+
+ template<typename T>
+ struct add_cv
+ {
+ typedef typename add_const<typename add_volatile<T>::type>::type type;
+ };
+
+ template <class T> using add_cv_t = typename add_cv<T>::type;
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // make_signed
+ //
+ // Used to convert an integral type to its signed equivalent, if not already.
+ // T shall be a (possibly const and/or volatile-qualified) integral type
+ // or enumeration but not a bool type.;
+ //
+ // The user can define their own make_signed overrides for their own
+ // types by making a template specialization like done below and adding
+ // it to the user's code.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_make_signed_CONFORMANCE 1
+
+ namespace internal
+ {
+ template <typename T, bool = eastl::is_enum_v<T> || eastl::is_integral_v<T>>
+ struct make_signed_helper_0
+ {
+ struct char_helper
+ {
+ typedef signed char type;
+ };
+
+ struct short_helper
+ {
+ typedef signed short type;
+ };
+
+ struct int_helper
+ {
+ typedef signed int type;
+ };
+
+ struct long_helper
+ {
+ typedef signed long type;
+ };
+
+ struct longlong_helper
+ {
+ typedef signed long long type;
+ };
+
+ struct int128_helper
+ {
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ typedef __int128_t type;
+ #endif
+ };
+
+ struct no_type_helper
+ {
+ };
+
+ typedef typename
+ eastl::conditional<sizeof(T) <= sizeof(signed char), char_helper,
+ eastl::conditional_t<sizeof(T) <= sizeof(signed short), short_helper,
+ eastl::conditional_t<sizeof(T) <= sizeof(signed int), int_helper,
+ eastl::conditional_t<sizeof(T) <= sizeof(signed long), long_helper,
+ eastl::conditional_t<sizeof(T) <= sizeof(signed long long), longlong_helper,
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ eastl::conditional_t<sizeof(T) <= sizeof(__int128_t), int128_helper,
+ no_type_helper
+ >
+ #else
+ no_type_helper
+ #endif
+ >
+ >
+ >
+ >
+ >::type type;
+ };
+
+ template <typename T>
+ struct make_signed_helper_0<T, false>
+ {
+ struct no_type_helper
+ {
+ };
+
+ typedef no_type_helper type;
+ };
+
+ template <typename T>
+ struct make_signed_helper_1
+ {
+ typedef typename T::type type;
+ };
+
+ template <typename T>
+ struct make_signed_helper
+ {
+ typedef typename eastl::internal::make_signed_helper_1<typename eastl::internal::make_signed_helper_0<T>::type>::type type;
+ };
+
+ } // namespace internal
+
+ template <typename T>
+ struct make_signed
+ {
+ typedef typename eastl::internal::make_signed_helper<T>::type type;
+ };
+
+ template <> struct make_signed<bool> {};
+ template <> struct make_signed<signed char> { typedef signed char type; };
+ template <> struct make_signed<unsigned char> { typedef signed char type; };
+ template <> struct make_signed<signed short> { typedef signed short type; };
+ template <> struct make_signed<unsigned short> { typedef signed short type; };
+ template <> struct make_signed<signed int> { typedef signed int type; };
+ template <> struct make_signed<unsigned int> { typedef signed int type; };
+ template <> struct make_signed<signed long> { typedef signed long type; };
+ template <> struct make_signed<unsigned long> { typedef signed long type; };
+ template <> struct make_signed<signed long long> { typedef signed long long type; };
+ template <> struct make_signed<unsigned long long> { typedef signed long long type; };
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ template <> struct make_signed<__int128_t> { typedef __int128_t type; };
+ template <> struct make_signed<__uint128_t> { typedef __int128_t type; };
+ #endif
+
+
+ #if (defined(CHAR_MAX) && defined(UCHAR_MAX) && (CHAR_MAX == UCHAR_MAX)) // If char is unsigned, we convert char to signed char. However, if char is signed then make_signed returns char itself and not signed char.
+ template <> struct make_signed<char> { typedef signed char type; };
+ #endif
+
+ template <typename T>
+ struct make_signed<const T>
+ {
+ typedef eastl::add_const_t<typename eastl::make_signed<T>::type> type;
+ };
+
+ template <typename T>
+ struct make_signed<volatile T>
+ {
+ typedef eastl::add_volatile_t<typename eastl::make_signed<T>::type> type;
+ };
+
+ template <typename T>
+ struct make_signed<const volatile T>
+ {
+ typedef eastl::add_cv_t<typename eastl::make_signed<T>::type> type;
+ };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename T>
+ using make_signed_t = typename make_signed<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_signed
+ //
+ // This is not a C++11 type trait, and is here for backwards compatibility
+ // only. Use the C++11 make_unsigned type trait instead.
+ ///////////////////////////////////////////////////////////////////////
+
+ template<class T>
+ struct add_signed : public make_signed<T>
+ { typedef typename eastl::make_signed<T>::type type; };
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // make_unsigned
+ //
+ // Used to convert an integral type to its unsigned equivalent, if not already.
+ // T shall be a (possibly const and/or volatile-qualified) integral type
+ // or enumeration but not a bool type.;
+ //
+ // The user can define their own make_unsigned overrides for their own
+ // types by making a template specialization like done below and adding
+ // it to the user's code.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_make_unsigned_CONFORMANCE 1
+
+ namespace internal
+ {
+
+ template <typename T, bool = eastl::is_enum<T>::value || eastl::is_integral<T>::value>
+ struct make_unsigned_helper_0
+ {
+ struct char_helper
+ {
+ typedef unsigned char type;
+ };
+
+ struct short_helper
+ {
+ typedef unsigned short type;
+ };
+
+ struct int_helper
+ {
+ typedef unsigned int type;
+ };
+
+ struct long_helper
+ {
+ typedef unsigned long type;
+ };
+
+ struct longlong_helper
+ {
+ typedef unsigned long long type;
+ };
+
+ struct int128_helper
+ {
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ typedef __uint128_t type;
+ #endif
+ };
+
+ struct no_type_helper
+ {
+ };
+
+
+ typedef typename
+ eastl::conditional<sizeof(T) <= sizeof(unsigned char), char_helper,
+ eastl::conditional_t<sizeof(T) <= sizeof(unsigned short), short_helper,
+ eastl::conditional_t<sizeof(T) <= sizeof(unsigned int), int_helper,
+ eastl::conditional_t<sizeof(T) <= sizeof(unsigned long), long_helper,
+ eastl::conditional_t<sizeof(T) <= sizeof(unsigned long long), longlong_helper,
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ eastl::conditional_t<sizeof(T) <= sizeof(__uint128_t), int128_helper,
+ no_type_helper
+ >
+ #else
+ no_type_helper
+ #endif
+ >
+ >
+ >
+ >
+ >::type type;
+ };
+
+
+ template <typename T>
+ struct make_unsigned_helper_0<T, false>
+ {
+ struct no_type_helper
+ {
+ };
+
+ typedef no_type_helper type;
+ };
+
+ template <typename T>
+ struct make_unsigned_helper_1
+ {
+ typedef typename T::type type;
+ };
+
+ template <typename T>
+ struct make_unsigned_helper
+ {
+ typedef typename eastl::internal::make_unsigned_helper_1<typename eastl::internal::make_unsigned_helper_0<T>::type>::type type;
+ };
+
+ } // namespace internal
+
+ template <typename T>
+ struct make_unsigned
+ {
+ typedef typename eastl::internal::make_unsigned_helper<T>::type type;
+ };
+
+ template <> struct make_unsigned<bool> {};
+ template <> struct make_unsigned<signed char> { typedef unsigned char type; };
+ template <> struct make_unsigned<unsigned char> { typedef unsigned char type; };
+ template <> struct make_unsigned<signed short> { typedef unsigned short type; };
+ template <> struct make_unsigned<unsigned short> { typedef unsigned short type; };
+ template <> struct make_unsigned<signed int> { typedef unsigned int type; };
+ template <> struct make_unsigned<unsigned int> { typedef unsigned int type; };
+ template <> struct make_unsigned<signed long> { typedef unsigned long type; };
+ template <> struct make_unsigned<unsigned long> { typedef unsigned long type; };
+ template <> struct make_unsigned<signed long long> { typedef unsigned long long type; };
+ template <> struct make_unsigned<unsigned long long> { typedef unsigned long long type; };
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ template <> struct make_unsigned<__int128_t> { typedef __uint128_t type; };
+ template <> struct make_unsigned<__uint128_t> { typedef __uint128_t type; };
+ #endif
+
+ #if (CHAR_MIN < 0) // If char is signed, we convert char to unsigned char. However, if char is unsigned then make_unsigned returns char itself and not unsigned char.
+ template <> struct make_unsigned<char> { typedef unsigned char type; };
+ #endif
+
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ template <> struct make_unsigned<char8_t> { typedef unsigned char type; };
+ #endif
+
+ template <typename T>
+ struct make_unsigned<const T>
+ {
+ typedef eastl::add_const_t<typename eastl::make_unsigned<T>::type> type;
+ };
+
+ template <typename T>
+ struct make_unsigned<volatile T>
+ {
+ typedef eastl::add_volatile_t<typename eastl::make_unsigned<T>::type> type;
+ };
+
+ template <typename T>
+ struct make_unsigned<const volatile T>
+ {
+ typedef eastl::add_cv_t<typename eastl::make_unsigned<T>::type> type;
+ };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename T>
+ using make_unsigned_t = typename make_unsigned<T>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_unsigned
+ //
+ // This is not a C++11 type trait, and is here for backwards compatibility
+ // only. Use the C++11 make_unsigned type trait instead.
+ //
+ // Adds unsigned-ness to the given type.
+ // Modifies only integral values; has no effect on others.
+ // add_unsigned<int>::type is unsigned int
+ // add_unsigned<unsigned int>::type is unsigned int
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template<class T>
+ struct add_unsigned : public make_unsigned<T>
+ { typedef typename eastl::make_signed<T>::type type; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_pointer
+ //
+ // Remove pointer from a type.
+ //
+ // The remove_pointer transformation trait removes top-level indirection
+ // by pointer (if any) from the type to which it is applied. Pointers to
+ // members are not affected. For a given type T, remove_pointer<T*>::type
+ // is equivalent to T.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_pointer_CONFORMANCE 1
+
+ template<typename T> struct remove_pointer { typedef T type; };
+ template<typename T> struct remove_pointer<T*> { typedef T type; };
+ template<typename T> struct remove_pointer<T* const> { typedef T type; };
+ template<typename T> struct remove_pointer<T* volatile> { typedef T type; };
+ template<typename T> struct remove_pointer<T* const volatile> { typedef T type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ using remove_pointer_t = typename remove_pointer<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_pointer
+ //
+ // Add pointer to a type.
+ // Provides the member typedef type which is the type T*.
+ //
+ // If T is a reference type,
+ // type member is a pointer to the referred type.
+ // If T is an object type, a function type that is not cv- or ref-qualified,
+ // or a (possibly cv-qualified) void type,
+ // type member is T*.
+ // Otherwise (T is a cv- or ref-qualified function type),
+ // type member is T (ie. not a pointer).
+ //
+ // cv- and ref-qualified function types are invalid, which is why there is a specific clause for it.
+ // See https://cplusplus.github.io/LWG/issue2101 for more.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_pointer_CONFORMANCE 1
+
+ namespace internal
+ {
+ template <typename T>
+ auto try_add_pointer(int) -> type_identity<typename std::remove_reference<T>::type*>;
+ template <typename T>
+ auto try_add_pointer(...) -> type_identity<T>;
+ }
+
+ template <typename T>
+ struct add_pointer : decltype(internal::try_add_pointer<T>(0)) {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ using add_pointer_t = typename add_pointer<T>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_extent
+ //
+ // The remove_extent transformation trait removes a dimension from an array.
+ // For a given non-array type T, remove_extent<T>::type is equivalent to T.
+ // For a given array type T[N], remove_extent<T[N]>::type is equivalent to T.
+ // For a given array type const T[N], remove_extent<const T[N]>::type is equivalent to const T.
+ // For example, given a multi-dimensional array type T[M][N], remove_extent<T[M][N]>::type is equivalent to T[N].
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_extent_CONFORMANCE 1 // remove_extent is conforming.
+
+ template<class T> struct remove_extent { typedef T type; };
+ template<class T> struct remove_extent<T[]> { typedef T type; };
+ template<class T, size_t N> struct remove_extent<T[N]> { typedef T type; };
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename T>
+ using remove_extent_t = typename remove_extent<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_all_extents
+ //
+ // The remove_all_extents transformation trait removes all dimensions from an array.
+ // For a given non-array type T, remove_all_extents<T>::type is equivalent to T.
+ // For a given array type T[N], remove_all_extents<T[N]>::type is equivalent to T.
+ // For a given array type const T[N], remove_all_extents<const T[N]>::type is equivalent to const T.
+ // For example, given a multi-dimensional array type T[M][N], remove_all_extents<T[M][N]>::type is equivalent to T.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_all_extents_CONFORMANCE 1 // remove_all_extents is conforming.
+
+ template<typename T> struct remove_all_extents { typedef T type; };
+ template<typename T, size_t N> struct remove_all_extents<T[N]> { typedef typename eastl::remove_all_extents<T>::type type; };
+ template<typename T> struct remove_all_extents<T[]> { typedef typename eastl::remove_all_extents<T>::type type; };
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename T>
+ using remove_all_extents_t = typename remove_all_extents<T>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // aligned_storage
+ //
+ // The aligned_storage transformation trait provides a type that is
+ // suitably aligned to store an object whose size is does not exceed length
+ // and whose alignment is a divisor of alignment. When using aligned_storage,
+ // length must be non-zero, and alignment must >= alignment_of<T>::value
+ // for some type T. We require the alignment value to be a power-of-two.
+ //
+ // GCC versions prior to 4.4 don't properly support this with stack-based
+ // variables. The EABase EA_ALIGN_MAX_AUTOMATIC define identifies the
+ // extent to which stack (automatic) variables can be aligned for the
+ // given compiler/platform combination.
+ //
+ // Example usage:
+ // aligned_storage<sizeof(Widget), alignment_of(Widget)>::type widget;
+ // Widget* pWidget = new(&widget) Widget;
+ //
+ // aligned_storage<sizeof(Widget), 64>::type widgetAlignedTo64;
+ // Widget* pWidget = new(&widgetAlignedTo64) Widget;
+ //
+ // aligned_storage<sizeof(Widget), alignment_of(Widget)>::type widgetArray[37];
+ // Widget* pWidgetArray = new(widgetArray) Widget[37];
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_aligned_storage_CONFORMANCE 1 // aligned_storage is conforming.
+
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4008)
+ // New versions of GCC do not support using 'alignas' with a value greater than 128.
+ // However, this code using the GNU standard alignment attribute works properly.
+ template<size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ struct aligned_storage
+ {
+ struct type { unsigned char mCharData[N]; } EA_ALIGN(Align);
+ };
+ #elif (EABASE_VERSION_N >= 20040) && !defined(EA_COMPILER_NO_ALIGNAS) // If C++11 alignas is supported...
+ template<size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ struct aligned_storage
+ {
+ typedef struct {
+ alignas(Align) unsigned char mCharData[N];
+ } type;
+ };
+
+ #elif defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4007)) || defined(EA_COMPILER_EDG) // At some point GCC fixed their attribute(align) to support non-literals, though it's not clear what version aside from being no later than 4.7 and no earlier than 4.2.
+ // Some compilers don't allow you to to use EA_ALIGNED with anything by a numeric literal,
+ // so we can't use the simpler code like we do further below for other compilers. We support
+ // only up to so much of an alignment value here.
+ template<size_t N, size_t Align>
+ struct aligned_storage_helper { struct type{ unsigned char mCharData[N]; }; };
+
+ template<size_t N> struct aligned_storage_helper<N, 2> { struct EA_ALIGN( 2) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 4> { struct EA_ALIGN( 4) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 8> { struct EA_ALIGN( 8) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 16> { struct EA_ALIGN( 16) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 32> { struct EA_ALIGN( 32) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 64> { struct EA_ALIGN( 64) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 128> { struct EA_ALIGN( 128) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 256> { struct EA_ALIGN( 256) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 512> { struct EA_ALIGN( 512) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 1024> { struct EA_ALIGN(1024) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 2048> { struct EA_ALIGN(2048) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 4096> { struct EA_ALIGN(4096) type{ unsigned char mCharData[N]; }; };
+
+ template<size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ struct aligned_storage
+ {
+ typedef typename aligned_storage_helper<N, Align>::type type;
+ };
+
+ #else
+ template<size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ struct aligned_storage
+ {
+ union type
+ {
+ unsigned char mCharData[N];
+ struct EA_ALIGN(Align) mStruct{ };
+ };
+ };
+ #endif
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_ALIGNED_STORAGE_T(N, Align) typename eastl::aligned_storage_t<N, Align>::type
+ #else
+ template <size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ using aligned_storage_t = typename aligned_storage<N, Align>::type;
+ #define EASTL_ALIGNED_STORAGE_T(N, Align) eastl::aligned_storage_t<N, Align>
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // aligned_union
+ //
+ // The member typedef type shall be a POD type suitable for use as
+ // uninitialized storage for any object whose type is listed in Types;
+ // its size shall be at least Len. The static member alignment_value
+ // shall be an integral constant of type std::size_t whose value is
+ // the strictest alignment of all types listed in Types.
+ // Note that the resulting type is not a C/C++ union, but simply memory
+ // block (of pod type) that can be used to placement-new an actual
+ // C/C++ union of the types. The actual union you declare can be a non-POD union.
+ //
+ // Example usage:
+ // union MyUnion {
+ // char c;
+ // int i;
+ // float f;
+ //
+ // MyUnion(float fValue) : f(fValue) {}
+ // };
+ //
+ // aligned_union<sizeof(MyUnion), char, int, float>::type myUnionStorage;
+ // MyUnion* pMyUnion = new(&myUnionStorage) MyUnion(21.4f);
+ // pMyUnion->i = 37;
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES) || !EASTL_TYPE_TRAIT_static_max_CONFORMANCE
+ #define EASTL_TYPE_TRAIT_aligned_union_CONFORMANCE 0 // aligned_union is not conforming, as it supports only a two-member unions.
+
+ // To consider: Expand this to include more possible types. We may want to convert this to be a recursive
+ // template instead of like below.
+ template <size_t minSize, typename Type0, typename Type1 = char, typename Type2 = char, typename Type3 = char>
+ struct aligned_union
+ {
+ static const size_t size0 = eastl::static_max<minSize, sizeof(Type0)>::value;
+ static const size_t size1 = eastl::static_max<size0, sizeof(Type1)>::value;
+ static const size_t size2 = eastl::static_max<size1, sizeof(Type2)>::value;
+ static const size_t size = eastl::static_max<size2, sizeof(Type3)>::value;
+
+ static const size_t alignment0 = eastl::static_max<EA_ALIGN_OF(Type0), EA_ALIGN_OF(Type1)>::value;
+ static const size_t alignment1 = eastl::static_max<alignment0, EA_ALIGN_OF(Type2)>::value;
+ static const size_t alignment_value = eastl::static_max<alignment1, EA_ALIGN_OF(Type3)>::value;
+
+ typedef typename eastl::aligned_storage<size, alignment_value>::type type;
+ };
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ // To do: define macro.
+ #else
+ template <size_t minSize, typename Type0, typename Type1 = char, typename Type2 = char, typename Type3 = char>
+ using aligned_union_t = typename aligned_union<minSize, Type0, Type1, Type2, Type3>::type;
+ #endif
+ #else
+ #define EASTL_TYPE_TRAIT_aligned_union_CONFORMANCE 1 // aligned_union is conforming.
+
+ template <size_t minSize, typename Type0, typename ...TypeN>
+ struct aligned_union
+ {
+ static const size_t size = eastl::static_max<minSize, sizeof(Type0), sizeof(TypeN)...>::value;
+ static const size_t alignment_value = eastl::static_max<EA_ALIGN_OF(Type0), EA_ALIGN_OF(TypeN)...>::value;
+
+ typedef typename eastl::aligned_storage<size, alignment_value>::type type;
+ };
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ // To do: define macro.
+ #else
+ template <size_t minSize, typename... TypeN>
+ using aligned_union_t = typename aligned_union<minSize, TypeN...>::type;
+ #endif
+
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // union_cast
+ //
+ // Safely converts between unrelated types that have a binary equivalency.
+ // This appoach is required by strictly conforming C++ compilers because
+ // directly using a C or C++ cast between unrelated types is fraught with
+ // the possibility of undefined runtime behavior due to type aliasing.
+ // The Source and Dest types must be POD types due to the use of a union
+ // in C++ versions prior to C++11. C++11 relaxes the definition of a POD
+ // such that it allows a classes with trivial default constructors whereas
+ // previous versions did not, so beware of this when writing portable code.
+ //
+ // Example usage:
+ // float f32 = 1.234f;
+ // uint32_t n32 = union_cast<uint32_t>(f32);
+ //
+ // Example possible mis-usage:
+ // The following is valid only if you are aliasing the pointer value and
+ // not what it points to. Most of the time the user intends the latter,
+ // which isn't strictly possible.
+ // Widget* pWidget = CreateWidget();
+ // Foo* pFoo = union_cast<Foo*>(pWidget);
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename DestType, typename SourceType>
+ DestType union_cast(SourceType sourceValue)
+ {
+ EASTL_CT_ASSERT((sizeof(DestType) == sizeof(SourceType)) &&
+ (EA_ALIGN_OF(DestType) == EA_ALIGN_OF(SourceType))); // To support differening alignments, we would need to use a memcpy-based solution or find a way to make the two union members align with each other.
+ //EASTL_CT_ASSERT(is_pod<DestType>::value && is_pod<SourceType>::value); // Disabled because we don't want to restrict what the user can do, as some compiler's definitions of is_pod aren't up to C++11 Standards.
+ //EASTL_CT_ASSERT(!is_pointer<DestType>::value && !is_pointer<SourceType>::value); // Disabled because it's valid to alias pointers as long as you are aliasong the pointer value and not what it points to.
+
+ union {
+ SourceType sourceValue;
+ DestType destValue;
+ } u;
+ u.sourceValue = sourceValue;
+
+ return u.destValue;
+ }
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/internal/type_void_t.h b/EASTL/include/EASTL/internal/type_void_t.h
new file mode 100644
index 0000000..40c6818
--- /dev/null
+++ b/EASTL/include/EASTL/internal/type_void_t.h
@@ -0,0 +1,43 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_VOID_T_H
+#define EASTL_INTERNAL_TYPE_VOID_T_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // void_t
+ //
+ // Maps a sequence of any types to void. This utility class is used in
+ // template meta programming to simplify compile time reflection mechanisms
+ // required by the standard library.
+ //
+ // http://en.cppreference.com/w/cpp/types/void_t
+ //
+ // Example:
+ // template <typename T, typename = void>
+ // struct is_iterable : false_type {};
+ //
+ // template <typename T>
+ // struct is_iterable<T, void_t<decltype(declval<T>().begin()),
+ // decltype(declval<T>().end())>> : true_type {};
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <class...>
+ using void_t = void;
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/intrusive_hash_map.h b/EASTL/include/EASTL/intrusive_hash_map.h
new file mode 100644
index 0000000..37f1618
--- /dev/null
+++ b/EASTL/include/EASTL/intrusive_hash_map.h
@@ -0,0 +1,98 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTRUSIVE_HASH_MAP_H
+#define EASTL_INTRUSIVE_HASH_MAP_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/intrusive_hashtable.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// intrusive_hash_map
+ ///
+ /// Template parameters:
+ /// Key The key object (key in the key/value pair). T must contain a member of type Key named mKey.
+ /// T The type of object the map holds (a.k.a. value).
+ /// bucketCount The number of buckets to use. Best if it's a prime number.
+ /// Hash Hash function. See functional.h for examples of hash functions.
+ /// Equal Equality testing predicate; tells if two elements are equal.
+ ///
+ template <typename Key, typename T, size_t bucketCount, typename Hash = eastl::hash<Key>, typename Equal = eastl::equal_to<Key> >
+ class intrusive_hash_map : public intrusive_hashtable<Key, T, Hash, Equal, bucketCount, false, true>
+ {
+ public:
+ typedef intrusive_hashtable<Key, T, Hash, Equal, bucketCount, false, true> base_type;
+ typedef intrusive_hash_map<Key, T, bucketCount, Hash, Equal> this_type;
+
+ public:
+ explicit intrusive_hash_map(const Hash& h = Hash(), const Equal& eq = Equal())
+ : base_type(h, eq)
+ {
+ // Empty
+ }
+
+ // To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea?
+ //intrusive_hash_map(std::initializer_list<value_type> ilist);
+
+ }; // intrusive_hash_map
+
+
+
+
+ /// intrusive_hash_multimap
+ ///
+ /// Implements a intrusive_hash_multimap, which is the same thing as a intrusive_hash_map
+ /// except that contained elements need not be unique. See the documentation
+ /// for intrusive_hash_map for details.
+ ///
+ /// Template parameters:
+ /// Key The key object (key in the key/value pair). T must contain a member of type Key named mKey.
+ /// T The type of object the map holds (a.k.a. value).
+ /// bucketCount The number of buckets to use. Best if it's a prime number.
+ /// Hash Hash function. See functional.h for examples of hash functions.
+ /// Equal Equality testing predicate; tells if two elements are equal.
+ ///
+ template <typename Key, typename T, size_t bucketCount, typename Hash = eastl::hash<Key>, typename Equal = eastl::equal_to<Key> >
+ class intrusive_hash_multimap : public intrusive_hashtable<Key, T, Hash, Equal, bucketCount, false, false>
+ {
+ public:
+ typedef intrusive_hashtable<Key, T, Hash, Equal, bucketCount, false, false> base_type;
+ typedef intrusive_hash_multimap<Key, T, bucketCount, Hash, Equal> this_type;
+
+ public:
+ explicit intrusive_hash_multimap(const Hash& h = Hash(), const Equal& eq = Equal())
+ : base_type(h, eq)
+ {
+ // Empty
+ }
+
+ // To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea?
+ //intrusive_hash_multimap(std::initializer_list<value_type> ilist);
+
+ }; // intrusive_hash_multimap
+
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/intrusive_hash_set.h b/EASTL/include/EASTL/intrusive_hash_set.h
new file mode 100644
index 0000000..a25d03a
--- /dev/null
+++ b/EASTL/include/EASTL/intrusive_hash_set.h
@@ -0,0 +1,100 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTRUSIVE_HASH_SET_H
+#define EASTL_INTRUSIVE_HASH_SET_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/intrusive_hashtable.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// intrusive_hash_set
+ ///
+ /// Template parameters:
+ /// T The type of object the set holds (a.k.a. value).
+ /// bucketCount The number of buckets to use. Best if it's a prime number.
+ /// Hash Hash function. See functional.h for examples of hash functions.
+ /// Equal Equality testing predicate; tells if two elements are equal.
+ ///
+ template <typename T, size_t bucketCount, typename Hash = eastl::hash<T>, typename Equal = eastl::equal_to<T> >
+ class intrusive_hash_set : public intrusive_hashtable<T, T, Hash, Equal, bucketCount, true, true>
+ {
+ public:
+ typedef intrusive_hashtable<T, T, Hash, Equal, bucketCount, true, true> base_type;
+ typedef intrusive_hash_set<T, bucketCount, Hash, Equal> this_type;
+
+ public:
+ explicit intrusive_hash_set(const Hash& h = Hash(), const Equal& eq = Equal())
+ : base_type(h, eq)
+ {
+ // Empty
+ }
+
+ // To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea?
+ //intrusive_hash_set(std::initializer_list<value_type> ilist);
+
+ }; // intrusive_hash_set
+
+
+
+
+ /// intrusive_hash_multiset
+ ///
+ /// Implements a intrusive_hash_multiset, which is the same thing as a intrusive_hash_set
+ /// except that contained elements need not be unique. See the documentation
+ /// for intrusive_hash_set for details.
+ ///
+ /// Template parameters:
+ /// T The type of object the set holds (a.k.a. value).
+ /// bucketCount The number of buckets to use. Best if it's a prime number.
+ /// Hash Hash function. See functional.h for examples of hash functions.
+ /// Equal Equality testing predicate; tells if two elements are equal.
+ ///
+ template <typename T, size_t bucketCount, typename Hash = eastl::hash<T>, typename Equal = eastl::equal_to<T> >
+ class intrusive_hash_multiset : public intrusive_hashtable<T, T, Hash, Equal, bucketCount, true, false>
+ {
+ public:
+ typedef intrusive_hashtable<T, T, Hash, Equal, bucketCount, true, false> base_type;
+ typedef intrusive_hash_multiset<T, bucketCount, Hash, Equal> this_type;
+
+ public:
+ explicit intrusive_hash_multiset(const Hash& h = Hash(), const Equal& eq = Equal())
+ : base_type(h, eq)
+ {
+ // Empty
+ }
+
+ // To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea?
+ //intrusive_hash_multiset(std::initializer_list<value_type> ilist);
+
+ }; // intrusive_hash_multiset
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/intrusive_list.h b/EASTL/include/EASTL/intrusive_list.h
new file mode 100644
index 0000000..dc0129f
--- /dev/null
+++ b/EASTL/include/EASTL/intrusive_list.h
@@ -0,0 +1,1323 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// The intrusive list container is similar to a list, with the primary
+// different being that intrusive lists allow you to control memory
+// allocation.
+//
+// * Intrusive lists store the nodes directly in the data items. This
+// is done by deriving the object from intrusive_list_node.
+//
+// * The container does no memory allocation -- it works entirely with
+// the submitted nodes. This does mean that it is the client's job to
+// free the nodes in an intrusive list, though.
+//
+// * Valid node pointers can be converted back to iterators in O(1).
+// This is because objects in the list are also nodes in the list.
+//
+// * intrusive_list does not support copy construction or assignment;
+// the push, pop, and insert operations take ownership of the
+// passed object.
+//
+// Usage notes:
+//
+// * You can use an intrusive_list directly with the standard nodes
+// if you have some other way of converting the node pointer back
+// to your data pointer.
+//
+// * Remember that the list destructor doesn't deallocate nodes -- it can't.
+//
+// * The size is not cached; this makes size() linear time but splice() is
+// constant time. This does mean that you can remove() an element without
+// having to figure out which list it is in, however.
+//
+// * You can insert a node into multiple intrusive_lists. One way to do so
+// is to (ab)use inheritance:
+//
+// struct NodeA : public intrusive_list_node {};
+// struct NodeB : public intrusive_list_node {};
+// struct Object : public NodeA, nodeB {};
+//
+// intrusive_list<NodeA> listA;
+// intrusive_list<NodeB> listB;
+//
+// listA.push_back(obj);
+// listB.push_back(obj);
+//
+// * find() vs. locate()
+// The find(v) algorithm returns an iterator p such that *p == v; intrusive_list::locate(v)
+// returns an iterator p such that &*p == &v. intrusive_list<> doesn't have find() mainly
+// because list<> doesn't have it either, but there's no reason it couldn't. intrusive_list
+// uses the name 'find' because:
+// - So as not to confuse the member function with the well-defined free function from algorithm.h.
+// - Because it is not API-compatible with eastl::find().
+// - Because it simply locates an object within the list based on its node entry and doesn't perform before any value-based searches or comparisons.
+//
+// Differences between intrusive_list and std::list:
+//
+// Issue std::list intrusive_list
+// --------------------------------------------------------------
+// Automatic node ctor/dtor Yes No
+// Can memmove() container Maybe* No
+// Same item in list twice Yes(copy/byref) No
+// Can store non-copyable items No Yes
+// size() O(1) or O(n) O(n)
+// clear() O(n) O(1)
+// erase(range) O(n) O(1)
+// splice(range) O(1) or O(n) O(1)
+// Convert reference to iterator No O(1)
+// Remove without container No O(1)
+// Nodes in mixed allocators No Yes
+//
+// *) Not required by standard but can be done with some STL implementations.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTRUSIVE_LIST_H
+#define EASTL_INTRUSIVE_LIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// intrusive_list_node
+ ///
+ /// By design this must be a POD, as user structs will be inheriting from
+ /// it and they may wish to remain POD themselves. However, if the
+ /// EASTL_VALIDATE_INTRUSIVE_LIST option is enabled
+ ///
+ struct intrusive_list_node
+ {
+ intrusive_list_node* mpNext;
+ intrusive_list_node* mpPrev;
+
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ intrusive_list_node() // Implemented inline because GCC can't deal with member functions
+ { // of may-alias classes being defined outside the declaration.
+ mpNext = mpPrev = NULL;
+ }
+
+ ~intrusive_list_node()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(mpNext || mpPrev)
+ EASTL_FAIL_MSG("~intrusive_list_node(): List is non-empty.");
+ #endif
+ }
+ #endif
+ } EASTL_MAY_ALIAS; // It's not clear if this really should be needed. An old GCC compatible compiler is generating some crashing optimized code when strict aliasing is enabled, but analysis of it seems to blame the compiler. However, this topic can be tricky.
+
+
+
+ /// intrusive_list_iterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ class intrusive_list_iterator
+ {
+ public:
+ typedef intrusive_list_iterator<T, Pointer, Reference> this_type;
+ typedef intrusive_list_iterator<T, T*, T&> iterator;
+ typedef intrusive_list_iterator<T, const T*, const T&> const_iterator;
+ typedef T value_type;
+ typedef T node_type;
+ typedef ptrdiff_t difference_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category;
+
+ public:
+ pointer mpNode; // Needs to be public for operator==() to work
+
+ public:
+ intrusive_list_iterator();
+ explicit intrusive_list_iterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type.
+ intrusive_list_iterator(const iterator& x);
+ intrusive_list_iterator& operator=(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ intrusive_list_iterator& operator++();
+ intrusive_list_iterator& operator--();
+
+ intrusive_list_iterator operator++(int);
+ intrusive_list_iterator operator--(int);
+
+ }; // class intrusive_list_iterator
+
+
+
+ /// intrusive_list_base
+ ///
+ class intrusive_list_base
+ {
+ public:
+ typedef eastl_size_t size_type; // See config.h for the definition of this, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+
+ protected:
+ intrusive_list_node mAnchor; ///< Sentinel node (end). All data nodes are linked in a ring from this node.
+
+ public:
+ intrusive_list_base();
+ ~intrusive_list_base();
+
+ bool empty() const EA_NOEXCEPT;
+ eastl_size_t size() const EA_NOEXCEPT; ///< Returns the number of elements in the list; O(n).
+ void clear() EA_NOEXCEPT; ///< Clears the list; O(1). No deallocation occurs.
+ void pop_front(); ///< Removes an element from the front of the list; O(1). The element must exist, but is not deallocated.
+ void pop_back(); ///< Removes an element from the back of the list; O(1). The element must exist, but is not deallocated.
+ EASTL_API void reverse() EA_NOEXCEPT; ///< Reverses a list so that front and back are swapped; O(n).
+
+ EASTL_API bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching.
+
+ }; // class intrusive_list_base
+
+
+
+ /// intrusive_list
+ ///
+ /// Example usage:
+ /// struct IntNode : public eastl::intrusive_list_node {
+ /// int mX;
+ /// IntNode(int x) : mX(x) { }
+ /// };
+ ///
+ /// IntNode nodeA(0);
+ /// IntNode nodeB(1);
+ ///
+ /// intrusive_list<IntNode> intList;
+ /// intList.push_back(nodeA);
+ /// intList.push_back(nodeB);
+ /// intList.remove(nodeA);
+ ///
+ template <typename T = intrusive_list_node>
+ class intrusive_list : public intrusive_list_base
+ {
+ public:
+ typedef intrusive_list<T> this_type;
+ typedef intrusive_list_base base_type;
+ typedef T node_type;
+ typedef T value_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef intrusive_list_iterator<T, T*, T&> iterator;
+ typedef intrusive_list_iterator<T, const T*, const T&> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ public:
+ intrusive_list(); ///< Creates an empty list.
+ intrusive_list(const this_type& x); ///< Creates an empty list; ignores the argument.
+ //intrusive_list(std::initializer_list<value_type> ilist); To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea?
+
+ this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument.
+ void swap(this_type&); ///< Swaps the contents of two intrusive lists; O(1).
+
+ iterator begin() EA_NOEXCEPT; ///< Returns an iterator pointing to the first element in the list.
+ const_iterator begin() const EA_NOEXCEPT; ///< Returns a const_iterator pointing to the first element in the list.
+ const_iterator cbegin() const EA_NOEXCEPT; ///< Returns a const_iterator pointing to the first element in the list.
+
+ iterator end() EA_NOEXCEPT; ///< Returns an iterator pointing one-after the last element in the list.
+ const_iterator end() const EA_NOEXCEPT; ///< Returns a const_iterator pointing one-after the last element in the list.
+ const_iterator cend() const EA_NOEXCEPT; ///< Returns a const_iterator pointing one-after the last element in the list.
+
+ reverse_iterator rbegin() EA_NOEXCEPT; ///< Returns a reverse_iterator pointing at the end of the list (start of the reverse sequence).
+ const_reverse_iterator rbegin() const EA_NOEXCEPT; ///< Returns a const_reverse_iterator pointing at the end of the list (start of the reverse sequence).
+ const_reverse_iterator crbegin() const EA_NOEXCEPT; ///< Returns a const_reverse_iterator pointing at the end of the list (start of the reverse sequence).
+
+ reverse_iterator rend() EA_NOEXCEPT; ///< Returns a reverse_iterator pointing at the start of the list (end of the reverse sequence).
+ const_reverse_iterator rend() const EA_NOEXCEPT; ///< Returns a const_reverse_iterator pointing at the start of the list (end of the reverse sequence).
+ const_reverse_iterator crend() const EA_NOEXCEPT; ///< Returns a const_reverse_iterator pointing at the start of the list (end of the reverse sequence).
+
+ reference front(); ///< Returns a reference to the first element. The list must be non-empty.
+ const_reference front() const; ///< Returns a const reference to the first element. The list must be non-empty.
+ reference back(); ///< Returns a reference to the last element. The list must be non-empty.
+ const_reference back() const; ///< Returns a const reference to the last element. The list must be non-empty.
+
+ void push_front(value_type& x); ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list.
+ void push_back(value_type& x); ///< Adds an element to the back of the list; O(1). The element is not copied. The element must not be in any other list.
+
+ bool contains(const value_type& x) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()).
+
+ iterator locate(value_type& x); ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n)
+ const_iterator locate(const value_type& x) const; ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n)
+
+ iterator insert(const_iterator pos, value_type& x); ///< Inserts an element before the element pointed to by the iterator. O(1)
+ iterator erase(const_iterator pos); ///< Erases the element pointed to by the iterator. O(1)
+ iterator erase(const_iterator pos, const_iterator last); ///< Erases elements within the iterator range [pos, last). O(1)
+
+ reverse_iterator erase(const_reverse_iterator pos);
+ reverse_iterator erase(const_reverse_iterator pos, const_reverse_iterator last);
+
+ static void remove(value_type& value); ///< Erases an element from a list; O(1). Note that this is static so you don't need to know which list the element, although it must be in some list.
+
+ void splice(const_iterator pos, value_type& x);
+ ///< Moves the given element into this list before the element pointed to by pos; O(1).
+ ///< Required: x must be in some list or have first/next pointers that point it itself.
+
+ void splice(const_iterator pos, intrusive_list& x);
+ ///< Moves the contents of a list into this list before the element pointed to by pos; O(1).
+ ///< Required: &x != this (same as std::list).
+
+ void splice(const_iterator pos, intrusive_list& x, const_iterator i);
+ ///< Moves the given element pointed to i within the list x into the current list before
+ ///< the element pointed to by pos; O(1).
+
+ void splice(const_iterator pos, intrusive_list& x, const_iterator first, const_iterator last);
+ ///< Moves the range of elements [first, last) from list x into the current list before
+ ///< the element pointed to by pos; O(1).
+ ///< Required: pos must not be in [first, last). (same as std::list).
+
+ public:
+ // Sorting functionality
+ // This is independent of the global sort algorithms, as lists are
+ // linked nodes and can be sorted more efficiently by moving nodes
+ // around in ways that global sort algorithms aren't privy to.
+
+ void merge(this_type& x);
+
+ template <typename Compare>
+ void merge(this_type& x, Compare compare);
+
+ void unique();
+
+ template <typename BinaryPredicate>
+ void unique(BinaryPredicate);
+
+ void sort();
+
+ template<typename Compare>
+ void sort(Compare compare);
+
+ public:
+ // bool validate() const; // Inherited from parent.
+ int validate_iterator(const_iterator i) const;
+
+ }; // intrusive_list
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_list_node
+ ///////////////////////////////////////////////////////////////////////
+
+ // Moved to be inline within the class because the may-alias attribute is
+ // triggering what appears to be a bug in GCC that effectively requires
+ // may-alias structs to implement inline member functions within the class
+ // declaration. We don't have a .cpp file for
+ // #if EASTL_VALIDATE_INTRUSIVE_LIST
+ // inline intrusive_list_node::intrusive_list_node()
+ // {
+ // mpNext = mpPrev = NULL;
+ // }
+ //
+ // inline intrusive_list_node::~intrusive_list_node()
+ // {
+ // #if EASTL_ASSERT_ENABLED
+ // if(mpNext || mpPrev)
+ // EASTL_FAIL_MSG("~intrusive_list_node(): List is non-empty.");
+ // #endif
+ // }
+ // #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_list_iterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline intrusive_list_iterator<T, Pointer, Reference>::intrusive_list_iterator()
+ {
+ #if EASTL_DEBUG
+ mpNode = NULL;
+ #endif
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline intrusive_list_iterator<T, Pointer, Reference>::intrusive_list_iterator(pointer pNode)
+ : mpNode(pNode)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline intrusive_list_iterator<T, Pointer, Reference>::intrusive_list_iterator(const iterator& x)
+ : mpNode(x.mpNode)
+ {
+ // Empty
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename intrusive_list_iterator<T, Pointer, Reference>::this_type&
+ intrusive_list_iterator<T, Pointer, Reference>::operator=(const iterator& x)
+ {
+ mpNode = x.mpNode;
+ return *this;
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename intrusive_list_iterator<T, Pointer, Reference>::reference
+ intrusive_list_iterator<T, Pointer, Reference>::operator*() const
+ {
+ return *mpNode;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename intrusive_list_iterator<T, Pointer, Reference>::pointer
+ intrusive_list_iterator<T, Pointer, Reference>::operator->() const
+ {
+ return mpNode;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename intrusive_list_iterator<T, Pointer, Reference>::this_type&
+ intrusive_list_iterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename intrusive_list_iterator<T, Pointer, Reference>::this_type
+ intrusive_list_iterator<T, Pointer, Reference>::operator++(int)
+ {
+ intrusive_list_iterator it(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return it;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename intrusive_list_iterator<T, Pointer, Reference>::this_type&
+ intrusive_list_iterator<T, Pointer, Reference>::operator--()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpPrev);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename intrusive_list_iterator<T, Pointer, Reference>::this_type
+ intrusive_list_iterator<T, Pointer, Reference>::operator--(int)
+ {
+ intrusive_list_iterator it(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpPrev);
+ return it;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template paremeters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const intrusive_list_iterator<T, PointerA, ReferenceA>& a,
+ const intrusive_list_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const intrusive_list_iterator<T, PointerA, ReferenceA>& a,
+ const intrusive_list_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const intrusive_list_iterator<T, Pointer, Reference>& a,
+ const intrusive_list_iterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_list_base
+ ///////////////////////////////////////////////////////////////////////
+
+ inline intrusive_list_base::intrusive_list_base()
+ {
+ mAnchor.mpNext = mAnchor.mpPrev = &mAnchor;
+ }
+
+ inline intrusive_list_base::~intrusive_list_base()
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ clear();
+ mAnchor.mpNext = mAnchor.mpPrev = NULL;
+ #endif
+ }
+
+
+ inline bool intrusive_list_base::empty() const EA_NOEXCEPT
+ {
+ return mAnchor.mpPrev == &mAnchor;
+ }
+
+
+ inline intrusive_list_base::size_type intrusive_list_base::size() const EA_NOEXCEPT
+ {
+ const intrusive_list_node* p = &mAnchor;
+ size_type n = (size_type)-1;
+
+ do {
+ ++n;
+ p = p->mpNext;
+ } while(p != &mAnchor);
+
+ return n;
+ }
+
+
+ inline void intrusive_list_base::clear() EA_NOEXCEPT
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ // Need to clear out all the next/prev pointers in the elements;
+ // this makes this operation O(n) instead of O(1).
+ intrusive_list_node* pNode = mAnchor.mpNext;
+
+ while(pNode != &mAnchor)
+ {
+ intrusive_list_node* const pNextNode = pNode->mpNext;
+ pNode->mpNext = pNode->mpPrev = NULL;
+ pNode = pNextNode;
+ }
+ #endif
+
+ mAnchor.mpNext = mAnchor.mpPrev = &mAnchor;
+ }
+
+
+ inline void intrusive_list_base::pop_front()
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ intrusive_list_node* const pNode = mAnchor.mpNext;
+ #endif
+
+ mAnchor.mpNext->mpNext->mpPrev = &mAnchor;
+ mAnchor.mpNext = mAnchor.mpNext->mpNext;
+
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ if(pNode != &mAnchor)
+ pNode->mpNext = pNode->mpPrev = NULL;
+ #if EASTL_ASSERT_ENABLED
+ else
+ EASTL_FAIL_MSG("intrusive_list::pop_front(): empty list.");
+ #endif
+ #endif
+ }
+
+
+ inline void intrusive_list_base::pop_back()
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ intrusive_list_node* const pNode = mAnchor.mpPrev;
+ #endif
+
+ mAnchor.mpPrev->mpPrev->mpNext = &mAnchor;
+ mAnchor.mpPrev = mAnchor.mpPrev->mpPrev;
+
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ if(pNode != &mAnchor)
+ pNode->mpNext = pNode->mpPrev = NULL;
+ #if EASTL_ASSERT_ENABLED
+ else
+ EASTL_FAIL_MSG("intrusive_list::pop_back(): empty list.");
+ #endif
+ #endif
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_list
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ inline intrusive_list<T>::intrusive_list()
+ {
+ }
+
+
+ template <typename T>
+ inline intrusive_list<T>::intrusive_list(const this_type& /*x*/)
+ : intrusive_list_base()
+ {
+ // We intentionally ignore argument x.
+ // To consider: Shouldn't this function simply not exist? Is there a useful purpose for having this function?
+ // There should be a comment here about it, though my first guess is that this exists to quell VC++ level 4/-Wall compiler warnings.
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::this_type& intrusive_list<T>::operator=(const this_type& /*x*/)
+ {
+ // We intentionally ignore argument x.
+ // See notes above in the copy constructor about questioning the existence of this function.
+ return *this;
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::iterator intrusive_list<T>::begin() EA_NOEXCEPT
+ {
+ return iterator(static_cast<T*>(mAnchor.mpNext));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_iterator intrusive_list<T>::begin() const EA_NOEXCEPT
+ {
+ return const_iterator(static_cast<T*>(mAnchor.mpNext));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_iterator intrusive_list<T>::cbegin() const EA_NOEXCEPT
+ {
+ return const_iterator(static_cast<T*>(mAnchor.mpNext));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::iterator intrusive_list<T>::end() EA_NOEXCEPT
+ {
+ return iterator(static_cast<T*>(&mAnchor));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_iterator intrusive_list<T>::end() const EA_NOEXCEPT
+ {
+ return const_iterator(static_cast<const T*>(&mAnchor));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_iterator intrusive_list<T>::cend() const EA_NOEXCEPT
+ {
+ return const_iterator(static_cast<const T*>(&mAnchor));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::reverse_iterator intrusive_list<T>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(iterator(static_cast<T*>(&mAnchor)));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_reverse_iterator intrusive_list<T>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(static_cast<const T*>(&mAnchor)));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_reverse_iterator intrusive_list<T>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(static_cast<const T*>(&mAnchor)));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::reverse_iterator intrusive_list<T>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(iterator(static_cast<T*>(mAnchor.mpNext)));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_reverse_iterator intrusive_list<T>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(static_cast<const T*>(mAnchor.mpNext)));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_reverse_iterator intrusive_list<T>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(static_cast<const T*>(mAnchor.mpNext)));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::reference intrusive_list<T>::front()
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED
+ if(mAnchor.mpNext == &mAnchor)
+ EASTL_FAIL_MSG("intrusive_list::front(): empty list.");
+ #endif
+
+ return *static_cast<T*>(mAnchor.mpNext);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_reference intrusive_list<T>::front() const
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED
+ if(mAnchor.mpNext == &mAnchor)
+ EASTL_FAIL_MSG("intrusive_list::front(): empty list.");
+ #endif
+
+ return *static_cast<const T*>(mAnchor.mpNext);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::reference intrusive_list<T>::back()
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED
+ if(mAnchor.mpNext == &mAnchor)
+ EASTL_FAIL_MSG("intrusive_list::back(): empty list.");
+ #endif
+
+ return *static_cast<T*>(mAnchor.mpPrev);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_reference intrusive_list<T>::back() const
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED
+ if(mAnchor.mpNext == &mAnchor)
+ EASTL_FAIL_MSG("intrusive_list::back(): empty list.");
+ #endif
+
+ return *static_cast<const T*>(mAnchor.mpPrev);
+ }
+
+
+ template <typename T>
+ inline void intrusive_list<T>::push_front(value_type& x)
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED
+ if(x.mpNext || x.mpPrev)
+ EASTL_FAIL_MSG("intrusive_list::push_front(): element already on a list.");
+ #endif
+
+ x.mpNext = mAnchor.mpNext;
+ x.mpPrev = &mAnchor;
+ mAnchor.mpNext = &x;
+ x.mpNext->mpPrev = &x;
+ }
+
+
+ template <typename T>
+ inline void intrusive_list<T>::push_back(value_type& x)
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED
+ if(x.mpNext || x.mpPrev)
+ EASTL_FAIL_MSG("intrusive_list::push_back(): element already on a list.");
+ #endif
+
+ x.mpPrev = mAnchor.mpPrev;
+ x.mpNext = &mAnchor;
+ mAnchor.mpPrev = &x;
+ x.mpPrev->mpNext = &x;
+ }
+
+
+ template <typename T>
+ inline bool intrusive_list<T>::contains(const value_type& x) const
+ {
+ for(const intrusive_list_node* p = mAnchor.mpNext; p != &mAnchor; p = p->mpNext)
+ {
+ if(p == &x)
+ return true;
+ }
+
+ return false;
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::iterator intrusive_list<T>::locate(value_type& x)
+ {
+ for(intrusive_list_node* p = (T*)mAnchor.mpNext; p != &mAnchor; p = p->mpNext)
+ {
+ if(p == &x)
+ return iterator(static_cast<T*>(p));
+ }
+
+ return iterator((T*)&mAnchor);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::const_iterator intrusive_list<T>::locate(const value_type& x) const
+ {
+ for(const intrusive_list_node* p = mAnchor.mpNext; p != &mAnchor; p = p->mpNext)
+ {
+ if(p == &x)
+ return const_iterator(static_cast<const T*>(p));
+ }
+
+ return const_iterator((T*)&mAnchor);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::iterator intrusive_list<T>::insert(const_iterator pos, value_type& x)
+ {
+ #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED
+ if(x.mpNext || x.mpPrev)
+ EASTL_FAIL_MSG("intrusive_list::insert(): element already on a list.");
+ #endif
+
+ intrusive_list_node& next = *const_cast<node_type*>(pos.mpNode);
+ intrusive_list_node& prev = *static_cast<node_type*>(next.mpPrev);
+ prev.mpNext = next.mpPrev = &x;
+ x.mpPrev = &prev;
+ x.mpNext = &next;
+
+ return iterator(&x);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::iterator
+ intrusive_list<T>::erase(const_iterator pos)
+ {
+ intrusive_list_node& prev = *static_cast<node_type*>(pos.mpNode->mpPrev);
+ intrusive_list_node& next = *static_cast<node_type*>(pos.mpNode->mpNext);
+ prev.mpNext = &next;
+ next.mpPrev = &prev;
+
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ iterator ii(const_cast<node_type*>(pos.mpNode));
+ ii.mpNode->mpPrev = ii.mpNode->mpNext = NULL;
+ #endif
+
+ return iterator(static_cast<node_type*>(&next));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::iterator
+ intrusive_list<T>::erase(const_iterator first, const_iterator last)
+ {
+ intrusive_list_node& prev = *static_cast<node_type*>(first.mpNode->mpPrev);
+ intrusive_list_node& next = *const_cast<node_type*>(last.mpNode);
+
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ // need to clear out all the next/prev pointers in the elements;
+ // this makes this operation O(n) instead of O(1), sadly, although
+ // it's technically amortized O(1) since you could count yourself
+ // as paying this cost with each insert.
+ intrusive_list_node* pCur = const_cast<node_type*>(first.mpNode);
+
+ while(pCur != &next)
+ {
+ intrusive_list_node* const pCurNext = pCur->mpNext;
+ pCur->mpPrev = pCur->mpNext = NULL;
+ pCur = pCurNext;
+ }
+ #endif
+
+ prev.mpNext = &next;
+ next.mpPrev = &prev;
+
+ return iterator(const_cast<node_type*>(last.mpNode));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::reverse_iterator
+ intrusive_list<T>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_list<T>::reverse_iterator
+ intrusive_list<T>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename T>
+ void intrusive_list<T>::swap(intrusive_list& x)
+ {
+ // swap anchors
+ intrusive_list_node temp(mAnchor);
+ mAnchor = x.mAnchor;
+ x.mAnchor = temp;
+
+ // Fixup node pointers into the anchor, since the addresses of
+ // the anchors must stay the same with each list.
+ if(mAnchor.mpNext == &x.mAnchor)
+ mAnchor.mpNext = mAnchor.mpPrev = &mAnchor;
+ else
+ mAnchor.mpNext->mpPrev = mAnchor.mpPrev->mpNext = &mAnchor;
+
+ if(x.mAnchor.mpNext == &mAnchor)
+ x.mAnchor.mpNext = x.mAnchor.mpPrev = &x.mAnchor;
+ else
+ x.mAnchor.mpNext->mpPrev = x.mAnchor.mpPrev->mpNext = &x.mAnchor;
+
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ temp.mpPrev = temp.mpNext = NULL;
+ #endif
+ }
+
+
+ template <typename T>
+ void intrusive_list<T>::splice(const_iterator pos, value_type& value)
+ {
+ // Note that splice(pos, x, pos) and splice(pos+1, x, pos)
+ // are valid and need to be handled correctly.
+
+ if(pos.mpNode != &value)
+ {
+ // Unlink item from old list.
+ intrusive_list_node& oldNext = *value.mpNext;
+ intrusive_list_node& oldPrev = *value.mpPrev;
+ oldNext.mpPrev = &oldPrev;
+ oldPrev.mpNext = &oldNext;
+
+ // Relink item into new list.
+ intrusive_list_node& newNext = *const_cast<node_type*>(pos.mpNode);
+ intrusive_list_node& newPrev = *newNext.mpPrev;
+
+ newPrev.mpNext = &value;
+ newNext.mpPrev = &value;
+ value.mpPrev = &newPrev;
+ value.mpNext = &newNext;
+ }
+ }
+
+
+ template <typename T>
+ void intrusive_list<T>::splice(const_iterator pos, intrusive_list& x)
+ {
+ // Note: &x == this is prohibited, so self-insertion is not a problem.
+ if(x.mAnchor.mpNext != &x.mAnchor) // If the list 'x' isn't empty...
+ {
+ intrusive_list_node& next = *const_cast<node_type*>(pos.mpNode);
+ intrusive_list_node& prev = *static_cast<node_type*>(next.mpPrev);
+ intrusive_list_node& insertPrev = *static_cast<node_type*>(x.mAnchor.mpNext);
+ intrusive_list_node& insertNext = *static_cast<node_type*>(x.mAnchor.mpPrev);
+
+ prev.mpNext = &insertPrev;
+ insertPrev.mpPrev = &prev;
+ insertNext.mpNext = &next;
+ next.mpPrev = &insertNext;
+ x.mAnchor.mpPrev = x.mAnchor.mpNext = &x.mAnchor;
+ }
+ }
+
+
+ template <typename T>
+ void intrusive_list<T>::splice(const_iterator pos, intrusive_list& /*x*/, const_iterator i)
+ {
+ // Note: &x == this is prohibited, so self-insertion is not a problem.
+
+ // Note that splice(pos, x, pos) and splice(pos + 1, x, pos)
+ // are valid and need to be handled correctly.
+
+ // We don't need to check if the source list is empty, because
+ // this function expects a valid iterator from the source list,
+ // and thus the list cannot be empty in such a situation.
+
+ iterator ii(const_cast<node_type*>(i.mpNode)); // Make a temporary non-const version.
+
+ if(pos != ii)
+ {
+ // Unlink item from old list.
+ intrusive_list_node& oldNext = *ii.mpNode->mpNext;
+ intrusive_list_node& oldPrev = *ii.mpNode->mpPrev;
+ oldNext.mpPrev = &oldPrev;
+ oldPrev.mpNext = &oldNext;
+
+ // Relink item into new list.
+ intrusive_list_node& newNext = *const_cast<node_type*>(pos.mpNode);
+ intrusive_list_node& newPrev = *newNext.mpPrev;
+
+ newPrev.mpNext = ii.mpNode;
+ newNext.mpPrev = ii.mpNode;
+ ii.mpNode->mpPrev = &newPrev;
+ ii.mpNode->mpNext = &newNext;
+ }
+ }
+
+
+ template <typename T>
+ void intrusive_list<T>::splice(const_iterator pos, intrusive_list& /*x*/, const_iterator first, const_iterator last)
+ {
+ // Note: &x == this is prohibited, so self-insertion is not a problem.
+ if(first != last)
+ {
+ intrusive_list_node& insertPrev = *const_cast<node_type*>(first.mpNode);
+ intrusive_list_node& insertNext = *static_cast<node_type*>(last.mpNode->mpPrev);
+
+ // remove from old list
+ insertNext.mpNext->mpPrev = insertPrev.mpPrev;
+ insertPrev.mpPrev->mpNext = insertNext.mpNext;
+
+ // insert into this list
+ intrusive_list_node& next = *const_cast<node_type*>(pos.mpNode);
+ intrusive_list_node& prev = *static_cast<node_type*>(next.mpPrev);
+
+ prev.mpNext = &insertPrev;
+ insertPrev.mpPrev = &prev;
+ insertNext.mpNext = &next;
+ next.mpPrev = &insertNext;
+ }
+ }
+
+
+ template <typename T>
+ inline void intrusive_list<T>::remove(value_type& value)
+ {
+ intrusive_list_node& prev = *value.mpPrev;
+ intrusive_list_node& next = *value.mpNext;
+ prev.mpNext = &next;
+ next.mpPrev = &prev;
+
+ #if EASTL_VALIDATE_INTRUSIVE_LIST
+ value.mpPrev = value.mpNext = NULL;
+ #endif
+ }
+
+
+ template <typename T>
+ void intrusive_list<T>::merge(this_type& x)
+ {
+ if(this != &x)
+ {
+ iterator first(begin());
+ iterator firstX(x.begin());
+ const iterator last(end());
+ const iterator lastX(x.end());
+
+ while((first != last) && (firstX != lastX))
+ {
+ if(*firstX < *first)
+ {
+ iterator next(firstX);
+
+ splice(first, x, firstX, ++next);
+ firstX = next;
+ }
+ else
+ ++first;
+ }
+
+ if(firstX != lastX)
+ splice(last, x, firstX, lastX);
+ }
+ }
+
+
+ template <typename T>
+ template <typename Compare>
+ void intrusive_list<T>::merge(this_type& x, Compare compare)
+ {
+ if(this != &x)
+ {
+ iterator first(begin());
+ iterator firstX(x.begin());
+ const iterator last(end());
+ const iterator lastX(x.end());
+
+ while((first != last) && (firstX != lastX))
+ {
+ if(compare(*firstX, *first))
+ {
+ iterator next(firstX);
+
+ splice(first, x, firstX, ++next);
+ firstX = next;
+ }
+ else
+ ++first;
+ }
+
+ if(firstX != lastX)
+ splice(last, x, firstX, lastX);
+ }
+ }
+
+
+ template <typename T>
+ void intrusive_list<T>::unique()
+ {
+ iterator first(begin());
+ const iterator last(end());
+
+ if(first != last)
+ {
+ iterator next(first);
+
+ while(++next != last)
+ {
+ if(*first == *next)
+ erase(next);
+ else
+ first = next;
+ next = first;
+ }
+ }
+ }
+
+
+ template <typename T>
+ template <typename BinaryPredicate>
+ void intrusive_list<T>::unique(BinaryPredicate predicate)
+ {
+ iterator first(begin());
+ const iterator last(end());
+
+ if(first != last)
+ {
+ iterator next(first);
+
+ while(++next != last)
+ {
+ if(predicate(*first, *next))
+ erase(next);
+ else
+ first = next;
+ next = first;
+ }
+ }
+ }
+
+
+ template <typename T>
+ void intrusive_list<T>::sort()
+ {
+ // We implement the algorithm employed by Chris Caulfield whereby we use recursive
+ // function calls to sort the list. The sorting of a very large list may fail due to stack overflow
+ // if the stack is exhausted. The limit depends on the platform and the avaialble stack space.
+
+ // Easier-to-understand version of the 'if' statement:
+ // iterator i(begin());
+ // if((i != end()) && (++i != end())) // If the size is >= 2 (without calling the more expensive size() function)...
+
+ // Faster, more inlinable version of the 'if' statement:
+ if((static_cast<node_type*>(mAnchor.mpNext) != &mAnchor) &&
+ (static_cast<node_type*>(mAnchor.mpNext) != static_cast<node_type*>(mAnchor.mpPrev)))
+ {
+ // Split the array into 2 roughly equal halves.
+ this_type leftList; // This should cause no memory allocation.
+ this_type rightList;
+
+ // We find an iterator which is in the middle of the list. The fastest way to do
+ // this is to iterate from the base node both forwards and backwards with two
+ // iterators and stop when they meet each other. Recall that our size() function
+ // is not O(1) but is instead O(n), at least when EASTL_LIST_SIZE_CACHE is disabled.
+ #if EASTL_LIST_SIZE_CACHE
+ iterator mid(begin());
+ eastl::advance(mid, size() / 2);
+ #else
+ iterator mid(begin()), tail(end());
+
+ while((mid != tail) && (++mid != tail))
+ --tail;
+ #endif
+
+ // Move the left half of this into leftList and the right half into rightList.
+ leftList.splice(leftList.begin(), *this, begin(), mid);
+ rightList.splice(rightList.begin(), *this);
+
+ // Sort the sub-lists.
+ leftList.sort();
+ rightList.sort();
+
+ // Merge the two halves into this list.
+ splice(begin(), leftList);
+ merge(rightList);
+ }
+ }
+
+
+ template <typename T>
+ template<typename Compare>
+ void intrusive_list<T>::sort(Compare compare)
+ {
+ // We implement the algorithm employed by Chris Caulfield whereby we use recursive
+ // function calls to sort the list. The sorting of a very large list may fail due to stack overflow
+ // if the stack is exhausted. The limit depends on the platform and the avaialble stack space.
+
+ // Easier-to-understand version of the 'if' statement:
+ // iterator i(begin());
+ // if((i != end()) && (++i != end())) // If the size is >= 2 (without calling the more expensive size() function)...
+
+ // Faster, more inlinable version of the 'if' statement:
+ if((static_cast<node_type*>(mAnchor.mpNext) != &mAnchor) &&
+ (static_cast<node_type*>(mAnchor.mpNext) != static_cast<node_type*>(mAnchor.mpPrev)))
+ {
+ // Split the array into 2 roughly equal halves.
+ this_type leftList; // This should cause no memory allocation.
+ this_type rightList;
+
+ // We find an iterator which is in the middle of the list. The fastest way to do
+ // this is to iterate from the base node both forwards and backwards with two
+ // iterators and stop when they meet each other. Recall that our size() function
+ // is not O(1) but is instead O(n), at least when EASTL_LIST_SIZE_CACHE is disabled.
+ #if EASTL_LIST_SIZE_CACHE
+ iterator mid(begin());
+ eastl::advance(mid, size() / 2);
+ #else
+ iterator mid(begin()), tail(end());
+
+ while((mid != tail) && (++mid != tail))
+ --tail;
+ #endif
+
+ // Move the left half of this into leftList and the right half into rightList.
+ leftList.splice(leftList.begin(), *this, begin(), mid);
+ rightList.splice(rightList.begin(), *this);
+
+ // Sort the sub-lists.
+ leftList.sort(compare);
+ rightList.sort(compare);
+
+ // Merge the two halves into this list.
+ splice(begin(), leftList);
+ merge(rightList, compare);
+ }
+ }
+
+
+ template <typename T>
+ inline int intrusive_list<T>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ bool operator==(const intrusive_list<T>& a, const intrusive_list<T>& b)
+ {
+ // If we store an mSize member for intrusive_list, we want to take advantage of it here.
+ typename intrusive_list<T>::const_iterator ia = a.begin();
+ typename intrusive_list<T>::const_iterator ib = b.begin();
+ typename intrusive_list<T>::const_iterator enda = a.end();
+ typename intrusive_list<T>::const_iterator endb = b.end();
+
+ while((ia != enda) && (ib != endb) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda) && (ib == endb);
+ }
+
+ template <typename T>
+ bool operator!=(const intrusive_list<T>& a, const intrusive_list<T>& b)
+ {
+ return !(a == b);
+ }
+
+ template <typename T>
+ bool operator<(const intrusive_list<T>& a, const intrusive_list<T>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+ template <typename T>
+ bool operator>(const intrusive_list<T>& a, const intrusive_list<T>& b)
+ {
+ return b < a;
+ }
+
+ template <typename T>
+ bool operator<=(const intrusive_list<T>& a, const intrusive_list<T>& b)
+ {
+ return !(b < a);
+ }
+
+ template <typename T>
+ bool operator>=(const intrusive_list<T>& a, const intrusive_list<T>& b)
+ {
+ return !(a < b);
+ }
+
+ template <typename T>
+ void swap(intrusive_list<T>& a, intrusive_list<T>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/intrusive_ptr.h b/EASTL/include/EASTL/intrusive_ptr.h
new file mode 100644
index 0000000..af4e686
--- /dev/null
+++ b/EASTL/include/EASTL/intrusive_ptr.h
@@ -0,0 +1,426 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTRUSIVE_PTR_H
+#define EASTL_INTRUSIVE_PTR_H
+
+
+#include <EASTL/internal/config.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ // We provide default implementations of AddRef and Release in the eastl namespace.
+ // The user can override these on a per-class basis by defining their own specialized
+ // intrusive_ptr_add_ref and intrusive_ptr_release functions. User-defined specializations
+ // do not need to exist in the eastl namespace, but should preferably be in the namespace
+ // of the templated class T.
+ template <typename T>
+ void intrusive_ptr_add_ref(T* p)
+ {
+ p->AddRef();
+ }
+
+ template <typename T>
+ void intrusive_ptr_release(T* p)
+ {
+ p->Release();
+ }
+
+
+ //////////////////////////////////////////////////////////////////////////////
+ /// intrusive_ptr
+ ///
+ /// This is a class that acts like the C++ auto_ptr class except that instead
+ /// of deleting its member data when it goes out of scope, it releases its
+ /// member data when it goes out of scope. This class thus requires that the
+ /// templated data type have an AddRef and Release function (or whatever is
+ /// configured to be the two refcount functions).
+ ///
+ /// This class is useful for automatically releasing an object when this
+ /// class goes out of scope. See below for some usage.
+ /// You should be careful about putting instances of this class as members of
+ /// another class. If you do so, then the intrusive_ptr destructor will only
+ /// be called if the object that owns it is destructed. This creates a potential
+ /// chicken-and-egg situation. What if the intrusive_ptr member contains a
+ /// pointer to an object that has a reference on the object that owns the
+ /// intrusive_ptr member? The answer is that the neither object can ever be
+ /// destructed. The solution is to:
+ /// 1) Be very careful about what objects you put into member intrusive_ptr objects.
+ /// 2) Clear out your intrusive_ptr members in your shutdown function.
+ /// 3) Simply don't use intrusive_ptr objects as class members.
+ ///
+ /// Example usage:
+ /// intrusive_ptr<IWidget> pWidget = new Widget;
+ /// pWidget = new Widget;
+ /// pWidget->Reset();
+ ///
+ template <typename T>
+ class intrusive_ptr
+ {
+ protected:
+ // Friend declarations.
+ template <typename U> friend class intrusive_ptr;
+ typedef intrusive_ptr<T> this_type;
+
+ T* mpObject;
+
+ public:
+ /// element_type
+ /// This typedef is present for consistency with the C++ standard library
+ /// auto_ptr template. It allows users to refer to the templated type via
+ /// a typedef. This is sometimes useful to be able to do.
+ ///
+ /// Example usage:
+ /// intrusive_ptr<IWidget> ip;
+ /// void DoSomething(intrusive_ptr<IWidget>::element_type someType);
+ ///
+ typedef T element_type;
+
+ /// intrusive_ptr
+ /// Default constructor. The member object is set to NULL.
+ intrusive_ptr()
+ : mpObject(NULL)
+ {
+ // Empty
+ }
+
+ /// intrusive_ptr
+ /// Provides a constructor which takes ownership of a pointer.
+ /// The incoming pointer is AddRefd.
+ ///
+ /// Example usage:
+ /// intrusive_ptr<Widget> pWidget(new Widget);
+ intrusive_ptr(T* p, bool bAddRef = true)
+ : mpObject(p)
+ {
+ if(mpObject && bAddRef)
+ intrusive_ptr_add_ref(mpObject); // Intentionally do not prefix the call with eastl:: but instead allow namespace lookup to resolve the namespace.
+ }
+
+ /// intrusive_ptr
+ /// Construction from self type.
+ intrusive_ptr(const intrusive_ptr& ip)
+ : mpObject(ip.mpObject)
+ {
+ if(mpObject)
+ intrusive_ptr_add_ref(mpObject);
+ }
+
+
+ /// intrusive_ptr
+ /// move constructor
+ intrusive_ptr(intrusive_ptr&& ip)
+ : mpObject(nullptr)
+ {
+ swap(ip);
+ }
+
+ /// intrusive_ptr
+ /// Provides a constructor which copies a pointer from another intrusive_ptr.
+ /// The incoming pointer is AddRefd. The source intrusive_ptr object maintains
+ /// its AddRef on the pointer.
+ ///
+ /// Example usage:
+ /// intrusive_ptr<Widget> pWidget1;
+ /// intrusive_ptr<Widget> pWidget2(pWidget1);
+ template <typename U>
+ intrusive_ptr(const intrusive_ptr<U>& ip)
+ : mpObject(ip.mpObject)
+ {
+ if(mpObject)
+ intrusive_ptr_add_ref(mpObject);
+ }
+
+ /// intrusive_ptr
+ /// Releases the owned pointer.
+ ~intrusive_ptr()
+ {
+ if(mpObject)
+ intrusive_ptr_release(mpObject);
+ }
+
+
+ /// operator=
+ /// Assignment to self type.
+ intrusive_ptr& operator=(const intrusive_ptr& ip)
+ {
+ return operator=(ip.mpObject);
+ }
+
+
+ /// operator=
+ /// Move assignment operator
+ intrusive_ptr& operator=(intrusive_ptr&& ip)
+ {
+ swap(ip);
+ return *this;
+ }
+
+
+ /// operator =
+ /// Assigns an intrusive_ptr object to this intrusive_ptr object.
+ /// The incoming pointer is AddRefd. The source intrusive_ptr object
+ /// maintains its AddRef on the pointer. If there is an existing member
+ /// pointer, it is Released before the incoming pointer is assigned.
+ /// If the incoming pointer is equal to the existing pointer, no
+ /// action is taken. The incoming pointer is AddRefd before any
+ /// member pointer is Released.
+ template <typename U>
+ intrusive_ptr& operator=(const intrusive_ptr<U>& ip)
+ {
+ return operator=(ip.mpObject);
+ }
+
+ /// operator=
+ /// Assigns an intrusive_ptr object to this intrusive_ptr object.
+ /// The incoming pointer is AddRefd. If there is an existing member
+ /// pointer, it is Released before the incoming pointer is assigned.
+ /// If the incoming pointer is equal to the existing pointer, no
+ /// action is taken. The incoming pointer is AddRefd before any
+ /// member pointer is Released.
+ intrusive_ptr& operator=(T* pObject)
+ {
+ if(pObject != mpObject)
+ {
+ T* const pTemp = mpObject; // Create temporary to prevent possible problems with re-entrancy.
+ if(pObject)
+ intrusive_ptr_add_ref(pObject);
+ mpObject = pObject;
+ if(pTemp)
+ intrusive_ptr_release(pTemp);
+ }
+ return *this;
+ }
+
+ /// operator *
+ /// Returns a reference to the contained object.
+ T& operator *() const
+ {
+ return *mpObject;
+ }
+
+ /// operator *
+ /// Returns a pointer to the contained object, allowing the
+ /// user to use this container as if it were contained pointer itself.
+ T* operator ->() const
+ {
+ return mpObject;
+ }
+
+ /// get()
+ /// Returns a pointer to the contained object.
+ T* get() const
+ {
+ return mpObject;
+ }
+
+ /// reset
+ /// Releases the owned object and clears our reference to it.
+ void reset()
+ {
+ T* const pTemp = mpObject;
+ mpObject = NULL;
+ if(pTemp)
+ intrusive_ptr_release(pTemp);
+ }
+
+ /// swap
+ /// Exchanges the owned pointer beween two intrusive_ptr objects.
+ void swap(this_type& ip)
+ {
+ T* const pTemp = mpObject;
+ mpObject = ip.mpObject;
+ ip.mpObject = pTemp;
+ }
+
+ /// attach
+ /// Sets an intrusive_ptr pointer without calling AddRef() on
+ /// the pointed object. The intrusive_ptr thus eventually only does a
+ /// Release() on the object. This is useful for assuming a reference
+ /// that someone else has handed you and making sure it is always
+ /// released, even if you return in the middle of a function or an
+ /// exception is thrown.
+ ///
+ void attach(T* pObject)
+ {
+ T* const pTemp = mpObject;
+ mpObject = pObject;
+ if(pTemp)
+ intrusive_ptr_release(pTemp);
+ }
+
+ /// detach
+ /// Surrenders the reference held by an intrusive_ptr pointer --
+ /// it returns the current reference and nulls the pointer. If the returned
+ /// pointer is non-null it must be released. This is useful in functions
+ /// that must return a reference while possibly being aborted by a return
+ /// or thrown exception:
+ ///
+ /// bool GetFoo(T** pp){
+ /// intrusive_ptr<T> p(PrivateGetFoo());
+ /// if(p->Method())
+ /// return false;
+ /// *pp = p.detach();
+ /// return true;
+ /// }
+ T* detach()
+ {
+ T* const pTemp = mpObject;
+ mpObject = NULL;
+ return pTemp;
+ }
+
+ /// Implicit operator bool
+ /// Allows for using a intrusive_ptr as a boolean.
+ /// Example usage:
+ /// intrusive_ptr<Widget> ptr = new Widget;
+ /// if(ptr)
+ /// ++*ptr;
+ ///
+ /// Note that below we do not use operator bool(). The reason for this
+ /// is that booleans automatically convert up to short, int, float, etc.
+ /// The result is that this: if(intrusivePtr == 1) would yield true (bad).
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const
+ {
+ if(mpObject)
+ return &this_type::get;
+ return NULL;
+ }
+
+ /// operator!
+ /// This returns the opposite of operator bool; it returns true if
+ /// the owned pointer is null. Some compilers require this and some don't.
+ /// intrusive_ptr<Widget> ptr = new Widget;
+ /// if(!ptr)
+ /// assert(false);
+ bool operator!() const
+ {
+ return (mpObject == NULL);
+ }
+
+ }; // class intrusive_ptr
+
+
+ /// get_pointer
+ /// returns intrusive_ptr::get() via the input intrusive_ptr.
+ template <typename T>
+ inline T* get_pointer(const intrusive_ptr<T>& intrusivePtr)
+ {
+ return intrusivePtr.get();
+ }
+
+ /// swap
+ /// Exchanges the owned pointer beween two intrusive_ptr objects.
+ /// This non-member version is useful for compatibility of intrusive_ptr
+ /// objects with the C++ Standard Library and other libraries.
+ template <typename T>
+ inline void swap(intrusive_ptr<T>& intrusivePtr1, intrusive_ptr<T>& intrusivePtr2)
+ {
+ intrusivePtr1.swap(intrusivePtr2);
+ }
+
+
+ template <typename T, typename U>
+ bool operator==(intrusive_ptr<T> const& iPtr1, intrusive_ptr<U> const& iPtr2)
+ {
+ return (iPtr1.get() == iPtr2.get());
+ }
+
+ template <typename T, typename U>
+ bool operator!=(intrusive_ptr<T> const& iPtr1, intrusive_ptr<U> const& iPtr2)
+ {
+ return (iPtr1.get() != iPtr2.get());
+ }
+
+ template <typename T>
+ bool operator==(intrusive_ptr<T> const& iPtr1, T* p)
+ {
+ return (iPtr1.get() == p);
+ }
+
+ template <typename T>
+ bool operator!=(intrusive_ptr<T> const& iPtr1, T* p)
+ {
+ return (iPtr1.get() != p);
+ }
+
+ template <typename T>
+ bool operator==(T* p, intrusive_ptr<T> const& iPtr2)
+ {
+ return (p == iPtr2.get());
+ }
+
+ template <typename T>
+ bool operator!=(T* p, intrusive_ptr<T> const& iPtr2)
+ {
+ return (p != iPtr2.get());
+ }
+
+ template <typename T, typename U>
+ bool operator<(intrusive_ptr<T> const& iPtr1, intrusive_ptr<U> const& iPtr2)
+ {
+ return ((uintptr_t)iPtr1.get() < (uintptr_t)iPtr2.get());
+ }
+
+
+ /// static_pointer_cast
+ /// Returns an intrusive_ptr<T> static-casted from a intrusive_ptr<U>.
+ template <class T, class U>
+ intrusive_ptr<T> static_pointer_cast(const intrusive_ptr<U>& intrusivePtr)
+ {
+ return static_cast<T*>(intrusivePtr.get());
+ }
+
+
+ #if EASTL_RTTI_ENABLED
+
+ /// dynamic_pointer_cast
+ /// Returns an intrusive_ptr<T> dynamic-casted from a intrusive_ptr<U>.
+ template <class T, class U>
+ intrusive_ptr<T> dynamic_pointer_cast(const intrusive_ptr<U>& intrusivePtr)
+ {
+ return dynamic_cast<T*>(intrusivePtr.get());
+ }
+
+ #endif
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/iterator.h b/EASTL/include/EASTL/iterator.h
new file mode 100644
index 0000000..6c268aa
--- /dev/null
+++ b/EASTL/include/EASTL/iterator.h
@@ -0,0 +1,1250 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ITERATOR_H
+#define EASTL_ITERATOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/internal/type_detected.h>
+#include <EASTL/internal/type_void_t.h>
+#include <EASTL/initializer_list.h>
+
+EA_DISABLE_ALL_VC_WARNINGS();
+
+#include <stddef.h>
+
+EA_RESTORE_ALL_VC_WARNINGS();
+
+// If the user has specified that we use std iterator
+// categories instead of EASTL iterator categories,
+// then #include <iterator>.
+#if EASTL_STD_ITERATOR_CATEGORY_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS();
+
+ #include <iterator>
+
+ EA_RESTORE_ALL_VC_WARNINGS();
+#endif
+
+
+EA_DISABLE_VC_WARNING(4619); // There is no warning number 'number'.
+EA_DISABLE_VC_WARNING(4217); // Member template functions cannot be used for copy-assignment or copy-construction.
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// iterator_status_flag
+ ///
+ /// Defines the validity status of an iterator. This is primarily used for
+ /// iterator validation in debug builds. These are implemented as OR-able
+ /// flags (as opposed to mutually exclusive values) in order to deal with
+ /// the nature of iterator status. In particular, an iterator may be valid
+ /// but not dereferencable, as in the case with an iterator to container end().
+ /// An iterator may be valid but also dereferencable, as in the case with an
+ /// iterator to container begin().
+ ///
+ enum iterator_status_flag
+ {
+ isf_none = 0x00, /// This is called none and not called invalid because it is not strictly the opposite of invalid.
+ isf_valid = 0x01, /// The iterator is valid, which means it is in the range of [begin, end].
+ isf_current = 0x02, /// The iterator is valid and points to the same element it did when created. For example, if an iterator points to vector::begin() but an element is inserted at the front, the iterator is valid but not current. Modification of elements in place do not make iterators non-current.
+ isf_can_dereference = 0x04 /// The iterator is dereferencable, which means it is in the range of [begin, end). It may or may not be current.
+ };
+
+
+
+ // The following declarations are taken directly from the C++ standard document.
+ // input_iterator_tag, etc.
+ // iterator
+ // iterator_traits
+ // reverse_iterator
+
+ // Iterator categories
+ // Every iterator is defined as belonging to one of the iterator categories that
+ // we define here. These categories come directly from the C++ standard.
+ #if !EASTL_STD_ITERATOR_CATEGORY_ENABLED // If we are to use our own iterator category definitions...
+ struct input_iterator_tag { };
+ struct output_iterator_tag { };
+ struct forward_iterator_tag : public input_iterator_tag { };
+ struct bidirectional_iterator_tag : public forward_iterator_tag { };
+ struct random_access_iterator_tag : public bidirectional_iterator_tag { };
+ struct contiguous_iterator_tag : public random_access_iterator_tag { }; // Extension to the C++ standard. Contiguous ranges are more than random access, they are physically contiguous.
+ #endif
+
+
+ // struct iterator
+ template <typename Category, typename T, typename Distance = ptrdiff_t,
+ typename Pointer = T*, typename Reference = T&>
+ struct iterator
+ {
+ typedef Category iterator_category;
+ typedef T value_type;
+ typedef Distance difference_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ };
+
+
+ // struct iterator_traits
+ namespace internal
+ {
+ // Helper to make iterator_traits SFINAE friendly as N3844 requires.
+ template <typename Iterator, class = void>
+ struct default_iterator_traits {};
+
+ template <typename Iterator>
+ struct default_iterator_traits<
+ Iterator,
+ void_t<
+ typename Iterator::iterator_category,
+ typename Iterator::value_type,
+ typename Iterator::difference_type,
+ typename Iterator::pointer,
+ typename Iterator::reference
+ >
+ >
+ {
+ typedef typename Iterator::iterator_category iterator_category;
+ typedef typename Iterator::value_type value_type;
+ typedef typename Iterator::difference_type difference_type;
+ typedef typename Iterator::pointer pointer;
+ typedef typename Iterator::reference reference;
+ };
+ }
+
+ template <typename Iterator>
+ struct iterator_traits : internal::default_iterator_traits<Iterator> {};
+
+ template <typename T>
+ struct iterator_traits<T*>
+ {
+ typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; // To consider: Change this to contiguous_iterator_tag for the case that
+ typedef T value_type; // EASTL_ITC_NS is "eastl" instead of "std".
+ typedef ptrdiff_t difference_type;
+ typedef T* pointer;
+ typedef T& reference;
+ };
+
+ template <typename T>
+ struct iterator_traits<const T*>
+ {
+ typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+ typedef T value_type;
+ typedef ptrdiff_t difference_type;
+ typedef const T* pointer;
+ typedef const T& reference;
+ };
+
+
+
+
+ /// is_iterator_wrapper
+ ///
+ /// Tells if an Iterator type is a wrapper type as opposed to a regular type.
+ /// Relies on the class declaring a member function called unwrap.
+ ///
+ /// Examples of wrapping iterators:
+ /// generic_iterator
+ /// move_iterator
+ /// reverse_iterator<T> (if T is a wrapped iterator)
+ /// Examples of non-wrapping iterators:
+ /// iterator
+ /// list::iterator
+ /// char*
+ ///
+ /// Example behavior:
+ /// is_iterator_wrapper(int*)::value => false
+ /// is_iterator_wrapper(eastl::array<char>*)::value => false
+ /// is_iterator_wrapper(eastl::vector<int>::iterator)::value => false
+ /// is_iterator_wrapper(eastl::generic_iterator<int*>)::value => true
+ /// is_iterator_wrapper(eastl::move_iterator<eastl::array<int>::iterator>)::value => true
+ /// is_iterator_wrapper(eastl::reverse_iterator<int*>)::value => false
+ /// is_iterator_wrapper(eastl::reverse_iterator<eastl::move_iterator<int*>>)::value => true
+ ///
+ template<typename Iterator>
+ class is_iterator_wrapper
+ {
+#if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ // Using a default template type parameter trick here because
+ // of a bug in clang that makes the other implementation not
+ // work when unwrap() is private and this is class is a
+ // friend.
+ // See: https://bugs.llvm.org/show_bug.cgi?id=25334
+ template<typename T, typename U = decltype(eastl::declval<T>().unwrap())>
+ using detect_has_unwrap = U;
+#else
+ // Note: the above implementation does not work on GCC when
+ // unwrap() is private and this class is a friend. So we're
+ // forced to diverge here to support both GCC and clang.
+ template<typename T>
+ using detect_has_unwrap = decltype(eastl::declval<T>().unwrap());
+#endif
+ public:
+ static const bool value = eastl::is_detected<detect_has_unwrap, Iterator>::value;
+ };
+
+
+ /// unwrap_iterator
+ ///
+ /// Takes a wrapper Iterator (e.g. move_iterator, reverse_iterator, generic_iterator) instance
+ /// and returns the wrapped iterator type. If Iterator is not a wrapper (including being a pointer),
+ /// or is not an iterator, then this function returns it as-is.
+ /// unwrap_iterator unwraps only a single layer of iterator at a time. You need to call it twice,
+ /// for example, to unwrap two layers of iterators.
+ ///
+ /// Example usage:
+ /// int* pInt = unwrap_iterator(&pIntArray[15]);
+ /// int* pInt = unwrap_iterator(generic_iterator(&pIntArray[15]));
+ /// MyVector::iterator it = unwrap_iterator(myVector.begin());
+ /// MyVector::iterator it = unwrap_iterator(move_iterator(myVector.begin()));
+ ///
+ template <typename Iterator, bool isWrapper>
+ struct is_iterator_wrapper_helper
+ {
+ using iterator_type = Iterator;
+
+ static iterator_type get_unwrapped(Iterator it) { return it; }
+ };
+
+
+ template <typename Iterator>
+ struct is_iterator_wrapper_helper<Iterator, true>
+ {
+ // get_unwrapped must return by value since we're returning
+ // it.unwrap(), and `it` will be out of scope as soon as
+ // get_unwrapped returns.
+ using iterator_type =
+ typename eastl::remove_cvref<decltype(eastl::declval<Iterator>().unwrap())>::type;
+
+ static iterator_type get_unwrapped(Iterator it) { return it.unwrap(); }
+ };
+
+
+ template <typename Iterator>
+ inline typename is_iterator_wrapper_helper<Iterator, eastl::is_iterator_wrapper<Iterator>::value>::iterator_type unwrap_iterator(Iterator it)
+ { return eastl::is_iterator_wrapper_helper<Iterator, eastl::is_iterator_wrapper<Iterator>::value>::get_unwrapped(it); }
+
+
+
+ /// reverse_iterator
+ ///
+ /// From the C++ standard:
+ /// Bidirectional and random access iterators have corresponding reverse
+ /// iterator adaptors that iterate through the data structure in the
+ /// opposite direction. They have the same signatures as the corresponding
+ /// iterators. The fundamental relation between a reverse iterator and its
+ /// corresponding iterator i is established by the identity:
+ /// &*(reverse_iterator(i)) == &*(i - 1).
+ /// This mapping is dictated by the fact that while there is always a pointer
+ /// past the end of an array, there might not be a valid pointer before the
+ /// beginning of an array.
+ ///
+ template <typename Iterator>
+ class reverse_iterator : public iterator<typename eastl::iterator_traits<Iterator>::iterator_category,
+ typename eastl::iterator_traits<Iterator>::value_type,
+ typename eastl::iterator_traits<Iterator>::difference_type,
+ typename eastl::iterator_traits<Iterator>::pointer,
+ typename eastl::iterator_traits<Iterator>::reference>
+ {
+ private:
+ using base_wrapped_iterator_type =
+ typename eastl::is_iterator_wrapper_helper<Iterator,
+ eastl::is_iterator_wrapper<Iterator>::value>::iterator_type;
+
+ public:
+ typedef Iterator iterator_type;
+ typedef typename eastl::iterator_traits<Iterator>::pointer pointer;
+ typedef typename eastl::iterator_traits<Iterator>::reference reference;
+ typedef typename eastl::iterator_traits<Iterator>::difference_type difference_type;
+
+ protected:
+ Iterator mIterator;
+
+ public:
+ EA_CPP14_CONSTEXPR reverse_iterator() // It's important that we construct mIterator, because if Iterator
+ : mIterator() { } // is a pointer, there's a difference between doing it and not.
+
+ EA_CPP14_CONSTEXPR explicit reverse_iterator(iterator_type i)
+ : mIterator(i) { }
+
+ EA_CPP14_CONSTEXPR reverse_iterator(const reverse_iterator& ri)
+ : mIterator(ri.mIterator) { }
+
+ template <typename U>
+ EA_CPP14_CONSTEXPR reverse_iterator(const reverse_iterator<U>& ri)
+ : mIterator(ri.base()) { }
+
+ // This operator= isn't in the standard, but the the C++
+ // library working group has tentatively approved it, as it
+ // allows const and non-const reverse_iterators to interoperate.
+ template <typename U>
+ EA_CPP14_CONSTEXPR reverse_iterator<Iterator>& operator=(const reverse_iterator<U>& ri)
+ { mIterator = ri.base(); return *this; }
+
+ EA_CPP14_CONSTEXPR iterator_type base() const
+ { return mIterator; }
+
+ EA_CPP14_CONSTEXPR reference operator*() const
+ {
+ iterator_type i(mIterator);
+ return *--i;
+ }
+
+ EA_CPP14_CONSTEXPR pointer operator->() const
+ { return &(operator*()); }
+
+ EA_CPP14_CONSTEXPR reverse_iterator& operator++()
+ { --mIterator; return *this; }
+
+ EA_CPP14_CONSTEXPR reverse_iterator operator++(int)
+ {
+ reverse_iterator ri(*this);
+ --mIterator;
+ return ri;
+ }
+
+ EA_CPP14_CONSTEXPR reverse_iterator& operator--()
+ { ++mIterator; return *this; }
+
+ EA_CPP14_CONSTEXPR reverse_iterator operator--(int)
+ {
+ reverse_iterator ri(*this);
+ ++mIterator;
+ return ri;
+ }
+
+ EA_CPP14_CONSTEXPR reverse_iterator operator+(difference_type n) const
+ { return reverse_iterator(mIterator - n); }
+
+ EA_CPP14_CONSTEXPR reverse_iterator& operator+=(difference_type n)
+ { mIterator -= n; return *this; }
+
+ EA_CPP14_CONSTEXPR reverse_iterator operator-(difference_type n) const
+ { return reverse_iterator(mIterator + n); }
+
+ EA_CPP14_CONSTEXPR reverse_iterator& operator-=(difference_type n)
+ { mIterator += n; return *this; }
+
+ // http://cplusplus.github.io/LWG/lwg-defects.html#386,
+ // http://llvm.org/bugs/show_bug.cgi?id=17883
+ // random_access_iterator operator[] is merely required to return something convertible to reference.
+ // reverse_iterator operator[] can't necessarily know what to return as the underlying iterator
+ // operator[] may return something other than reference.
+ EA_CPP14_CONSTEXPR reference operator[](difference_type n) const
+ { return mIterator[-n - 1]; }
+
+
+ private:
+ // Unwrapping interface, not part of the public API.
+ template <typename U = iterator_type>
+ EA_CPP14_CONSTEXPR typename eastl::enable_if<eastl::is_iterator_wrapper<U>::value, reverse_iterator<base_wrapped_iterator_type>>::type unwrap() const
+ { return reverse_iterator<base_wrapped_iterator_type>(unwrap_iterator(mIterator)); }
+
+ // The unwrapper helpers need access to unwrap() (when it exists).
+ using this_type = reverse_iterator<Iterator>;
+ friend is_iterator_wrapper_helper<this_type, is_iterator_wrapper<iterator_type>::value>;
+ friend is_iterator_wrapper<this_type>;
+ };
+
+
+ // The C++ library working group has tentatively approved the usage of two
+ // template parameters (Iterator1 and Iterator2) in order to allow reverse_iterators
+ // and const_reverse iterators to be comparable. This is a similar issue to the
+ // C++ defect report #179 regarding comparison of container iterators and const_iterators.
+ //
+ // libstdc++ reports that std::relops breaks the usage of two iterator types and if we
+ // want to support relops then we need to also make versions of each of below with
+ // a single template parameter to placate std::relops. But relops is hardly used due to
+ // the troubles it causes and so we are avoiding support here until somebody complains about it.
+ template <typename Iterator1, typename Iterator2>
+ EA_CPP14_CONSTEXPR inline bool
+ operator==(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+ { return a.base() == b.base(); }
+
+
+ template <typename Iterator1, typename Iterator2>
+ EA_CPP14_CONSTEXPR inline bool
+ operator<(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+ { return a.base() > b.base(); }
+
+
+ template <typename Iterator1, typename Iterator2>
+ EA_CPP14_CONSTEXPR inline bool
+ operator!=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+ { return a.base() != b.base(); }
+
+
+ template <typename Iterator1, typename Iterator2>
+ EA_CPP14_CONSTEXPR inline bool
+ operator>(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+ { return a.base() < b.base(); }
+
+
+ template <typename Iterator1, typename Iterator2>
+ EA_CPP14_CONSTEXPR inline bool
+ operator<=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+ { return a.base() >= b.base(); }
+
+
+ template <typename Iterator1, typename Iterator2>
+ EA_CPP14_CONSTEXPR inline bool
+ operator>=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+ { return a.base() <= b.base(); }
+
+
+ template <typename Iterator1, typename Iterator2>
+ EA_CPP14_CONSTEXPR inline typename reverse_iterator<Iterator1>::difference_type
+ operator-(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+ { return b.base() - a.base(); }
+
+
+ template <typename Iterator>
+ EA_CPP14_CONSTEXPR inline reverse_iterator<Iterator>
+ operator+(typename reverse_iterator<Iterator>::difference_type n, const reverse_iterator<Iterator>& a)
+ { return reverse_iterator<Iterator>(a.base() - n); }
+
+
+ /// is_reverse_iterator
+ ///
+ /// This is a type traits extension utility.
+ /// Given an iterator, tells if it's a reverse_iterator vs anything else.
+ /// If it's a reverse iterator wrapped by another iterator then value is false.
+ /// To consider: Detect that if it's a move_iterator<reverse_iterator> and unwrap
+ /// move_iterator so we can detect that underneath it's reverse_iterator.
+ ///
+ template <typename T>
+ struct is_reverse_iterator
+ : public eastl::false_type {};
+
+ template<typename Iterator>
+ struct is_reverse_iterator< eastl::reverse_iterator<Iterator> >
+ : public eastl::true_type {};
+
+ /// unwrap_reverse_iterator is not implemented since there's no
+ /// good use case and there's some abiguitiy. Note that
+ /// unwrap_iterator(reverse_iterator<T>) returns
+ /// reverse_iterator<unwrap(T)>. However, given what
+ /// unwrap_generic_iterator and unwrap_move_iterator do, one might
+ /// expect unwrap_reverse_iterator(reverse_iterator<T>) to return
+ /// T, which is not the same. To avoid that confusion, and because
+ /// there's no current use case for this, we don't provide
+ /// unwrap_reverse_iterator.
+
+
+
+ /// move_iterator
+ ///
+ /// From the C++11 Standard, section 24.5.3.1:
+ /// Class template move_iterator is an iterator adaptor with the same behavior as the underlying iterator
+ /// except that its dereference operator implicitly converts the value returned by the underlying iterator's
+ /// dereference operator to an rvalue reference. Some generic algorithms can be called with move iterators to
+ /// replace copying with moving.
+
+ template<typename Iterator>
+ class move_iterator // Don't inherit from iterator.
+ {
+ private:
+ using WrappedIteratorReference = typename iterator_traits<Iterator>::reference;
+
+ public:
+ typedef Iterator iterator_type;
+ typedef iterator_traits<Iterator> traits_type;
+ typedef typename traits_type::iterator_category iterator_category;
+ typedef typename traits_type::value_type value_type;
+ typedef typename traits_type::difference_type difference_type;
+ typedef Iterator pointer;
+ using reference = conditional_t<is_reference<WrappedIteratorReference>::value,
+ remove_reference_t<WrappedIteratorReference>&&,
+ WrappedIteratorReference>;
+
+ protected:
+ iterator_type mIterator;
+
+ public:
+ move_iterator()
+ : mIterator()
+ {
+ }
+
+ explicit move_iterator(iterator_type mi)
+ : mIterator(mi) { }
+
+ template<typename U>
+ move_iterator(const move_iterator<U>& mi)
+ : mIterator(mi.base())
+ {
+ }
+
+ iterator_type base() const
+ { return mIterator; }
+
+ reference operator*() const { return static_cast<reference>(*mIterator); }
+
+ pointer operator->() const
+ { return mIterator; }
+
+ move_iterator& operator++()
+ {
+ ++mIterator;
+ return *this;
+ }
+
+ move_iterator operator++(int)
+ {
+ move_iterator tempMoveIterator = *this;
+ ++mIterator;
+ return tempMoveIterator;
+ }
+
+ move_iterator& operator--()
+ {
+ --mIterator;
+ return *this;
+ }
+
+ move_iterator operator--(int)
+ {
+ move_iterator tempMoveIterator = *this;
+ --mIterator;
+ return tempMoveIterator;
+ }
+
+ move_iterator operator+(difference_type n) const
+ { return move_iterator(mIterator + n); }
+
+ move_iterator& operator+=(difference_type n)
+ {
+ mIterator += n;
+ return *this;
+ }
+
+ move_iterator operator-(difference_type n) const
+ { return move_iterator(mIterator - n); }
+
+ move_iterator& operator-=(difference_type n)
+ {
+ mIterator -= n;
+ return *this;
+ }
+
+ reference operator[](difference_type n) const
+ { return eastl::move(mIterator[n]); }
+
+ private:
+ // Unwrapping interface, not part of the public API.
+ iterator_type unwrap() const
+ { return mIterator; }
+
+ // The unwrapper helpers need access to unwrap().
+ using this_type = move_iterator<Iterator>;
+ friend is_iterator_wrapper_helper<this_type, true>;
+ friend is_iterator_wrapper<this_type>;
+ };
+
+ template<typename Iterator1, typename Iterator2>
+ inline bool
+ operator==(const move_iterator<Iterator1>& a, const move_iterator<Iterator2>& b)
+ { return a.base() == b.base(); }
+
+
+ template<typename Iterator1, typename Iterator2>
+ inline bool
+ operator!=(const move_iterator<Iterator1>& a, const move_iterator<Iterator2>& b)
+ { return !(a == b); }
+
+
+ template<typename Iterator1, typename Iterator2>
+ inline bool
+ operator<(const move_iterator<Iterator1>& a, const move_iterator<Iterator2>& b)
+ { return a.base() < b.base(); }
+
+
+ template<typename Iterator1, typename Iterator2>
+ inline bool
+ operator<=(const move_iterator<Iterator1>& a, const move_iterator<Iterator2>& b)
+ { return !(b < a); }
+
+
+ template<typename Iterator1, typename Iterator2>
+ inline bool
+ operator>(const move_iterator<Iterator1>& a, const move_iterator<Iterator2>& b)
+ { return b < a; }
+
+
+ template<typename Iterator1, typename Iterator2>
+ inline bool
+ operator>=(const move_iterator<Iterator1>& a, const move_iterator<Iterator2>& b)
+ { return !(a < b); }
+
+
+ template<typename Iterator1, typename Iterator2>
+ inline auto
+ operator-(const move_iterator<Iterator1>& a, const move_iterator<Iterator2>& b) -> decltype(a.base() - b.base())
+ { return a.base() - b.base(); }
+
+
+ template<typename Iterator>
+ inline move_iterator<Iterator>
+ operator+(typename move_iterator<Iterator>::difference_type n, const move_iterator<Iterator>& a)
+ { return a + n; }
+
+
+ template<typename Iterator>
+ inline move_iterator<Iterator> make_move_iterator(Iterator i)
+ { return move_iterator<Iterator>(i); }
+
+
+ // make_move_if_noexcept_iterator returns move_iterator<Iterator> if the Iterator is of a noexcept type;
+ // otherwise returns Iterator as-is. The point of this is to be able to avoid moves that can generate exceptions and instead
+ // fall back to copies or whatever the default IteratorType::operator* returns for use by copy/move algorithms.
+ // To consider: merge the conditional expression usage here with the one used by move_if_noexcept, as they are the same condition.
+ #if EASTL_EXCEPTIONS_ENABLED
+ template <typename Iterator, typename IteratorType = typename eastl::conditional<eastl::is_nothrow_move_constructible<typename eastl::iterator_traits<Iterator>::value_type>::value ||
+ !eastl::is_copy_constructible<typename eastl::iterator_traits<Iterator>::value_type>::value,
+ eastl::move_iterator<Iterator>, Iterator>::type>
+ inline IteratorType make_move_if_noexcept_iterator(Iterator i)
+ { return IteratorType(i); }
+ #else
+ // Else there are no exceptions and thus we always return a move_iterator.
+ template <typename Iterator>
+ inline eastl::move_iterator<Iterator> make_move_if_noexcept_iterator(Iterator i)
+ { return eastl::move_iterator<Iterator>(i); }
+ #endif
+
+
+
+ /// is_move_iterator
+ ///
+ /// This is a type traits extension utility.
+ /// Given an iterator, tells if it's a move iterator vs anything else.
+ /// Example usage (though somewhat useless):
+ /// template <typename T>
+ /// bool IsMoveIterator() { return typename eastl::is_move_iterator<T>::value; }
+ ///
+ template <typename T>
+ struct is_move_iterator
+ : public eastl::false_type {};
+
+ template<typename Iterator>
+ struct is_move_iterator< eastl::move_iterator<Iterator> >
+ : public eastl::true_type {};
+
+
+ /// unwrap_move_iterator
+ ///
+ /// Returns `it.base()` if it's a move_iterator, else returns `it` as-is.
+ ///
+ /// Example usage:
+ /// vector<int> intVector;
+ /// eastl::move_iterator<vector<int>::iterator> moveIterator(intVector.begin());
+ /// vector<int>::iterator it = unwrap_move_iterator(moveIterator);
+ ///
+ template <typename Iterator>
+ inline typename eastl::is_iterator_wrapper_helper<Iterator, eastl::is_move_iterator<Iterator>::value>::iterator_type unwrap_move_iterator(Iterator it)
+ {
+ // get_unwrapped(it) -> it.unwrap() which is equivalent to `it.base()` for move_iterator and to `it` otherwise.
+ return eastl::is_iterator_wrapper_helper<Iterator, eastl::is_move_iterator<Iterator>::value>::get_unwrapped(it);
+ }
+
+
+ /// back_insert_iterator
+ ///
+ /// A back_insert_iterator is simply a class that acts like an iterator but when you
+ /// assign a value to it, it calls push_back on the container with the value.
+ ///
+ template <typename Container>
+ class back_insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+ {
+ public:
+ typedef back_insert_iterator<Container> this_type;
+ typedef Container container_type;
+ typedef typename Container::const_reference const_reference;
+
+ protected:
+ Container& container;
+
+ public:
+ //back_insert_iterator(); // Not valid. Must construct with a Container.
+
+ //back_insert_iterator(const this_type& x) // Compiler-implemented
+ // : container(x.container) { }
+
+ explicit back_insert_iterator(Container& x)
+ : container(x) { }
+
+ back_insert_iterator& operator=(const_reference value)
+ { container.push_back(value); return *this; }
+
+ back_insert_iterator& operator=(typename Container::value_type&& value)
+ { container.push_back(eastl::move(value)); return *this; }
+
+ back_insert_iterator& operator*()
+ { return *this; }
+
+ back_insert_iterator& operator++()
+ { return *this; } // This is by design.
+
+ back_insert_iterator operator++(int)
+ { return *this; } // This is by design.
+
+ protected:
+ void operator=(const this_type&){} // Declared to avoid compiler warnings about inability to generate this function.
+ };
+
+
+ /// back_inserter
+ ///
+ /// Creates an instance of a back_insert_iterator.
+ ///
+ template <typename Container>
+ inline back_insert_iterator<Container>
+ back_inserter(Container& x)
+ { return back_insert_iterator<Container>(x); }
+
+
+
+
+ /// front_insert_iterator
+ ///
+ /// A front_insert_iterator is simply a class that acts like an iterator but when you
+ /// assign a value to it, it calls push_front on the container with the value.
+ ///
+ template <typename Container>
+ class front_insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+ {
+ public:
+ typedef front_insert_iterator<Container> this_type;
+ typedef Container container_type;
+ typedef typename Container::const_reference const_reference;
+
+ protected:
+ Container& container;
+
+ public:
+ //front_insert_iterator(); // Not valid. Must construct with a Container.
+
+ //front_insert_iterator(const this_type& x) // Compiler-implemented
+ // : container(x.container) { }
+
+ explicit front_insert_iterator(Container& x)
+ : container(x) { }
+
+ front_insert_iterator& operator=(const_reference value)
+ { container.push_front(value); return *this; }
+
+ front_insert_iterator& operator*()
+ { return *this; }
+
+ front_insert_iterator& operator++()
+ { return *this; } // This is by design.
+
+ front_insert_iterator operator++(int)
+ { return *this; } // This is by design.
+
+ protected:
+ void operator=(const this_type&){} // Declared to avoid compiler warnings about inability to generate this function.
+ };
+
+
+ /// front_inserter
+ ///
+ /// Creates an instance of a front_insert_iterator.
+ ///
+ template <typename Container>
+ inline front_insert_iterator<Container>
+ front_inserter(Container& x)
+ { return front_insert_iterator<Container>(x); }
+
+
+
+
+ /// insert_iterator
+ ///
+ /// An insert_iterator is like an iterator except that when you assign a value to it,
+ /// the insert_iterator inserts the value into the container and increments the iterator.
+ ///
+ /// insert_iterator is an iterator adaptor that functions as an OutputIterator:
+ /// assignment through an insert_iterator inserts an object into a container.
+ /// Specifically, if ii is an insert_iterator, then ii keeps track of a container c and
+ /// an insertion point p; the expression *ii = x performs the insertion container.insert(p, x).
+ ///
+ /// If you assign through an insert_iterator several times, then you will be inserting
+ /// several elements into the underlying container. In the case of a sequence, they will
+ /// appear at a particular location in the underlying sequence, in the order in which
+ /// they were inserted: one of the arguments to insert_iterator's constructor is an
+ /// iterator p, and the new range will be inserted immediately before p.
+ ///
+ template <typename Container>
+ class insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+ {
+ public:
+ typedef Container container_type;
+ typedef typename Container::iterator iterator_type;
+ typedef typename Container::const_reference const_reference;
+
+ protected:
+ Container& container;
+ iterator_type it;
+
+ public:
+ // This assignment operator is defined more to stop compiler warnings (e.g. VC++ C4512)
+ // than to be useful. However, it does allow an insert_iterator to be assigned to another
+ // insert iterator provided that they point to the same container.
+ insert_iterator& operator=(const insert_iterator& x)
+ {
+ EASTL_ASSERT(&x.container == &container);
+ it = x.it;
+ return *this;
+ }
+
+ insert_iterator(Container& x, iterator_type itNew)
+ : container(x), it(itNew) {}
+
+ insert_iterator& operator=(const_reference value)
+ {
+ it = container.insert(it, value);
+ ++it;
+ return *this;
+ }
+
+ insert_iterator& operator*()
+ { return *this; }
+
+ insert_iterator& operator++()
+ { return *this; } // This is by design.
+
+ insert_iterator& operator++(int)
+ { return *this; } // This is by design.
+
+ }; // insert_iterator
+
+
+ /// inserter
+ ///
+ /// Creates an instance of an insert_iterator.
+ ///
+ template <typename Container, typename Iterator>
+ inline eastl::insert_iterator<Container>
+ inserter(Container& x, Iterator i)
+ {
+ typedef typename Container::iterator iterator;
+ return eastl::insert_iterator<Container>(x, iterator(i));
+ }
+
+
+ /// is_insert_iterator
+ ///
+ /// This is a type traits extension utility.
+ /// Given an iterator, tells if it's an insert_iterator vs anything else.
+ /// If it's a insert_iterator wrapped by another iterator then value is false.
+ ///
+ template <typename T>
+ struct is_insert_iterator
+ : public eastl::false_type {};
+
+ template<typename Iterator>
+ struct is_insert_iterator< eastl::insert_iterator<Iterator> >
+ : public eastl::true_type {};
+
+
+
+
+ //////////////////////////////////////////////////////////////////////////////////
+ /// distance
+ ///
+ /// Implements the distance() function. There are two versions, one for
+ /// random access iterators (e.g. with vector) and one for regular input
+ /// iterators (e.g. with list). The former is more efficient.
+ ///
+ template <typename InputIterator>
+ EA_CONSTEXPR
+ inline typename eastl::iterator_traits<InputIterator>::difference_type
+ distance_impl(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
+ {
+ typename eastl::iterator_traits<InputIterator>::difference_type n = 0;
+
+ while(first != last)
+ {
+ ++first;
+ ++n;
+ }
+ return n;
+ }
+
+ template <typename RandomAccessIterator>
+ EA_CONSTEXPR
+ inline typename eastl::iterator_traits<RandomAccessIterator>::difference_type
+ distance_impl(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ return last - first;
+ }
+
+ // Special version defined so that std C++ iterators can be recognized by
+ // this function. Unfortunately, this function treats all foreign iterators
+ // as InputIterators and thus can seriously hamper performance in the case
+ // of large ranges of bidirectional_iterator_tag iterators.
+ //template <typename InputIterator>
+ //inline typename eastl::iterator_traits<InputIterator>::difference_type
+ //distance_impl(InputIterator first, InputIterator last, ...)
+ //{
+ // typename eastl::iterator_traits<InputIterator>::difference_type n = 0;
+ //
+ // while(first != last)
+ // {
+ // ++first;
+ // ++n;
+ // }
+ // return n;
+ //}
+
+ template <typename InputIterator>
+ EA_CONSTEXPR
+ inline typename eastl::iterator_traits<InputIterator>::difference_type
+ distance(InputIterator first, InputIterator last)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+
+ return eastl::distance_impl(first, last, IC());
+ }
+
+
+
+
+ //////////////////////////////////////////////////////////////////////////////////
+ /// advance
+ ///
+ /// Implements the advance() function. There are three versions, one for
+ /// random access iterators (e.g. with vector), one for bidirectional
+ /// iterators (list) and one for regular input iterators (e.g. with slist).
+ ///
+ template <typename InputIterator, typename Distance>
+ inline void
+ advance_impl(InputIterator& i, Distance n, EASTL_ITC_NS::input_iterator_tag)
+ {
+ while(n--)
+ ++i;
+ }
+
+ template <bool signedDistance>
+ struct advance_bi_impl
+ {
+ template <typename BidirectionalIterator, typename Distance>
+ static void advance_impl(BidirectionalIterator& i, Distance n) // Specialization for unsigned distance type.
+ {
+ while(n--)
+ ++i;
+ }
+ };
+
+ template <>
+ struct advance_bi_impl<true>
+ {
+ template <typename BidirectionalIterator, typename Distance>
+ static void advance_impl(BidirectionalIterator& i, Distance n) // Specialization for signed distance type.
+ {
+ if(n > 0)
+ {
+ while(n--)
+ ++i;
+ }
+ else
+ {
+ while(n++)
+ --i;
+ }
+ }
+ };
+
+ template <typename BidirectionalIterator, typename Distance>
+ inline void
+ advance_impl(BidirectionalIterator& i, Distance n, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+ advance_bi_impl<eastl::is_signed<Distance>::value>::advance_impl(i, n);
+ }
+
+ template <typename RandomAccessIterator, typename Distance>
+ inline void
+ advance_impl(RandomAccessIterator& i, Distance n, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ i += n;
+ }
+
+ // Special version defined so that std C++ iterators can be recognized by
+ // this function. Unfortunately, this function treats all foreign iterators
+ // as InputIterators and thus can seriously hamper performance in the case
+ // of large ranges of bidirectional_iterator_tag iterators.
+ //template <typename InputIterator, typename Distance>
+ //inline void
+ //advance_impl(InputIterator& i, Distance n, ...)
+ //{
+ // while(n--)
+ // ++i;
+ //}
+
+ template <typename InputIterator, typename Distance>
+ inline void
+ advance(InputIterator& i, Distance n)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+
+ eastl::advance_impl(i, n, IC());
+ }
+
+
+ // eastl::next / eastl::prev
+ // Return the nth/-nth successor of iterator it.
+ //
+ // http://en.cppreference.com/w/cpp/iterator/next
+ //
+ template<typename InputIterator>
+ inline InputIterator
+ next(InputIterator it, typename eastl::iterator_traits<InputIterator>::difference_type n = 1)
+ {
+ eastl::advance(it, n);
+ return it;
+ }
+
+ template<typename InputIterator>
+ inline InputIterator
+ prev(InputIterator it, typename eastl::iterator_traits<InputIterator>::difference_type n = 1)
+ {
+ eastl::advance(it, -n);
+ return it;
+ }
+
+
+#if defined(EA_COMPILER_CPP11_ENABLED) && EA_COMPILER_CPP11_ENABLED
+
+ // eastl::data
+ //
+ // http://en.cppreference.com/w/cpp/iterator/data
+ //
+ template <class Container>
+ EA_CPP14_CONSTEXPR auto data(Container& c) -> decltype(c.data())
+ { return c.data(); }
+
+ template <class Container>
+ EA_CPP14_CONSTEXPR auto data(const Container& c) -> decltype(c.data())
+ { return c.data(); }
+
+ template <class T, size_t N>
+ EA_CPP14_CONSTEXPR T* data(T(&array)[N]) EA_NOEXCEPT
+ { return array; }
+
+ template <class E>
+ EA_CPP14_CONSTEXPR const E* data(std::initializer_list<E> il) EA_NOEXCEPT
+ { return il.begin(); }
+
+
+ // eastl::size
+ //
+ // http://en.cppreference.com/w/cpp/iterator/size
+ //
+ template <class C>
+ EA_CPP14_CONSTEXPR auto size(const C& c) -> decltype(c.size())
+ { return c.size(); }
+
+ template <class T, size_t N>
+ EA_CPP14_CONSTEXPR size_t size(const T (&)[N]) EA_NOEXCEPT
+ { return N; }
+
+
+ // eastl::ssize
+ //
+ // https://en.cppreference.com/w/cpp/iterator/size
+ //
+ template <class T, ptrdiff_t N>
+ EA_CPP14_CONSTEXPR ptrdiff_t ssize(const T(&)[N]) EA_NOEXCEPT
+ { return N; }
+
+ template <class C>
+ EA_CPP14_CONSTEXPR auto ssize(const C& c)
+ -> eastl::common_type_t<ptrdiff_t, eastl::make_signed_t<decltype(c.size())>>
+ {
+ using R = eastl::common_type_t<ptrdiff_t, eastl::make_signed_t<decltype(c.size())>>;
+ return static_cast<R>(c.size());
+ }
+
+
+ // eastl::empty
+ //
+ // http://en.cppreference.com/w/cpp/iterator/empty
+ //
+ template <class Container>
+ EA_CPP14_CONSTEXPR auto empty(const Container& c) -> decltype(c.empty())
+ { return c.empty(); }
+
+ template <class T, size_t N>
+ EA_CPP14_CONSTEXPR bool empty(const T (&)[N]) EA_NOEXCEPT
+ { return false; }
+
+ template <class E>
+ EA_CPP14_CONSTEXPR bool empty(std::initializer_list<E> il) EA_NOEXCEPT
+ { return il.size() == 0; }
+
+#endif // defined(EA_COMPILER_CPP11_ENABLED) && EA_COMPILER_CPP11_ENABLED
+
+
+ // eastl::begin / eastl::end
+ // http://en.cppreference.com/w/cpp/iterator/begin
+ //
+ // In order to enable eastl::begin and eastl::end, the compiler needs to have conforming support
+ // for argument-dependent lookup if it supports C++11 range-based for loops. The reason for this is
+ // that in C++11 range-based for loops result in usage of std::begin/std::end, but allow that to
+ // be overridden by argument-dependent lookup:
+ // C++11 Standard, section 6.5.4, paragraph 1.
+ // "otherwise, begin-expr and end-expr are begin(__range) and end(__range), respectively,
+ // where begin and end are looked up with argument-dependent lookup (3.4.2). For the
+ // purposes of this name lookup, namespace std is an associated namespace."
+ // It turns out that one compiler has a problem: GCC 4.6. That version added support for
+ // range-based for loops but has broken argument-dependent lookup which was fixed in GCC 4.7.
+ //
+ #if (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION == 4006))
+ #define EASTL_BEGIN_END_ENABLED 0
+ #else
+ #define EASTL_BEGIN_END_ENABLED 1
+ #endif
+
+ #if EASTL_BEGIN_END_ENABLED
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto begin(Container& container) -> decltype(container.begin())
+ {
+ return container.begin();
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto begin(const Container& container) -> decltype(container.begin())
+ {
+ return container.begin();
+ }
+
+ template<typename T, size_t arraySize>
+ EA_CPP14_CONSTEXPR inline T* begin(T (&arrayObject)[arraySize]) EA_NOEXCEPT
+ {
+ return arrayObject;
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto cbegin(const Container& container) -> decltype(eastl::begin(container))
+ {
+ return eastl::begin(container);
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto end(Container& container) -> decltype(container.end())
+ {
+ return container.end();
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto end(const Container& container) -> decltype(container.end())
+ {
+ return container.end();
+ }
+
+ template<typename T, size_t arraySize>
+ EA_CPP14_CONSTEXPR inline T* end(T (&arrayObject)[arraySize]) EA_NOEXCEPT
+ {
+ return (arrayObject + arraySize);
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto cend(const Container& container) -> decltype(eastl::end(container))
+ {
+ return eastl::end(container);
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto rbegin(Container& container) -> decltype(container.rbegin())
+ {
+ return container.rbegin();
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto rbegin(const Container& container) -> decltype(container.rbegin())
+ {
+ return container.rbegin();
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto rend(Container& container) -> decltype(container.rend())
+ {
+ return container.rend();
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto rend(const Container& container) -> decltype(container.rend())
+ {
+ return container.rend();
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto crbegin(const Container& container) -> decltype(eastl::rbegin(container))
+ {
+ return container.rbegin();
+ }
+
+ template <typename Container>
+ EA_CPP14_CONSTEXPR inline auto crend(const Container& container) -> decltype(eastl::rend(container))
+ {
+ return container.rend();
+ }
+
+
+ template <typename T, size_t arraySize>
+ EA_CPP14_CONSTEXPR inline reverse_iterator<T*> rbegin(T (&arrayObject)[arraySize])
+ {
+ return reverse_iterator<T*>(arrayObject + arraySize);
+ }
+
+ template <typename T, size_t arraySize>
+ EA_CPP14_CONSTEXPR inline reverse_iterator<T*> rend(T (&arrayObject)[arraySize])
+ {
+ return reverse_iterator<T*>(arrayObject);
+ }
+
+ template <typename E>
+ EA_CPP14_CONSTEXPR inline reverse_iterator<const E*> rbegin(std::initializer_list<E> ilist)
+ {
+ return eastl::reverse_iterator<const E*>(ilist.end());
+ }
+
+ template <typename E>
+ EA_CPP14_CONSTEXPR inline reverse_iterator<const E*> rend(std::initializer_list<E> ilist)
+ {
+ return eastl::reverse_iterator<const E*>(ilist.begin());
+ }
+
+ template <typename Iterator>
+ EA_CPP14_CONSTEXPR reverse_iterator<Iterator> make_reverse_iterator(Iterator i)
+ { return reverse_iterator<Iterator>(i); }
+
+ #endif // EASTL_BEGIN_END_ENABLED
+
+} // namespace eastl
+
+
+
+// Some compilers (e.g. GCC 4.6) support range-based for loops, but have a bug with
+// respect to argument-dependent lookup which results on them unilaterally using std::begin/end
+// with range-based for loops. To work around this we #include <iterator> for this case in
+// order to make std::begin/end visible to users of <EASTL/iterator.h>, for portability.
+#if !EASTL_BEGIN_END_ENABLED && !defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP)
+ #include <iterator>
+#endif
+
+
+
+EA_RESTORE_VC_WARNING();
+EA_RESTORE_VC_WARNING();
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/linked_array.h b/EASTL/include/EASTL/linked_array.h
new file mode 100644
index 0000000..88d9914
--- /dev/null
+++ b/EASTL/include/EASTL/linked_array.h
@@ -0,0 +1,336 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This class implements a linked_array template, which is an array version
+// of linked_ptr. See linked_ptr for detailed documentation.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_LINKED_ARRAY_H
+#define EASTL_LINKED_ARRAY_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/smart_ptr.h> // Defines smart_array_deleter
+#include <EASTL/linked_ptr.h> // Defines linked_ptr_base
+#include <stddef.h> // Definition of ptrdiff_t
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// class linked_array
+ ///
+ /// This class implements a linked_array template, which is an array version
+ /// of linked_ptr. See linked_ptr for detailed documentation.
+ ///
+ template <typename T, typename Deleter = smart_array_deleter<T> >
+ class linked_array
+ {
+
+ protected:
+
+ /// this_type
+ /// This is an alias for linked_array<T>, this class.
+ typedef linked_array<T> this_type;
+
+ /// deleter_type
+ typedef Deleter deleter_type;
+
+ T* mpArray;
+ mutable const this_type* mpPrev;
+ mutable const this_type* mpNext;
+
+ void link(const linked_array& linkedArray)
+ { // This code can only be called when we are in a reset state.
+ // assert(!mpArray && (mpNext == mpPrev));
+ mpNext = linkedArray.mpNext;
+ mpNext->mpPrev = this;
+ mpPrev = &linkedArray;
+ linkedArray.mpNext = this;
+ }
+
+ public:
+ /// element_type
+ /// Synonym for type T, useful for external code to reference the
+ /// type in a generic way.
+ typedef T element_type;
+
+
+ /// linked_array
+ /// Takes ownership of the pointer. It is OK if the input pointer is null.
+ explicit linked_array(T* pArray = NULL)
+ : mpArray(pArray)
+ {
+ mpPrev = mpNext = this;
+ }
+
+
+ /// linked_array
+ /// Shares ownership of a pointer with another instance of linked_array.
+ linked_array(const linked_array& linkedArray)
+ : mpArray(linkedArray.mpArray)
+ {
+ if(mpArray)
+ link(linkedArray);
+ else
+ mpPrev = mpNext = this;
+ }
+
+
+ /// ~linked_array
+ /// Removes this object from the of objects using the shared pointer.
+ /// If this object is the last owner of the shared pointer, the shared
+ /// pointer is deleted.
+ ~linked_array()
+ {
+ reset();
+ }
+
+
+ /// operator=
+ /// Copies another linked_array to this object. Note that this object
+ /// may already own a shared pointer with another different pointer
+ /// (but still of the same type) before this call. In that case,
+ /// this function removes ownership of the old pointer and takes shared
+ /// ownership of the new pointer and increments its reference count.
+ linked_array& operator=(const linked_array& linkedArray)
+ {
+ if(linkedArray.mpArray != mpArray)
+ {
+ reset(linkedArray.mpArray);
+ if(linkedArray.mpArray)
+ link(linkedArray);
+ }
+ return *this;
+ }
+
+
+ /// operator=
+ /// Assigns a new pointer. If the new pointer is equivalent
+ /// to the current pointer, nothing is done. Otherwise the
+ /// current pointer is unlinked and possibly destroyed.
+ /// The new pointer can be NULL.
+ linked_array& operator=(T* pArray)
+ {
+ reset(pArray);
+ return *this;
+ }
+
+
+ /// reset
+ /// Releases the owned pointer and takes ownership of the
+ /// passed in pointer. If the passed in pointer is the same
+ /// as the owned pointer, nothing is done. The passed in pointer
+ /// can be null, in which case the use count is set to 1.
+ void reset(T* pArray = NULL)
+ {
+ if(pArray != mpArray)
+ {
+ if(unique())
+ {
+ deleter_type del;
+ del(mpArray);
+ }
+ else
+ {
+ mpPrev->mpNext = mpNext;
+ mpNext->mpPrev = mpPrev;
+ mpPrev = mpNext = this;
+ }
+ mpArray = pArray;
+ }
+ }
+
+
+ /// swap
+ /// Exchanges the owned pointer beween two linkedArray objects.
+ ///
+ /// This function is disabled as it is currently deemed unsafe.
+ /// The problem is that the only way to implement this function
+ /// is to transfer pointers between the objects; you cannot
+ /// transfer the linked list membership between the objects.
+ /// Thus unless both linked_array objects were 'unique()', the
+ /// shared pointers would be duplicated amongst containers,
+ /// resulting in a crash.
+ //void swap(linked_array& linkedArray)
+ //{
+ // if(linkedArray.mpArray != mpArray)
+ // { // This is only safe if both linked_arrays are unique().
+ // linkedArray::element_type* const pArrayTemp = linkedArray.mpArray;
+ // linkedArray.reset(mpArray);
+ // reset(pArrayTemp);
+ // }
+ //}
+
+
+ /// operator[]
+ /// Returns a reference to the specified item in the owned pointer array.
+ T& operator[](ptrdiff_t i) const
+ {
+ // assert(mpArray && (i >= 0));
+ return mpArray[i];
+ }
+
+
+ /// operator*
+ /// Returns the owner pointer dereferenced.
+ T& operator*() const
+ {
+ return *mpArray;
+ }
+
+
+ /// operator->
+ /// Allows access to the owned pointer via operator->()
+ T* operator->() const
+ {
+ return mpArray;
+ }
+
+
+ /// get
+ /// Returns the owned pointer. Note that this class does
+ /// not provide an operator T() function. This is because such
+ /// a thing (automatic conversion) is deemed unsafe.
+ T* get() const
+ {
+ return mpArray;
+ }
+
+
+ /// use_count
+ /// Returns the use count of the shared pointer.
+ /// The return value is one if the owned pointer is null.
+ /// This function is provided for compatibility with the
+ /// proposed C++ standard and for debugging purposes. It is not
+ /// intended for runtime use given that its execution time is
+ /// not constant.
+ int use_count() const
+ {
+ int useCount(1);
+
+ for(const linked_ptr_base* pCurrent = this; pCurrent->mpNext != this; pCurrent = pCurrent->mpNext)
+ ++useCount;
+
+ return useCount;
+ }
+
+
+ /// unique
+ /// Returns true if the use count of the owned pointer is one.
+ /// The return value is true if the owned pointer is null.
+ bool unique() const
+ {
+ return (mpNext == this);
+ }
+
+
+ /// Implicit operator bool
+ /// Allows for using a linked_array as a boolean.
+ /// Note that below we do not use operator bool(). The reason for this
+ /// is that booleans automatically convert up to short, int, float, etc.
+ /// The result is that this: if(linkedArray == 1) would yield true (bad).
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const
+ {
+ if(mpArray)
+ return &this_type::get;
+ return NULL;
+ }
+
+
+ /// operator!
+ /// This returns the opposite of operator bool; it returns true if
+ /// the owned pointer is null. Some compilers require this and some don't.
+ bool operator!()
+ {
+ return (mpArray == NULL);
+ }
+
+
+ /// force_delete
+ /// Forces deletion of the shared pointer. Fixes all references to the
+ /// pointer by any other owners to be NULL.
+ void force_delete()
+ {
+ T* const pArray = mpArray;
+
+ this_type* p = this;
+ do
+ {
+ this_type* const pNext = const_cast<this_type*>(p->mpNext);
+ p->mpArray = NULL;
+ p->mpNext = p->mpPrev = p;
+ p = pNext;
+ }
+ while(p != this);
+
+ deleter_type del;
+ del(pArray);
+ }
+
+ }; // class linked_array
+
+
+
+ /// get_pointer
+ /// Returns linked_array::get() via the input linked_array. Provided for compatibility
+ /// with certain well-known libraries that use this functionality.
+ template <typename T>
+ inline T* get_pointer(const linked_array<T>& linkedArray)
+ {
+ return linkedArray.get();
+ }
+
+
+ /// operator==
+ /// Compares two linked_array objects for equality. Equality is defined as
+ /// being true when the pointer shared between two linked_array objects is equal.
+ template <typename T, typename TD, typename U, typename UD>
+ inline bool operator==(const linked_array<T, TD>& linkedArray1, const linked_array<U, UD>& linkedArray2)
+ {
+ return (linkedArray1.get() == linkedArray2.get());
+ }
+
+
+ /// operator!=
+ /// Compares two linked_array objects for inequality. Equality is defined as
+ /// being true when the pointer shared between two linked_array objects is equal.
+ template <typename T, typename TD, typename U, typename UD>
+ inline bool operator!=(const linked_array<T, TD>& linkedArray1, const linked_array<U, UD>& linkedArray2)
+ {
+ return (linkedArray1.get() != linkedArray2.get());
+ }
+
+
+ /// operator<
+ /// Returns which linked_array is 'less' than the other. Useful when storing
+ /// sorted containers of linked_array objects.
+ template <typename T, typename TD, typename U, typename UD>
+ inline bool operator<(const linked_array<T, TD>& linkedArray1, const linked_array<U, UD>& linkedArray2)
+ {
+ return (linkedArray1.get() < linkedArray2.get());
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/linked_ptr.h b/EASTL/include/EASTL/linked_ptr.h
new file mode 100644
index 0000000..f57681a
--- /dev/null
+++ b/EASTL/include/EASTL/linked_ptr.h
@@ -0,0 +1,426 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_LINKED_PTR_H
+#define EASTL_LINKED_PTR_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/smart_ptr.h> // Defines smart_ptr_deleter
+#include <EASTL/allocator.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// linked_ptr_base
+ ///
+ /// This class allows linked_ptr<T> and linked_ptr<U> to share the same
+ /// base nodes and thus be in the same linked list.
+ ///
+ struct linked_ptr_base
+ {
+ mutable linked_ptr_base* mpPrev;
+ mutable linked_ptr_base* mpNext;
+ };
+
+
+ /// linked_ptr
+ ///
+ /// This class implements a linked_ptr template. A linked_ptr is like the C++
+ /// Standard Library auto_ptr except that it allows sharing of pointers between
+ /// instances of auto_ptr via reference counting. linked_ptr objects can safely
+ /// be copied and can safely be used in C++ Standard Library containers such
+ /// as std::vector or std::list. This implementation, however, is not thread-safe.
+ /// you would need to use a separate linked_ptr_mt (multi-threaded) to get
+ /// thread safety.
+ ///
+ /// linked_ptr is a variation of shared_ptr (a.k.a. counted_ptr) which differs
+ /// in that instead of being implemented by a shared integer stored on the heap,
+ /// it is implemented by linked list stored within the linked_ptr object itself.
+ /// The result is that no memory is explicitly allocated from the heap, though
+ /// the cost of each linked_ptr object is 12 bytes of memory (32 bit machine)
+ /// instead of 4 bytes for the case of shared_ptr (depending on the heap).
+ ///
+ template <typename T, typename Deleter = smart_ptr_deleter<T> >
+ class linked_ptr : public linked_ptr_base
+ {
+ protected:
+ template <typename U, typename D> friend class linked_ptr;
+
+ /// this_type
+ /// This is an alias for linked_ptr<T>, this class.
+ typedef linked_ptr<T> this_type;
+
+ /// deleter_type
+ typedef Deleter deleter_type;
+
+ T* mpValue; /// The owned pointer.
+
+ template <typename U, typename D>
+ void link(const linked_ptr<U, D>& linkedPtr)
+ { // This code can only be called when we are in a reset state.
+ // assert(!mpValue && (mpNext == mpPrev));
+ mpNext = linkedPtr.mpNext;
+ mpNext->mpPrev = this;
+ mpPrev = const_cast<linked_ptr<U, D>*>(&linkedPtr);
+ linkedPtr.mpNext = this;
+ }
+
+ public:
+ /// element_type
+ /// Synonym for type T, useful for external code to reference the
+ /// type in a generic way.
+ typedef T element_type;
+
+
+ /// linked_ptr
+ /// Default constructor.
+ linked_ptr()
+ : mpValue(NULL)
+ {
+ mpPrev = mpNext = this;
+ }
+
+
+ /// linked_ptr
+ /// Takes ownership of the pointer. It is OK if the input pointer is null.
+ template <typename U>
+ explicit linked_ptr(U* pValue)
+ : mpValue(pValue)
+ {
+ mpPrev = mpNext = this;
+ }
+
+
+ /// linked_ptr
+ /// Construction with self type.
+ /// If we want a shared_ptr constructor that is templated on linked_ptr<U>,
+ /// then we need to make it in addition to this function, as otherwise
+ /// the compiler will generate this function and things will go wrong.
+ linked_ptr(const linked_ptr& linkedPtr)
+ : mpValue(linkedPtr.mpValue)
+ {
+ if(mpValue)
+ link(linkedPtr);
+ else
+ mpPrev = mpNext = this;
+ }
+
+
+ /// linked_ptr
+ /// Shares ownership of a pointer with another instance of linked_ptr.
+ template <typename U, typename D>
+ linked_ptr(const linked_ptr<U, D>& linkedPtr)
+ : mpValue(linkedPtr.mpValue)
+ {
+ if(mpValue)
+ link(linkedPtr);
+ else
+ mpPrev = mpNext = this;
+ }
+
+
+ /// ~linked_ptr
+ /// Removes this object from the of objects using the shared pointer.
+ /// If this object is the last owner of the shared pointer, the shared
+ /// pointer is deleted.
+ ~linked_ptr()
+ {
+ reset();
+ }
+
+
+ /// operator=
+ /// If we want a shared_ptr operator= that is templated on linked_ptr<U>,
+ /// then we need to make it in addition to this function, as otherwise
+ /// the compiler will generate this function and things will go wrong.
+ linked_ptr& operator=(const linked_ptr& linkedPtr)
+ {
+ if(linkedPtr.mpValue != mpValue)
+ {
+ reset(linkedPtr.mpValue);
+ if(linkedPtr.mpValue)
+ link(linkedPtr);
+ }
+ return *this;
+ }
+
+
+ /// operator=
+ /// Copies another linked_ptr to this object. Note that this object
+ /// may already own a shared pointer with another different pointer
+ /// (but still of the same type) before this call. In that case,
+ /// this function removes ownership of the old pointer and takes shared
+ /// ownership of the new pointer and increments its reference count.
+ template <typename U, typename D>
+ linked_ptr& operator=(const linked_ptr<U, D>& linkedPtr)
+ {
+ if(linkedPtr.mpValue != mpValue)
+ {
+ reset(linkedPtr.mpValue);
+ if(linkedPtr.mpValue)
+ link(linkedPtr);
+ }
+ return *this;
+ }
+
+
+ /// operator=
+ /// Assigns a new pointer. If the new pointer is equivalent
+ /// to the current pointer, nothing is done. Otherwise the
+ /// current pointer is unlinked and possibly destroyed.
+ /// The new pointer can be NULL.
+ template <typename U>
+ linked_ptr& operator=(U* pValue)
+ {
+ reset(pValue);
+ return *this;
+ }
+
+
+ /// reset
+ /// Releases the owned pointer and takes ownership of the
+ /// passed in pointer. If the passed in pointer is the same
+ /// as the owned pointer, nothing is done. The passed in pointer
+ /// can be NULL, in which case the use count is set to 1.
+ template <typename U>
+ void reset(U* pValue)
+ {
+ if(pValue != mpValue)
+ {
+ if(unique())
+ {
+ deleter_type del;
+ del(mpValue);
+ }
+ else
+ {
+ mpPrev->mpNext = mpNext;
+ mpNext->mpPrev = mpPrev;
+ mpPrev = mpNext = this;
+ }
+ mpValue = pValue;
+ }
+ }
+
+
+ /// reset
+ /// Resets the container with NULL. If the current pointer
+ /// is non-NULL, it is unlinked and possibly destroyed.
+ void reset()
+ {
+ reset((T*)NULL);
+ }
+
+
+ /// swap
+ /// Exchanges the owned pointer beween two linkedPtr objects.
+ ///
+ /// This function is disabled as it is currently deemed unsafe.
+ /// The problem is that the only way to implement this function
+ /// is to transfer pointers between the objects; you cannot
+ /// transfer the linked list membership between the objects.
+ /// Thus unless both linked_ptr objects were 'unique()', the
+ /// shared pointers would be duplicated amongst containers,
+ /// resulting in a crash.
+ //template <typename U, typename D>
+ //void swap(linked_ptr<U, D>& linkedPtr)
+ //{
+ // if(linkedPtr.mpValue != mpValue)
+ // { // This is only safe if both linked_ptrs are unique().
+ // linkedPtr::element_type* const pValueTemp = linkedPtr.mpValue;
+ // linkedPtr.reset(mpValue);
+ // reset(pValueTemp);
+ // }
+ //}
+
+
+ /// operator*
+ /// Returns the owner pointer dereferenced.
+ T& operator*() const
+ {
+ return *mpValue;
+ }
+
+
+ /// operator->
+ /// Allows access to the owned pointer via operator->()
+ T* operator->() const
+ {
+ return mpValue;
+ }
+
+
+ /// get
+ /// Returns the owned pointer. Note that this class does
+ /// not provide an operator T() function. This is because such
+ /// a thing (automatic conversion) is deemed unsafe.
+ T* get() const
+ {
+ return mpValue;
+ }
+
+
+ /// use_count
+ /// Returns the use count of the shared pointer.
+ /// The return value is one if the owned pointer is null.
+ /// This function is provided for compatibility with the
+ /// proposed C++ standard and for debugging purposes. It is not
+ /// intended for runtime use given that its execution time is
+ /// not constant.
+ int use_count() const
+ {
+ int useCount(1);
+
+ for(const linked_ptr_base* pCurrent = static_cast<const linked_ptr_base*>(this);
+ pCurrent->mpNext != static_cast<const linked_ptr_base*>(this); pCurrent = pCurrent->mpNext)
+ ++useCount;
+
+ return useCount;
+ }
+
+
+ /// unique
+ /// Returns true if the use count of the owned pointer is one.
+ /// The return value is true if the owned pointer is null.
+ bool unique() const
+ {
+ return (mpNext == static_cast<const linked_ptr_base*>(this));
+ }
+
+
+ /// Implicit operator bool
+ /// Allows for using a linked_ptr as a boolean.
+ /// Note that below we do not use operator bool(). The reason for this
+ /// is that booleans automatically convert up to short, int, float, etc.
+ /// The result is that this: if(linkedPtr == 1) would yield true (bad).
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const
+ {
+ if(mpValue)
+ return &this_type::get;
+ return NULL;
+ }
+
+
+ /// operator!
+ /// This returns the opposite of operator bool; it returns true if
+ /// the owned pointer is null. Some compilers require this and some don't.
+ bool operator!()
+ {
+ return (mpValue == NULL);
+ }
+
+
+ /// detach
+ /// Returns ownership of the pointer to the caller. Fixes all
+ /// references to the pointer by any other owners to be NULL.
+ /// This function can work properly only if all entries in the list
+ /// refer to type T and none refer to any other type (e.g. U).
+ T* detach()
+ {
+ T* const pValue = mpValue;
+
+ linked_ptr_base* p = this;
+ do
+ {
+ linked_ptr_base* const pNext = p->mpNext;
+ static_cast<this_type*>(p)->mpValue = NULL;
+ p->mpNext = p->mpPrev = p;
+ p = pNext;
+ }
+ while(p != this);
+
+ return pValue;
+ }
+
+ /// force_delete
+ /// Forces deletion of the shared pointer. Fixes all references to the
+ /// pointer by any other owners to be NULL.
+ /// This function can work properly only if all entries in the list
+ /// refer to type T and none refer to any other type (e.g. U).
+ void force_delete()
+ {
+ T* const pValue = detach();
+ Deleter del;
+ del(pValue);
+ }
+
+ }; // class linked_ptr
+
+
+
+ /// get_pointer
+ /// Returns linked_ptr::get() via the input linked_ptr. Provided for compatibility
+ /// with certain well-known libraries that use this functionality.
+ template <typename T, typename D>
+ inline T* get_pointer(const linked_ptr<T, D>& linkedPtr)
+ {
+ return linkedPtr.get();
+ }
+
+
+ /// operator==
+ /// Compares two linked_ptr objects for equality. Equality is defined as
+ /// being true when the pointer shared between two linked_ptr objects is equal.
+ template <typename T, typename TD, typename U, typename UD>
+ inline bool operator==(const linked_ptr<T, TD>& linkedPtr1, const linked_ptr<U, UD>& linkedPtr2)
+ {
+ return (linkedPtr1.get() == linkedPtr2.get());
+ }
+
+
+ /// operator!=
+ /// Compares two linked_ptr objects for inequality. Equality is defined as
+ /// being true when the pointer shared between two linked_ptr objects is equal.
+ template <typename T, typename TD, typename U, typename UD>
+ inline bool operator!=(const linked_ptr<T, TD>& linkedPtr1, const linked_ptr<U, UD>& linkedPtr2)
+ {
+ return (linkedPtr1.get() != linkedPtr2.get());
+ }
+
+
+ /// operator<
+ /// Returns which linked_ptr is 'less' than the other. Useful when storing
+ /// sorted containers of linked_ptr objects.
+ template <typename T, typename TD, typename U, typename UD>
+ inline bool operator<(const linked_ptr<T, TD>& linkedPtr1, const linked_ptr<U, UD>& linkedPtr2)
+ {
+ return (linkedPtr1.get() < linkedPtr2.get());
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/list.h b/EASTL/include/EASTL/list.h
new file mode 100644
index 0000000..be99c01
--- /dev/null
+++ b/EASTL/include/EASTL/list.h
@@ -0,0 +1,2183 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a doubly-linked list, much like the C++ std::list class.
+// The primary distinctions between this list and std::list are:
+// - list doesn't implement some of the less-frequently used functions
+// of std::list. Any required functions can be added at a later time.
+// - list has a couple extension functions that increase performance.
+// - list can contain objects with alignment requirements. std::list cannot
+// do so without a bit of tedious non-portable effort.
+// - list has optimizations that don't exist in the STL implementations
+// supplied by library vendors for our targeted platforms.
+// - list supports debug memory naming natively.
+// - list::size() by default is not a constant time function, like the list::size
+// in some std implementations such as STLPort and SGI STL but unlike the
+// list in Dinkumware and Metrowerks. The EASTL_LIST_SIZE_CACHE option can change this.
+// - list provides a guaranteed portable node definition that allows users
+// to write custom fixed size node allocators that are portable.
+// - list is easier to read, debug, and visualize.
+// - list is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - list has less deeply nested function calls and allows the user to
+// enable forced inlining in debug builds in order to reduce bloat.
+// - list doesn't keep a member size variable. This means that list is
+// smaller than std::list (depends on std::list) and that for most operations
+// it is faster than std::list. However, the list::size function is slower.
+// - list::size_type is defined as eastl_size_t instead of size_t in order to
+// save memory and run faster on 64 bit systems.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_LIST_H
+#define EASTL_LIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/bonus/compressed_pair.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <new>
+#include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4345 - Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+// 4623 - default constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4530 4345 4571 4623);
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_LIST_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_LIST_DEFAULT_NAME
+ #define EASTL_LIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " list" // Unless the user overrides something, this is "EASTL list".
+ #endif
+
+
+ /// EASTL_LIST_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_LIST_DEFAULT_ALLOCATOR
+ #define EASTL_LIST_DEFAULT_ALLOCATOR allocator_type(EASTL_LIST_DEFAULT_NAME)
+ #endif
+
+
+
+ /// ListNodeBase
+ ///
+ /// We define a ListNodeBase separately from ListNode (below), because it allows
+ /// us to have non-templated operations such as insert, remove (below), and it
+ /// makes it so that the list anchor node doesn't carry a T with it, which would
+ /// waste space and possibly lead to surprising the user due to extra Ts existing
+ /// that the user didn't explicitly create. The downside to all of this is that
+ /// it makes debug viewing of a list harder, given that the node pointers are of
+ /// type ListNodeBase and not ListNode. However, see ListNodeBaseProxy below.
+ ///
+ struct ListNodeBase
+ {
+ ListNodeBase* mpNext;
+ ListNodeBase* mpPrev;
+
+ void insert(ListNodeBase* pNext) EA_NOEXCEPT; // Inserts this standalone node before the node pNext in pNext's list.
+ void remove() EA_NOEXCEPT; // Removes this node from the list it's in. Leaves this node's mpNext/mpPrev invalid.
+ void splice(ListNodeBase* pFirst, ListNodeBase* pLast) EA_NOEXCEPT; // Removes [pFirst,pLast) from the list it's in and inserts it before this in this node's list.
+ void reverse() EA_NOEXCEPT; // Reverses the order of nodes in the circular list this node is a part of.
+ static void swap(ListNodeBase& a, ListNodeBase& b) EA_NOEXCEPT; // Swaps the nodes a and b in the lists to which they belong.
+
+ void insert_range(ListNodeBase* pFirst, ListNodeBase* pFinal) EA_NOEXCEPT; // Differs from splice in that first/final aren't in another list.
+ static void remove_range(ListNodeBase* pFirst, ListNodeBase* pFinal) EA_NOEXCEPT; //
+ } EASTL_LIST_PROXY_MAY_ALIAS;
+
+
+ #if EASTL_LIST_PROXY_ENABLED
+
+ /// ListNodeBaseProxy
+ ///
+ /// In debug builds, we define ListNodeBaseProxy to be the same thing as
+ /// ListNodeBase, except it is templated on the parent ListNode class.
+ /// We do this because we want users in debug builds to be able to easily
+ /// view the list's contents in a debugger GUI. We do this only in a debug
+ /// build for the reasons described above: that ListNodeBase needs to be
+ /// as efficient as possible and not cause code bloat or extra function
+ /// calls (inlined or not).
+ ///
+ /// ListNodeBaseProxy *must* be separate from its parent class ListNode
+ /// because the list class must have a member node which contains no T value.
+ /// It is thus incorrect for us to have one single ListNode class which
+ /// has mpNext, mpPrev, and mValue. So we do a recursive template trick in
+ /// the definition and use of SListNodeBaseProxy.
+ ///
+ template <typename LN>
+ struct ListNodeBaseProxy
+ {
+ LN* mpNext;
+ LN* mpPrev;
+ };
+
+ template <typename T>
+ struct ListNode : public ListNodeBaseProxy< ListNode<T> >
+ {
+ T mValue;
+ };
+
+ #else
+
+ EA_DISABLE_VC_WARNING(4625 4626)
+ template <typename T>
+ struct ListNode : public ListNodeBase
+ {
+ T mValue;
+ };
+ EA_RESTORE_VC_WARNING()
+
+ #endif
+
+
+
+
+ /// ListIterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct ListIterator
+ {
+ typedef ListIterator<T, Pointer, Reference> this_type;
+ typedef ListIterator<T, T*, T&> iterator;
+ typedef ListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef ListNode<T> node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ ListIterator() EA_NOEXCEPT;
+ ListIterator(const ListNodeBase* pNode) EA_NOEXCEPT;
+ ListIterator(const iterator& x) EA_NOEXCEPT;
+
+ this_type next() const EA_NOEXCEPT;
+ this_type prev() const EA_NOEXCEPT;
+
+ reference operator*() const EA_NOEXCEPT;
+ pointer operator->() const EA_NOEXCEPT;
+
+ this_type& operator++() EA_NOEXCEPT;
+ this_type operator++(int) EA_NOEXCEPT;
+
+ this_type& operator--() EA_NOEXCEPT;
+ this_type operator--(int) EA_NOEXCEPT;
+
+ }; // ListIterator
+
+
+
+
+ /// ListBase
+ ///
+ /// See VectorBase (class vector) for an explanation of why we
+ /// create this separate base class.
+ ///
+ template <typename T, typename Allocator>
+ class ListBase
+ {
+ public:
+ typedef T value_type;
+ typedef Allocator allocator_type;
+ typedef ListNode<T> node_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ #if EASTL_LIST_PROXY_ENABLED
+ typedef ListNodeBaseProxy< ListNode<T> > base_node_type;
+ #else
+ typedef ListNodeBase base_node_type; // We use ListNodeBase instead of ListNode<T> because we don't want to create a T.
+ #endif
+
+ protected:
+ eastl::compressed_pair<base_node_type, allocator_type> mNodeAllocator;
+ #if EASTL_LIST_SIZE_CACHE
+ size_type mSize;
+ #endif
+
+ base_node_type& internalNode() EA_NOEXCEPT { return mNodeAllocator.first(); }
+ base_node_type const& internalNode() const EA_NOEXCEPT { return mNodeAllocator.first(); }
+ allocator_type& internalAllocator() EA_NOEXCEPT { return mNodeAllocator.second(); }
+ const allocator_type& internalAllocator() const EA_NOEXCEPT { return mNodeAllocator.second(); }
+
+ public:
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ protected:
+ ListBase();
+ ListBase(const allocator_type& a);
+ ~ListBase();
+
+ node_type* DoAllocateNode();
+ void DoFreeNode(node_type* pNode);
+
+ void DoInit() EA_NOEXCEPT;
+ void DoClear();
+
+ }; // ListBase
+
+
+
+
+ /// list
+ ///
+ /// -- size() is O(n) --
+ /// Note that as of this writing, list::size() is an O(n) operation when EASTL_LIST_SIZE_CACHE is disabled.
+ /// That is, getting the size of the list is not a fast operation, as it requires traversing the list and
+ /// counting the nodes. We could make list::size() be fast by having a member mSize variable. There are reasons
+ /// for having such functionality and reasons for not having such functionality. We currently choose
+ /// to not have a member mSize variable as it would add four bytes to the class, add a tiny amount
+ /// of processing to functions such as insert and erase, and would only serve to improve the size
+ /// function, but no others. The alternative argument is that the C++ standard states that std::list
+ /// should be an O(1) operation (i.e. have a member size variable), most C++ standard library list
+ /// implementations do so, the size is but an integer which is quick to update, and many users
+ /// expect to have a fast size function. The EASTL_LIST_SIZE_CACHE option changes this.
+ /// To consider: Make size caching an optional template parameter.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a list container, your pool
+ /// needs to contain items of type list::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+ /// typedef list<Widget, MemoryPool> WidgetList; // Delare your WidgetList type.
+ /// MemoryPool myPool(sizeof(WidgetList::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetList myList(&myPool); // Create a list that uses the pool.
+ ///
+ template <typename T, typename Allocator = EASTLAllocatorType>
+ class list : public ListBase<T, Allocator>
+ {
+ typedef ListBase<T, Allocator> base_type;
+ typedef list<T, Allocator> this_type;
+
+ public:
+ typedef T value_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef ListIterator<T, T*, T&> iterator;
+ typedef ListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::base_node_type base_node_type;
+
+ using base_type::mNodeAllocator;
+ using base_type::DoAllocateNode;
+ using base_type::DoFreeNode;
+ using base_type::DoClear;
+ using base_type::DoInit;
+ using base_type::get_allocator;
+ #if EASTL_LIST_SIZE_CACHE
+ using base_type::mSize;
+ #endif
+ using base_type::internalNode;
+ using base_type::internalAllocator;
+
+ public:
+ list();
+ list(const allocator_type& allocator);
+ explicit list(size_type n, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
+ list(size_type n, const value_type& value, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
+ list(const this_type& x);
+ list(const this_type& x, const allocator_type& allocator);
+ list(this_type&& x);
+ list(this_type&&, const allocator_type&);
+ list(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ list(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ // In the case that the two containers' allocators are unequal, swap copies elements instead
+ // of replacing them in place. In this case swap is an O(n) operation instead of O(1).
+ void swap(this_type& x);
+
+ void assign(size_type n, const value_type& value);
+
+ template <typename InputIterator> // It turns out that the C++ std::list specifies a two argument
+ void assign(InputIterator first, InputIterator last); // version of assign that takes (int size, int value). These are not
+ // iterators, so we need to do a template compiler trick to do the right thing.
+ void assign(std::initializer_list<value_type> ilist);
+
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ bool empty() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+
+ void resize(size_type n, const value_type& value);
+ void resize(size_type n);
+
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ template <typename... Args>
+ void emplace_front(Args&&... args);
+
+ template <typename... Args>
+ void emplace_back(Args&&... args);
+
+ void push_front(const value_type& value);
+ void push_front(value_type&& x);
+ reference push_front();
+ void* push_front_uninitialized();
+
+ void push_back(const value_type& value);
+ void push_back(value_type&& x);
+ reference push_back();
+ void* push_back_uninitialized();
+
+ void pop_front();
+ void pop_back();
+
+ template <typename... Args>
+ iterator emplace(const_iterator position, Args&&... args);
+
+ iterator insert(const_iterator position);
+ iterator insert(const_iterator position, const value_type& value);
+ iterator insert(const_iterator position, value_type&& x);
+ iterator insert(const_iterator position, std::initializer_list<value_type> ilist);
+ iterator insert(const_iterator position, size_type n, const value_type& value);
+
+ template <typename InputIterator>
+ iterator insert(const_iterator position, InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ void clear() EA_NOEXCEPT;
+ void reset_lose_memory() EA_NOEXCEPT; // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type remove(const T& x);
+
+ template <typename Predicate>
+ size_type remove_if(Predicate);
+
+ void reverse() EA_NOEXCEPT;
+
+ // splice inserts elements in the range [first,last) before position and removes the elements from x.
+ // In the case that the two containers' allocators are unequal, splice copies elements
+ // instead of splicing them. In this case elements are not removed from x, and iterators
+ // into the spliced elements from x continue to point to the original values in x.
+ void splice(const_iterator position, this_type& x);
+ void splice(const_iterator position, this_type& x, const_iterator i);
+ void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last);
+ void splice(const_iterator position, this_type&& x);
+ void splice(const_iterator position, this_type&& x, const_iterator i);
+ void splice(const_iterator position, this_type&& x, const_iterator first, const_iterator last);
+
+ public:
+ // For merge, see notes for splice regarding the handling of unequal allocators.
+ void merge(this_type& x);
+ void merge(this_type&& x);
+
+ template <typename Compare>
+ void merge(this_type& x, Compare compare);
+
+ template <typename Compare>
+ void merge(this_type&& x, Compare compare);
+
+ void unique();
+
+ template <typename BinaryPredicate>
+ void unique(BinaryPredicate);
+
+ // Sorting functionality
+ // This is independent of the global sort algorithms, as lists are
+ // linked nodes and can be sorted more efficiently by moving nodes
+ // around in ways that global sort algorithms aren't privy to.
+ void sort();
+
+ template<typename Compare>
+ void sort(Compare compare);
+
+ public:
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ node_type* DoCreateNode();
+
+ template<typename... Args>
+ node_type* DoCreateNode(Args&&... args);
+
+ template <typename Integer>
+ void DoAssign(Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ void DoAssign(InputIterator first, InputIterator last, false_type);
+
+ void DoAssignValues(size_type n, const value_type& value);
+
+ template <typename Integer>
+ void DoInsert(ListNodeBase* pNode, Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ void DoInsert(ListNodeBase* pNode, InputIterator first, InputIterator last, false_type);
+
+ void DoInsertValues(ListNodeBase* pNode, size_type n, const value_type& value);
+
+ template<typename... Args>
+ void DoInsertValue(ListNodeBase* pNode, Args&&... args);
+
+ void DoErase(ListNodeBase* pNode);
+
+ void DoSwap(this_type& x);
+
+ template <typename Compare>
+ iterator DoSort(iterator i1, iterator end2, size_type n, Compare& compare);
+
+ }; // class list
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ListNodeBase
+ ///////////////////////////////////////////////////////////////////////
+
+ // Swaps the nodes a and b in the lists to which they belong. This is similar to
+ // splicing a into b's list and b into a's list at the same time.
+ // Works by swapping the members of a and b, and fixes up the lists that a and b
+ // were part of to point to the new members.
+ inline void ListNodeBase::swap(ListNodeBase& a, ListNodeBase& b) EA_NOEXCEPT
+ {
+ const ListNodeBase temp(a);
+ a = b;
+ b = temp;
+
+ if(a.mpNext == &b)
+ a.mpNext = a.mpPrev = &a;
+ else
+ a.mpNext->mpPrev = a.mpPrev->mpNext = &a;
+
+ if(b.mpNext == &a)
+ b.mpNext = b.mpPrev = &b;
+ else
+ b.mpNext->mpPrev = b.mpPrev->mpNext = &b;
+ }
+
+
+ // splices the [first,last) range from its current list into our list before this node.
+ inline void ListNodeBase::splice(ListNodeBase* first, ListNodeBase* last) EA_NOEXCEPT
+ {
+ // We assume that [first, last] are not within our list.
+ last->mpPrev->mpNext = this;
+ first->mpPrev->mpNext = last;
+ this->mpPrev->mpNext = first;
+
+ ListNodeBase* const pTemp = this->mpPrev;
+ this->mpPrev = last->mpPrev;
+ last->mpPrev = first->mpPrev;
+ first->mpPrev = pTemp;
+ }
+
+
+ inline void ListNodeBase::reverse() EA_NOEXCEPT
+ {
+ ListNodeBase* pNode = this;
+ do
+ {
+ EA_ANALYSIS_ASSUME(pNode != NULL);
+ ListNodeBase* const pTemp = pNode->mpNext;
+ pNode->mpNext = pNode->mpPrev;
+ pNode->mpPrev = pTemp;
+ pNode = pNode->mpPrev;
+ }
+ while(pNode != this);
+ }
+
+
+ inline void ListNodeBase::insert(ListNodeBase* pNext) EA_NOEXCEPT
+ {
+ mpNext = pNext;
+ mpPrev = pNext->mpPrev;
+ pNext->mpPrev->mpNext = this;
+ pNext->mpPrev = this;
+ }
+
+
+ // Removes this node from the list that it's in. Assumes that the
+ // node is within a list and thus that its prev/next pointers are valid.
+ inline void ListNodeBase::remove() EA_NOEXCEPT
+ {
+ mpNext->mpPrev = mpPrev;
+ mpPrev->mpNext = mpNext;
+ }
+
+
+ // Inserts the standalone range [pFirst, pFinal] before pPosition. Assumes that the
+ // range is not within a list and thus that it's prev/next pointers are not valid.
+ // Assumes that this node is within a list and thus that its prev/next pointers are valid.
+ inline void ListNodeBase::insert_range(ListNodeBase* pFirst, ListNodeBase* pFinal) EA_NOEXCEPT
+ {
+ mpPrev->mpNext = pFirst;
+ pFirst->mpPrev = mpPrev;
+ mpPrev = pFinal;
+ pFinal->mpNext = this;
+ }
+
+
+ // Removes the range [pFirst, pFinal] from the list that it's in. Assumes that the
+ // range is within a list and thus that its prev/next pointers are valid.
+ inline void ListNodeBase::remove_range(ListNodeBase* pFirst, ListNodeBase* pFinal) EA_NOEXCEPT
+ {
+ pFinal->mpNext->mpPrev = pFirst->mpPrev;
+ pFirst->mpPrev->mpNext = pFinal->mpNext;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ListIterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline ListIterator<T, Pointer, Reference>::ListIterator() EA_NOEXCEPT
+ : mpNode() // To consider: Do we really need to intialize mpNode?
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline ListIterator<T, Pointer, Reference>::ListIterator(const ListNodeBase* pNode) EA_NOEXCEPT
+ : mpNode(static_cast<node_type*>((ListNode<T>*)const_cast<ListNodeBase*>(pNode))) // All this casting is in the name of making runtime debugging much easier on the user.
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline ListIterator<T, Pointer, Reference>::ListIterator(const iterator& x) EA_NOEXCEPT
+ : mpNode(const_cast<node_type*>(x.mpNode))
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename ListIterator<T, Pointer, Reference>::this_type
+ ListIterator<T, Pointer, Reference>::next() const EA_NOEXCEPT
+ {
+ return ListIterator(mpNode->mpNext);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename ListIterator<T, Pointer, Reference>::this_type
+ ListIterator<T, Pointer, Reference>::prev() const EA_NOEXCEPT
+ {
+ return ListIterator(mpNode->mpPrev);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename ListIterator<T, Pointer, Reference>::reference
+ ListIterator<T, Pointer, Reference>::operator*() const EA_NOEXCEPT
+ {
+ return mpNode->mValue;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename ListIterator<T, Pointer, Reference>::pointer
+ ListIterator<T, Pointer, Reference>::operator->() const EA_NOEXCEPT
+ {
+ return &mpNode->mValue;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename ListIterator<T, Pointer, Reference>::this_type&
+ ListIterator<T, Pointer, Reference>::operator++() EA_NOEXCEPT
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename ListIterator<T, Pointer, Reference>::this_type
+ ListIterator<T, Pointer, Reference>::operator++(int) EA_NOEXCEPT
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename ListIterator<T, Pointer, Reference>::this_type&
+ ListIterator<T, Pointer, Reference>::operator--() EA_NOEXCEPT
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpPrev);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename ListIterator<T, Pointer, Reference>::this_type
+ ListIterator<T, Pointer, Reference>::operator--(int) EA_NOEXCEPT
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpPrev);
+ return temp;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template paremeters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const ListIterator<T, PointerA, ReferenceA>& a,
+ const ListIterator<T, PointerB, ReferenceB>& b) EA_NOEXCEPT
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const ListIterator<T, PointerA, ReferenceA>& a,
+ const ListIterator<T, PointerB, ReferenceB>& b) EA_NOEXCEPT
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const ListIterator<T, Pointer, Reference>& a,
+ const ListIterator<T, Pointer, Reference>& b) EA_NOEXCEPT
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ListBase
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline ListBase<T, Allocator>::ListBase()
+ : mNodeAllocator(base_node_type(), allocator_type(EASTL_LIST_DEFAULT_NAME))
+ #if EASTL_LIST_SIZE_CACHE
+ , mSize(0)
+ #endif
+ {
+ DoInit();
+ }
+
+ template <typename T, typename Allocator>
+ inline ListBase<T, Allocator>::ListBase(const allocator_type& allocator)
+ : mNodeAllocator(base_node_type(), allocator)
+ #if EASTL_LIST_SIZE_CACHE
+ , mSize(0)
+ #endif
+ {
+ DoInit();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline ListBase<T, Allocator>::~ListBase()
+ {
+ DoClear();
+ }
+
+
+ template <typename T, typename Allocator>
+ const typename ListBase<T, Allocator>::allocator_type&
+ ListBase<T, Allocator>::get_allocator() const EA_NOEXCEPT
+ {
+ return internalAllocator();
+ }
+
+
+ template <typename T, typename Allocator>
+ typename ListBase<T, Allocator>::allocator_type&
+ ListBase<T, Allocator>::get_allocator() EA_NOEXCEPT
+ {
+ return internalAllocator();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void ListBase<T, Allocator>::set_allocator(const allocator_type& allocator)
+ {
+ EASTL_ASSERT((internalAllocator() == allocator) || (static_cast<node_type*>(internalNode().mpNext) == &internalNode())); // We can only assign a different allocator if we are empty of elements.
+ internalAllocator() = allocator;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename ListBase<T, Allocator>::node_type*
+ ListBase<T, Allocator>::DoAllocateNode()
+ {
+ node_type* pNode = (node_type*)allocate_memory(internalAllocator(), sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT(pNode != nullptr);
+ return pNode;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void ListBase<T, Allocator>::DoFreeNode(node_type* p)
+ {
+ EASTLFree(internalAllocator(), p, sizeof(node_type));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void ListBase<T, Allocator>::DoInit() EA_NOEXCEPT
+ {
+ internalNode().mpNext = (ListNode<T>*)&internalNode();
+ internalNode().mpPrev = (ListNode<T>*)&internalNode();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void ListBase<T, Allocator>::DoClear()
+ {
+ node_type* p = static_cast<node_type*>(internalNode().mpNext);
+
+ while(p != &internalNode())
+ {
+ node_type* const pTemp = p;
+ p = static_cast<node_type*>(p->mpNext);
+ pTemp->~node_type();
+ EASTLFree(internalAllocator(), pTemp, sizeof(node_type));
+ }
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // list
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list()
+ : base_type()
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list(size_type n, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInsertValues((ListNodeBase*)&internalNode(), n, value_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list(size_type n, const value_type& value, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInsertValues((ListNodeBase*)&internalNode(), n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list(const this_type& x)
+ : base_type(x.internalAllocator())
+ {
+ DoInsert((ListNodeBase*)&internalNode(), const_iterator((ListNodeBase*)x.internalNode().mpNext), const_iterator((ListNodeBase*)&x.internalNode()), false_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list(const this_type& x, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInsert((ListNodeBase*)&internalNode(), const_iterator((ListNodeBase*)x.internalNode().mpNext), const_iterator((ListNodeBase*)&x.internalNode()), false_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list(this_type&& x)
+ : base_type(eastl::move(x.internalAllocator()))
+ {
+ swap(x);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list(this_type&& x, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+
+
+ template <typename T, typename Allocator>
+ inline list<T, Allocator>::list(std::initializer_list<value_type> ilist, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInsert((ListNodeBase*)&internalNode(), ilist.begin(), ilist.end(), false_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ list<T, Allocator>::list(InputIterator first, InputIterator last)
+ : base_type(EASTL_LIST_DEFAULT_ALLOCATOR)
+ {
+ //insert(const_iterator((ListNodeBase*)&internalNode()), first, last);
+ DoInsert((ListNodeBase*)&internalNode(), first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::iterator
+ inline list<T, Allocator>::begin() EA_NOEXCEPT
+ {
+ return iterator((ListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_iterator
+ list<T, Allocator>::begin() const EA_NOEXCEPT
+ {
+ return const_iterator((ListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_iterator
+ list<T, Allocator>::cbegin() const EA_NOEXCEPT
+ {
+ return const_iterator((ListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::end() EA_NOEXCEPT
+ {
+ return iterator((ListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_iterator
+ list<T, Allocator>::end() const EA_NOEXCEPT
+ {
+ return const_iterator((ListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_iterator
+ list<T, Allocator>::cend() const EA_NOEXCEPT
+ {
+ return const_iterator((ListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::reverse_iterator
+ list<T, Allocator>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator((ListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_reverse_iterator
+ list<T, Allocator>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator((ListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_reverse_iterator
+ list<T, Allocator>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator((ListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::reverse_iterator
+ list<T, Allocator>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator((ListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_reverse_iterator
+ list<T, Allocator>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator((ListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_reverse_iterator
+ list<T, Allocator>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator((ListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::reference
+ list<T, Allocator>::front()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<node_type*>(internalNode().mpNext) == &internalNode()))
+ EASTL_FAIL_MSG("list::front -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<node_type*>(internalNode().mpNext)->mValue;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_reference
+ list<T, Allocator>::front() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<node_type*>(internalNode().mpNext) == &internalNode()))
+ EASTL_FAIL_MSG("list::front -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<node_type*>(internalNode().mpNext)->mValue;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::reference
+ list<T, Allocator>::back()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<node_type*>(internalNode().mpNext) == &internalNode()))
+ EASTL_FAIL_MSG("list::back -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<node_type*>(internalNode().mpPrev)->mValue;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_reference
+ list<T, Allocator>::back() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<node_type*>(internalNode().mpNext) == &internalNode()))
+ EASTL_FAIL_MSG("list::back -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<node_type*>(internalNode().mpPrev)->mValue;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool list<T, Allocator>::empty() const EA_NOEXCEPT
+ {
+ #if EASTL_LIST_SIZE_CACHE
+ return (mSize == 0);
+ #else
+ return static_cast<node_type*>(internalNode().mpNext) == &internalNode();
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::size_type
+ list<T, Allocator>::size() const EA_NOEXCEPT
+ {
+ #if EASTL_LIST_SIZE_CACHE
+ return mSize;
+ #else
+ #if EASTL_DEBUG
+ const ListNodeBase* p = (ListNodeBase*)internalNode().mpNext;
+ size_type n = 0;
+ while(p != (ListNodeBase*)&internalNode())
+ {
+ ++n;
+ p = (ListNodeBase*)p->mpNext;
+ }
+ return n;
+ #else
+ // The following optimizes to slightly better code than the code above.
+ return (size_type)eastl::distance(const_iterator((ListNodeBase*)internalNode().mpNext), const_iterator((ListNodeBase*)&internalNode()));
+ #endif
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::this_type&
+ list<T, Allocator>::operator=(const this_type& x)
+ {
+ if(this != &x) // If not assigning to self...
+ {
+ // If (EASTL_ALLOCATOR_COPY_ENABLED == 1) and the current contents are allocated by an
+ // allocator that's unequal to x's allocator, we need to reallocate our elements with
+ // our current allocator and reallocate it with x's allocator. If the allocators are
+ // equal then we can use a more optimal algorithm that doesn't reallocate our elements
+ // but instead can copy them in place.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ bool bSlowerPathwayRequired = (internalAllocator() != x.internalAllocator());
+ #else
+ bool bSlowerPathwayRequired = false;
+ #endif
+
+ if(bSlowerPathwayRequired)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ internalAllocator() = x.internalAllocator();
+ #endif
+ }
+
+ DoAssign(x.begin(), x.end(), eastl::false_type());
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::this_type&
+ list<T, Allocator>::operator=(this_type&& x)
+ {
+ if(this != &x)
+ {
+ clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor.
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::this_type&
+ list<T, Allocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ DoAssign(ilist.begin(), ilist.end(), false_type());
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::assign(size_type n, const value_type& value)
+ {
+ DoAssignValues(n, value);
+ }
+
+
+ // It turns out that the C++ std::list specifies a two argument
+ // version of assign that takes (int size, int value). These are not
+ // iterators, so we need to do a template compiler trick to do the right thing.
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void list<T, Allocator>::assign(InputIterator first, InputIterator last)
+ {
+ DoAssign(first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::assign(std::initializer_list<value_type> ilist)
+ {
+ DoAssign(ilist.begin(), ilist.end(), false_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::clear() EA_NOEXCEPT
+ {
+ DoClear();
+ DoInit();
+ #if EASTL_LIST_SIZE_CACHE
+ mSize = 0;
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::reset_lose_memory() EA_NOEXCEPT
+ {
+ // The reset_lose_memory function is a special extension function which unilaterally
+ // resets the container to an empty state without freeing the memory of
+ // the contained objects. This is useful for very quickly tearing down a
+ // container built into scratch memory.
+ DoInit();
+ #if EASTL_LIST_SIZE_CACHE
+ mSize = 0;
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::resize(size_type n, const value_type& value)
+ {
+ iterator current((ListNodeBase*)internalNode().mpNext);
+ size_type i = 0;
+
+ while((current.mpNode != &internalNode()) && (i < n))
+ {
+ ++current;
+ ++i;
+ }
+ if(i == n)
+ erase(current, (ListNodeBase*)&internalNode());
+ else
+ insert((ListNodeBase*)&internalNode(), n - i, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::resize(size_type n)
+ {
+ resize(n, value_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename... Args>
+ void list<T, Allocator>::emplace_front(Args&&... args)
+ {
+ DoInsertValue((ListNodeBase*)internalNode().mpNext, eastl::forward<Args>(args)...);
+ }
+
+ template <typename T, typename Allocator>
+ template <typename... Args>
+ void list<T, Allocator>::emplace_back(Args&&... args)
+ {
+ DoInsertValue((ListNodeBase*)&internalNode(), eastl::forward<Args>(args)...);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::push_front(const value_type& value)
+ {
+ DoInsertValue((ListNodeBase*)internalNode().mpNext, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::push_front(value_type&& value)
+ {
+ emplace(begin(), eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::reference
+ list<T, Allocator>::push_front()
+ {
+ node_type* const pNode = DoCreateNode();
+ ((ListNodeBase*)pNode)->insert((ListNodeBase*)internalNode().mpNext);
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ return static_cast<node_type*>(internalNode().mpNext)->mValue; // Same as return front();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void* list<T, Allocator>::push_front_uninitialized()
+ {
+ node_type* const pNode = DoAllocateNode();
+ ((ListNodeBase*)pNode)->insert((ListNodeBase*)internalNode().mpNext);
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ return &pNode->mValue;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::pop_front()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(static_cast<node_type*>(internalNode().mpNext) == &internalNode()))
+ EASTL_FAIL_MSG("list::pop_front -- empty container");
+ #endif
+
+ DoErase((ListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::push_back(const value_type& value)
+ {
+ DoInsertValue((ListNodeBase*)&internalNode(), value);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::push_back(value_type&& value)
+ {
+ emplace(end(), eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::reference
+ list<T, Allocator>::push_back()
+ {
+ node_type* const pNode = DoCreateNode();
+ ((ListNodeBase*)pNode)->insert((ListNodeBase*)&internalNode());
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ return static_cast<node_type*>(internalNode().mpPrev)->mValue; // Same as return back();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void* list<T, Allocator>::push_back_uninitialized()
+ {
+ node_type* const pNode = DoAllocateNode();
+ ((ListNodeBase*)pNode)->insert((ListNodeBase*)&internalNode());
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ return &pNode->mValue;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::pop_back()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(static_cast<node_type*>(internalNode().mpNext) == &internalNode()))
+ EASTL_FAIL_MSG("list::pop_back -- empty container");
+ #endif
+
+ DoErase((ListNodeBase*)internalNode().mpPrev);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename... Args>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::emplace(const_iterator position, Args&&... args)
+ {
+ DoInsertValue(position.mpNode, eastl::forward<Args>(args)...);
+ return iterator(position.mpNode->mpPrev);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::insert(const_iterator position)
+ {
+ node_type* const pNode = DoCreateNode(value_type());
+ ((ListNodeBase*)pNode)->insert((ListNodeBase*)position.mpNode);
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ return (ListNodeBase*)pNode;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::insert(const_iterator position, const value_type& value)
+ {
+ node_type* const pNode = DoCreateNode(value);
+ ((ListNodeBase*)pNode)->insert((ListNodeBase*)position.mpNode);
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ return (ListNodeBase*)pNode;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::insert(const_iterator position, value_type&& value)
+ {
+ return emplace(position, eastl::move(value));
+ }
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::insert(const_iterator position, size_type n, const value_type& value)
+ {
+ iterator itPrev(position.mpNode);
+ --itPrev;
+ DoInsertValues((ListNodeBase*)position.mpNode, n, value);
+ return ++itPrev; // Inserts in front of position, returns iterator to new elements.
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::insert(const_iterator position, InputIterator first, InputIterator last)
+ {
+ iterator itPrev(position.mpNode);
+ --itPrev;
+ DoInsert((ListNodeBase*)position.mpNode, first, last, is_integral<InputIterator>());
+ return ++itPrev; // Inserts in front of position, returns iterator to new elements.
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::insert(const_iterator position, std::initializer_list<value_type> ilist)
+ {
+ iterator itPrev(position.mpNode);
+ --itPrev;
+ DoInsert((ListNodeBase*)position.mpNode, ilist.begin(), ilist.end(), false_type());
+ return ++itPrev; // Inserts in front of position, returns iterator to new elements.
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::iterator
+ list<T, Allocator>::erase(const_iterator position)
+ {
+ ++position;
+ DoErase((ListNodeBase*)position.mpNode->mpPrev);
+ return iterator(position.mpNode);
+ }
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::iterator
+ list<T, Allocator>::erase(const_iterator first, const_iterator last)
+ {
+ while(first != last)
+ first = erase(first);
+ return iterator(last.mpNode);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::reverse_iterator
+ list<T, Allocator>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::reverse_iterator
+ list<T, Allocator>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ const_iterator itLastBase((++last).base());
+ const_iterator itFirstBase((++first).base());
+
+ return reverse_iterator(erase(itLastBase, itFirstBase));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::size_type list<T, Allocator>::remove(const value_type& value)
+ {
+ iterator current((ListNodeBase*)internalNode().mpNext);
+ size_type numRemoved = 0;
+
+ while(current.mpNode != &internalNode())
+ {
+ if(EASTL_LIKELY(!(*current == value)))
+ ++current; // We have duplicate '++current' statements here and below, but the logic here forces this.
+ else
+ {
+ ++current;
+ DoErase((ListNodeBase*)current.mpNode->mpPrev);
+ ++numRemoved;
+ }
+ }
+ return numRemoved;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Predicate>
+ inline typename list<T, Allocator>::size_type list<T, Allocator>::remove_if(Predicate predicate)
+ {
+ size_type numRemoved = 0;
+ for(iterator first((ListNodeBase*)internalNode().mpNext), last((ListNodeBase*)&internalNode()); first != last; )
+ {
+ iterator temp(first);
+ ++temp;
+ if(predicate(first.mpNode->mValue))
+ {
+ DoErase((ListNodeBase*)first.mpNode);
+ ++numRemoved;
+ }
+ first = temp;
+ }
+ return numRemoved;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::reverse() EA_NOEXCEPT
+ {
+ ((ListNodeBase&)internalNode()).reverse();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::splice(const_iterator position, this_type& x)
+ {
+ // Splicing operations cannot succeed if the two containers use unequal allocators.
+ // This issue is not addressed in the C++ 1998 standard but is discussed in the
+ // LWG defect reports, such as #431. There is no simple solution to this problem.
+ // One option is to throw an exception. Another option which probably captures the
+ // user intent most of the time is to copy the range from the source to the dest and
+ // remove it from the source.
+
+ if(internalAllocator() == x.internalAllocator())
+ {
+ #if EASTL_LIST_SIZE_CACHE
+ if(x.mSize)
+ {
+ ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)x.internalNode().mpNext, (ListNodeBase*)&x.internalNode());
+ mSize += x.mSize;
+ x.mSize = 0;
+ }
+ #else
+ if(!x.empty())
+ ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)x.internalNode().mpNext, (ListNodeBase*)&x.internalNode());
+ #endif
+ }
+ else
+ {
+ insert(position, x.begin(), x.end());
+ x.clear();
+ }
+ }
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::splice(const_iterator position, this_type&& x)
+ {
+ return splice(position, x); // This will call splice(const_iterator, const this_type&);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::splice(const_iterator position, list& x, const_iterator i)
+ {
+ if(internalAllocator() == x.internalAllocator())
+ {
+ iterator i2(i.mpNode);
+ ++i2;
+ if((position != i) && (position != i2))
+ {
+ ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)i.mpNode, (ListNodeBase*)i2.mpNode);
+
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize;
+ --x.mSize;
+ #endif
+ }
+ }
+ else
+ {
+ insert(position, *i);
+ x.erase(i);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::splice(const_iterator position, list<T,Allocator>&& x, const_iterator i)
+ {
+ return splice(position, x, i); // This will call splice(const_iterator, const this_type&, const_iterator);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::splice(const_iterator position, this_type& x, const_iterator first, const_iterator last)
+ {
+ if(internalAllocator() == x.internalAllocator())
+ {
+ #if EASTL_LIST_SIZE_CACHE
+ const size_type n = (size_type)eastl::distance(first, last);
+
+ if(n)
+ {
+ ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)first.mpNode, (ListNodeBase*)last.mpNode);
+ mSize += n;
+ x.mSize -= n;
+ }
+ #else
+ if(first != last)
+ ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)first.mpNode, (ListNodeBase*)last.mpNode);
+ #endif
+ }
+ else
+ {
+ insert(position, first, last);
+ x.erase(first, last);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::splice(const_iterator position, list<T,Allocator>&& x, const_iterator first, const_iterator last)
+ {
+ return splice(position, x, first, last); // This will call splice(const_iterator, const this_type&, const_iterator, const_iterator);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::swap(this_type& x)
+ {
+ if(internalAllocator() == x.internalAllocator()) // If allocators are equivalent...
+ DoSwap(x);
+ else // else swap the contents.
+ {
+ const this_type temp(*this); // Can't call eastl::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::merge(this_type& x)
+ {
+ if(this != &x)
+ {
+ iterator first(begin());
+ iterator firstX(x.begin());
+ const iterator last(end());
+ const iterator lastX(x.end());
+
+ while((first != last) && (firstX != lastX))
+ {
+ if(*firstX < *first)
+ {
+ iterator next(firstX);
+
+ splice(first, x, firstX, ++next);
+ firstX = next;
+ }
+ else
+ ++first;
+ }
+
+ if(firstX != lastX)
+ splice(last, x, firstX, lastX);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::merge(this_type&& x)
+ {
+ return merge(x); // This will call merge(this_type&)
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Compare>
+ void list<T, Allocator>::merge(this_type& x, Compare compare)
+ {
+ if(this != &x)
+ {
+ iterator first(begin());
+ iterator firstX(x.begin());
+ const iterator last(end());
+ const iterator lastX(x.end());
+
+ while((first != last) && (firstX != lastX))
+ {
+ if(compare(*firstX, *first))
+ {
+ iterator next(firstX);
+
+ splice(first, x, firstX, ++next);
+ firstX = next;
+ }
+ else
+ ++first;
+ }
+
+ if(firstX != lastX)
+ splice(last, x, firstX, lastX);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Compare>
+ void list<T, Allocator>::merge(this_type&& x, Compare compare)
+ {
+ return merge(x, compare); // This will call merge(this_type&, Compare)
+ }
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::unique()
+ {
+ iterator first(begin());
+ const iterator last(end());
+
+ if(first != last)
+ {
+ iterator next(first);
+
+ while(++next != last)
+ {
+ if(*first == *next)
+ DoErase((ListNodeBase*)next.mpNode);
+ else
+ first = next;
+ next = first;
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename BinaryPredicate>
+ void list<T, Allocator>::unique(BinaryPredicate predicate)
+ {
+ iterator first(begin());
+ const iterator last(end());
+
+ if(first != last)
+ {
+ iterator next(first);
+
+ while(++next != last)
+ {
+ if(predicate(*first, *next))
+ DoErase((ListNodeBase*)next.mpNode);
+ else
+ first = next;
+ next = first;
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::sort()
+ {
+ eastl::less<value_type> compare;
+ DoSort(begin(), end(), size(), compare);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Compare>
+ void list<T, Allocator>::sort(Compare compare)
+ {
+ DoSort(begin(), end(), size(), compare);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Compare>
+ typename list<T, Allocator>::iterator
+ list<T, Allocator>::DoSort(iterator i1, iterator end2, size_type n, Compare& compare)
+ {
+ // A previous version of this function did this by creating temporary lists,
+ // but that was incompatible with fixed_list because the sizes could be too big.
+ // We sort subsegments by recursive descent. Then merge as we ascend.
+ // Return an iterator to the beginning of the sorted subsegment.
+ // Start with a special case for small node counts.
+ switch (n)
+ {
+ case 0:
+ case 1:
+ return i1;
+
+ case 2:
+ // Potentialy swap these two nodes and return the resulting first of them.
+ if(compare(*--end2, *i1))
+ {
+ end2.mpNode->remove();
+ end2.mpNode->insert(i1.mpNode);
+ return end2;
+ }
+ return i1;
+
+ case 3:
+ {
+ // We do a list insertion sort. Measurements showed this improved performance 3-12%.
+ iterator lowest = i1;
+
+ for(iterator current = i1.next(); current != end2; ++current)
+ {
+ if(compare(*current, *lowest))
+ lowest = current;
+ }
+
+ if(lowest == i1)
+ ++i1;
+ else
+ {
+ lowest.mpNode->remove();
+ lowest.mpNode->insert(i1.mpNode);
+ }
+
+ if(compare(*--end2, *i1)) // At this point, i1 refers to the second element in this three element segment.
+ {
+ end2.mpNode->remove();
+ end2.mpNode->insert(i1.mpNode);
+ }
+
+ return lowest;
+ }
+ }
+
+ // Divide the range into two parts are recursively sort each part. Upon return we will have
+ // two halves that are each sorted but we'll need to merge the two together before returning.
+ iterator result;
+ size_type nMid = (n / 2);
+ iterator end1 = eastl::next(i1, (difference_type)nMid);
+ i1 = DoSort(i1, end1, nMid, compare); // Return the new beginning of the first sorted sub-range.
+ iterator i2 = DoSort(end1, end2, n - nMid, compare); // Return the new beginning of the second sorted sub-range.
+
+ // If the start of the second list is before the start of the first list, insert the first list
+ // into the second at an appropriate starting place.
+ if(compare(*i2, *i1))
+ {
+ // Find the position to insert the first list into the second list.
+ iterator ix = i2.next();
+ while((ix != end2) && compare(*ix, *i1))
+ ++ix;
+
+ // Cut out the initial segment of the second list and move it to be in front of the first list.
+ ListNodeBase* i2Cut = i2.mpNode;
+ ListNodeBase* i2CutLast = ix.mpNode->mpPrev;
+ result = i2;
+ end1 = i2 = ix;
+ ListNodeBase::remove_range(i2Cut, i2CutLast);
+ i1.mpNode->insert_range(i2Cut, i2CutLast);
+ }
+ else
+ {
+ result = i1;
+ end1 = i2;
+ }
+
+ // Merge the two segments. We do this by merging the second sub-segment into the first, by walking forward in each of the two sub-segments.
+ for(++i1; (i1 != end1) && (i2 != end2); ++i1) // while still working on either segment...
+ {
+ if(compare(*i2, *i1)) // If i2 is less than i1 and it needs to be merged in front of i1...
+ {
+ // Find the position to insert the i2 list into the i1 list.
+ iterator ix = i2.next();
+ while((ix != end2) && compare(*ix, *i1))
+ ++ix;
+
+ // Cut this section of the i2 sub-segment out and merge into the appropriate place in the i1 list.
+ ListNodeBase* i2Cut = i2.mpNode;
+ ListNodeBase* i2CutLast = ix.mpNode->mpPrev;
+ if(end1 == i2)
+ end1 = ix;
+ i2 = ix;
+ ListNodeBase::remove_range(i2Cut, i2CutLast);
+ i1.mpNode->insert_range(i2Cut, i2CutLast);
+ }
+ }
+
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ template<typename... Args>
+ inline typename list<T, Allocator>::node_type*
+ list<T, Allocator>::DoCreateNode(Args&&... args)
+ {
+ node_type* const pNode = DoAllocateNode(); // pNode is of type node_type, but it's uninitialized memory.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ ::new((void*)&pNode->mValue) value_type(eastl::forward<Args>(args)...);
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #else
+ ::new((void*)&pNode->mValue) value_type(eastl::forward<Args>(args)...);
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::node_type*
+ list<T, Allocator>::DoCreateNode()
+ {
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ ::new((void*)&pNode->mValue) value_type();
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #else
+ ::new((void*)&pNode->mValue) value_type;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Integer>
+ inline void list<T, Allocator>::DoAssign(Integer n, Integer value, true_type)
+ {
+ DoAssignValues(static_cast<size_type>(n), static_cast<value_type>(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ void list<T, Allocator>::DoAssign(InputIterator first, InputIterator last, false_type)
+ {
+ node_type* pNode = static_cast<node_type*>(internalNode().mpNext);
+
+ for(; (pNode != &internalNode()) && (first != last); ++first)
+ {
+ pNode->mValue = *first;
+ pNode = static_cast<node_type*>(pNode->mpNext);
+ }
+
+ if(first == last)
+ erase(const_iterator((ListNodeBase*)pNode), (ListNodeBase*)&internalNode());
+ else
+ DoInsert((ListNodeBase*)&internalNode(), first, last, false_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::DoAssignValues(size_type n, const value_type& value)
+ {
+ node_type* pNode = static_cast<node_type*>(internalNode().mpNext);
+
+ for(; (pNode != &internalNode()) && (n > 0); --n)
+ {
+ pNode->mValue = value;
+ pNode = static_cast<node_type*>(pNode->mpNext);
+ }
+
+ if(n)
+ DoInsertValues((ListNodeBase*)&internalNode(), n, value);
+ else
+ erase(const_iterator((ListNodeBase*)pNode), (ListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Integer>
+ inline void list<T, Allocator>::DoInsert(ListNodeBase* pNode, Integer n, Integer value, true_type)
+ {
+ DoInsertValues(pNode, static_cast<size_type>(n), static_cast<value_type>(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void list<T, Allocator>::DoInsert(ListNodeBase* pNode, InputIterator first, InputIterator last, false_type)
+ {
+ for(; first != last; ++first)
+ DoInsertValue(pNode, *first);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::DoInsertValues(ListNodeBase* pNode, size_type n, const value_type& value)
+ {
+ for(; n > 0; --n)
+ DoInsertValue(pNode, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ template<typename... Args>
+ inline void list<T, Allocator>::DoInsertValue(ListNodeBase* pNode, Args&&... args)
+ {
+ node_type* const pNodeNew = DoCreateNode(eastl::forward<Args>(args)...);
+ ((ListNodeBase*)pNodeNew)->insert(pNode);
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::DoErase(ListNodeBase* pNode)
+ {
+ pNode->remove();
+ ((node_type*)pNode)->~node_type();
+ DoFreeNode(((node_type*)pNode));
+ #if EASTL_LIST_SIZE_CACHE
+ --mSize;
+ #endif
+
+ /* Test version that uses union intermediates
+ union
+ {
+ ListNodeBase* mpBase;
+ node_type* mpNode;
+ } node = { pNode };
+
+ node.mpNode->~node_type();
+ node.mpBase->remove();
+ DoFreeNode(node.mpNode);
+ #if EASTL_LIST_SIZE_CACHE
+ --mSize;
+ #endif
+ */
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void list<T, Allocator>::DoSwap(this_type& x)
+ {
+ ListNodeBase::swap((ListNodeBase&)internalNode(), (ListNodeBase&)x.internalNode()); // We need to implement a special swap because we can't do a shallow swap.
+ eastl::swap(internalAllocator(), x.internalAllocator()); // We do this even if EASTL_ALLOCATOR_COPY_ENABLED is 0.
+ #if EASTL_LIST_SIZE_CACHE
+ eastl::swap(mSize, x.mSize);
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool list<T, Allocator>::validate() const
+ {
+ #if EASTL_LIST_SIZE_CACHE
+ size_type n = 0;
+
+ for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i)
+ ++n;
+
+ if(n != mSize)
+ return false;
+ #endif
+
+ // To do: More validation.
+ return true;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int list<T, Allocator>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ bool operator==(const list<T, Allocator>& a, const list<T, Allocator>& b)
+ {
+ typename list<T, Allocator>::const_iterator ia = a.begin();
+ typename list<T, Allocator>::const_iterator ib = b.begin();
+ typename list<T, Allocator>::const_iterator enda = a.end();
+
+ #if EASTL_LIST_SIZE_CACHE
+ if(a.size() == b.size())
+ {
+ while((ia != enda) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda);
+ }
+ return false;
+ #else
+ typename list<T, Allocator>::const_iterator endb = b.end();
+
+ while((ia != enda) && (ib != endb) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda) && (ib == endb);
+ #endif
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename Allocator>
+ inline synth_three_way_result<T> operator<=>(const list<T, Allocator>& a, const list<T, Allocator>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+#else
+ template <typename T, typename Allocator>
+ bool operator<(const list<T, Allocator>& a, const list<T, Allocator>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+ template <typename T, typename Allocator>
+ bool operator!=(const list<T, Allocator>& a, const list<T, Allocator>& b)
+ {
+ return !(a == b);
+ }
+
+ template <typename T, typename Allocator>
+ bool operator>(const list<T, Allocator>& a, const list<T, Allocator>& b)
+ {
+ return b < a;
+ }
+
+ template <typename T, typename Allocator>
+ bool operator<=(const list<T, Allocator>& a, const list<T, Allocator>& b)
+ {
+ return !(b < a);
+ }
+
+ template <typename T, typename Allocator>
+ bool operator>=(const list<T, Allocator>& a, const list<T, Allocator>& b)
+ {
+ return !(a < b);
+ }
+#endif
+ template <typename T, typename Allocator>
+ void swap(list<T, Allocator>& a, list<T, Allocator>& b)
+ {
+ a.swap(b);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // erase / erase_if
+ //
+ // https://en.cppreference.com/w/cpp/container/list/erase2
+ ///////////////////////////////////////////////////////////////////////
+ template <class T, class Allocator, class U>
+ typename list<T, Allocator>::size_type erase(list<T, Allocator>& c, const U& value)
+ {
+ // Erases all elements that compare equal to value from the container.
+ return c.remove(value);
+ }
+
+ template <class T, class Allocator, class Predicate>
+ typename list<T, Allocator>::size_type erase_if(list<T, Allocator>& c, Predicate predicate)
+ {
+ // Erases all elements that satisfy the predicate pred from the container.
+ return c.remove_if(predicate);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_SN_WARNING()
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/map.h b/EASTL/include/EASTL/map.h
new file mode 100644
index 0000000..7824250
--- /dev/null
+++ b/EASTL/include/EASTL/map.h
@@ -0,0 +1,788 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_MAP_H
+#define EASTL_MAP_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/red_black_tree.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_MAP_DEFAULT_NAME
+ #define EASTL_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " map" // Unless the user overrides something, this is "EASTL map".
+ #endif
+
+
+ /// EASTL_MULTIMAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_MULTIMAP_DEFAULT_NAME
+ #define EASTL_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " multimap" // Unless the user overrides something, this is "EASTL multimap".
+ #endif
+
+
+ /// EASTL_MAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_MAP_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_MULTIMAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_MULTIMAP_DEFAULT_ALLOCATOR
+ #define EASTL_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_MULTIMAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// map
+ ///
+ /// Implements a canonical map.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a map container, your pool
+ /// needs to contain items of type map::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+ /// typedef map<Widget, int, less<Widget>, MemoryPool> WidgetMap; // Delare your WidgetMap type.
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetMap myMap(&myPool); // Create a map that uses the pool.
+ ///
+ template <typename Key, typename T, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+ class map
+ : public rbtree<Key, eastl::pair<const Key, T>, Compare, Allocator, eastl::use_first<eastl::pair<const Key, T> >, true, true>
+ {
+ public:
+ typedef rbtree<Key, eastl::pair<const Key, T>, Compare, Allocator,
+ eastl::use_first<eastl::pair<const Key, T> >, true, true> base_type;
+ typedef map<Key, T, Compare, Allocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::key_type key_type;
+ typedef T mapped_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::insert_return_type insert_return_type;
+ typedef typename base_type::extract_key extract_key;
+ // Other types are inherited from the base class.
+
+ using base_type::begin;
+ using base_type::end;
+ using base_type::find;
+ using base_type::lower_bound;
+ using base_type::upper_bound;
+ using base_type::insert;
+ using base_type::erase;
+
+ protected:
+ using base_type::compare;
+ using base_type::get_compare;
+
+ public:
+ class value_compare
+ {
+ protected:
+ friend class map;
+ Compare compare;
+ value_compare(Compare c) : compare(c) {}
+
+ public:
+ typedef bool result_type;
+ typedef value_type first_argument_type;
+ typedef value_type second_argument_type;
+
+ bool operator()(const value_type& x, const value_type& y) const
+ { return compare(x.first, y.first); }
+ };
+
+ public:
+ map(const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
+ map(const Compare& compare, const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
+ map(const this_type& x);
+ map(this_type&& x);
+ map(this_type&& x, const allocator_type& allocator);
+ map(std::initializer_list<value_type> ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
+
+ template <typename Iterator>
+ map(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To consider: Make a second version of this function without a default arg.
+
+ this_type& operator=(const this_type& x) { return (this_type&)base_type::operator=(x); }
+ this_type& operator=(std::initializer_list<value_type> ilist) { return (this_type&)base_type::operator=(ilist); }
+ this_type& operator=(this_type&& x) { return (this_type&)base_type::operator=(eastl::move(x)); }
+
+ public:
+ /// This is an extension to the C++ standard. We insert a default-constructed
+ /// element with the given key. The reason for this is that we can avoid the
+ /// potentially expensive operation of creating and/or copying a mapped_type
+ /// object on the stack. Note that C++11 move insertions and variadic emplace
+ /// support make this extension mostly no longer necessary.
+ insert_return_type insert(const Key& key);
+
+ value_compare value_comp() const;
+
+ size_type erase(const Key& key);
+ size_type count(const Key& key) const;
+
+ eastl::pair<iterator, iterator> equal_range(const Key& key);
+ eastl::pair<const_iterator, const_iterator> equal_range(const Key& key) const;
+
+ T& operator[](const Key& key); // Of map, multimap, set, and multimap, only map has operator[].
+ T& operator[](Key&& key);
+
+ T& at(const Key& key);
+ const T& at(const Key& key) const;
+
+ template <class... Args> eastl::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args);
+ template <class... Args> eastl::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args);
+ template <class... Args> iterator try_emplace(const_iterator position, const key_type& k, Args&&... args);
+ template <class... Args> iterator try_emplace(const_iterator position, key_type&& k, Args&&... args);
+
+ private:
+ template <class KFwd, class... Args>
+ eastl::pair<iterator, bool> try_emplace_forward(KFwd&& k, Args&&... args);
+
+ template <class KFwd, class... Args>
+ iterator try_emplace_forward(const_iterator hint, KFwd&& key, Args&&... args);
+ }; // map
+
+
+
+
+
+
+ /// multimap
+ ///
+ /// Implements a canonical multimap.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a multimap container, your pool
+ /// needs to contain items of type multimap::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+ /// typedef multimap<Widget, int, less<Widget>, MemoryPool> WidgetMap; // Delare your WidgetMap type.
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetMap myMap(&myPool); // Create a map that uses the pool.
+ ///
+ template <typename Key, typename T, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+ class multimap
+ : public rbtree<Key, eastl::pair<const Key, T>, Compare, Allocator, eastl::use_first<eastl::pair<const Key, T> >, true, false>
+ {
+ public:
+ typedef rbtree<Key, eastl::pair<const Key, T>, Compare, Allocator,
+ eastl::use_first<eastl::pair<const Key, T> >, true, false> base_type;
+ typedef multimap<Key, T, Compare, Allocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::key_type key_type;
+ typedef T mapped_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::insert_return_type insert_return_type;
+ typedef typename base_type::extract_key extract_key;
+ // Other types are inherited from the base class.
+
+ using base_type::begin;
+ using base_type::end;
+ using base_type::find;
+ using base_type::lower_bound;
+ using base_type::upper_bound;
+ using base_type::insert;
+ using base_type::erase;
+
+ protected:
+ using base_type::compare;
+ using base_type::get_compare;
+
+ public:
+ class value_compare
+ {
+ protected:
+ friend class multimap;
+ Compare compare;
+ value_compare(Compare c) : compare(c) {}
+
+ public:
+ typedef bool result_type;
+ typedef value_type first_argument_type;
+ typedef value_type second_argument_type;
+
+ bool operator()(const value_type& x, const value_type& y) const
+ { return compare(x.first, y.first); }
+ };
+
+ public:
+ multimap(const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR);
+ multimap(const Compare& compare, const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR);
+ multimap(const this_type& x);
+ multimap(this_type&& x);
+ multimap(this_type&& x, const allocator_type& allocator);
+ multimap(std::initializer_list<value_type> ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR);
+
+ template <typename Iterator>
+ multimap(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To consider: Make a second version of this function without a default arg.
+
+ this_type& operator=(const this_type& x) { return (this_type&)base_type::operator=(x); }
+ this_type& operator=(std::initializer_list<value_type> ilist) { return (this_type&)base_type::operator=(ilist); }
+ this_type& operator=(this_type&& x) { return (this_type&)base_type::operator=(eastl::move(x)); }
+
+ public:
+ /// This is an extension to the C++ standard. We insert a default-constructed
+ /// element with the given key. The reason for this is that we can avoid the
+ /// potentially expensive operation of creating and/or copying a mapped_type
+ /// object on the stack. Note that C++11 move insertions and variadic emplace
+ /// support make this extension mostly no longer necessary.
+ insert_return_type insert(const Key& key);
+
+ value_compare value_comp() const;
+
+ size_type erase(const Key& key);
+ size_type count(const Key& key) const;
+
+ eastl::pair<iterator, iterator> equal_range(const Key& key);
+ eastl::pair<const_iterator, const_iterator> equal_range(const Key& key) const;
+
+ /// equal_range_small
+ /// This is a special version of equal_range which is optimized for the
+ /// case of there being few or no duplicated keys in the tree.
+ eastl::pair<iterator, iterator> equal_range_small(const Key& key);
+ eastl::pair<const_iterator, const_iterator> equal_range_small(const Key& key) const;
+
+ private:
+ // these base member functions are not included in multimaps
+ using base_type::insert_or_assign;
+ }; // multimap
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // map
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline map<Key, T, Compare, Allocator>::map(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline map<Key, T, Compare, Allocator>::map(const Compare& compare, const allocator_type& allocator)
+ : base_type(compare, allocator)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline map<Key, T, Compare, Allocator>::map(const this_type& x)
+ : base_type(x)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline map<Key, T, Compare, Allocator>::map(this_type&& x)
+ : base_type(eastl::move(x))
+ {
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline map<Key, T, Compare, Allocator>::map(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline map<Key, T, Compare, Allocator>::map(std::initializer_list<value_type> ilist, const Compare& compare, const allocator_type& allocator)
+ : base_type(ilist.begin(), ilist.end(), compare, allocator)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ template <typename Iterator>
+ inline map<Key, T, Compare, Allocator>::map(Iterator itBegin, Iterator itEnd)
+ : base_type(itBegin, itEnd, Compare(), EASTL_MAP_DEFAULT_ALLOCATOR)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename map<Key, T, Compare, Allocator>::insert_return_type
+ map<Key, T, Compare, Allocator>::insert(const Key& key)
+ {
+ return base_type::DoInsertKey(true_type(), key);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename map<Key, T, Compare, Allocator>::value_compare
+ map<Key, T, Compare, Allocator>::value_comp() const
+ {
+ return value_compare(get_compare());
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename map<Key, T, Compare, Allocator>::size_type
+ map<Key, T, Compare, Allocator>::erase(const Key& key)
+ {
+ const iterator it(find(key));
+
+ if(it != end()) // If it exists...
+ {
+ base_type::erase(it);
+ return 1;
+ }
+ return 0;
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename map<Key, T, Compare, Allocator>::size_type
+ map<Key, T, Compare, Allocator>::count(const Key& key) const
+ {
+ const const_iterator it(find(key));
+ return (it != end()) ? 1 : 0;
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline eastl::pair<typename map<Key, T, Compare, Allocator>::iterator,
+ typename map<Key, T, Compare, Allocator>::iterator>
+ map<Key, T, Compare, Allocator>::equal_range(const Key& key)
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ const iterator itLower(lower_bound(key));
+
+ if((itLower == end()) || compare(key, itLower.mpNode->mValue.first)) // If at the end or if (key is < itLower)...
+ return eastl::pair<iterator, iterator>(itLower, itLower);
+
+ iterator itUpper(itLower);
+ return eastl::pair<iterator, iterator>(itLower, ++itUpper);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline eastl::pair<typename map<Key, T, Compare, Allocator>::const_iterator,
+ typename map<Key, T, Compare, Allocator>::const_iterator>
+ map<Key, T, Compare, Allocator>::equal_range(const Key& key) const
+ {
+ // See equal_range above for comments.
+ const const_iterator itLower(lower_bound(key));
+
+ if((itLower == end()) || compare(key, itLower.mpNode->mValue.first)) // If at the end or if (key is < itLower)...
+ return eastl::pair<const_iterator, const_iterator>(itLower, itLower);
+
+ const_iterator itUpper(itLower);
+ return eastl::pair<const_iterator, const_iterator>(itLower, ++itUpper);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline T& map<Key, T, Compare, Allocator>::operator[](const Key& key)
+ {
+ iterator itLower(lower_bound(key)); // itLower->first is >= key.
+
+ if((itLower == end()) || compare(key, (*itLower).first))
+ {
+ itLower = base_type::DoInsertKey(true_type(), itLower, key);
+ }
+
+ return (*itLower).second;
+
+ // Reference implementation of this function, which may not be as fast:
+ //iterator it(base_type::insert(eastl::pair<iterator, iterator>(key, T())).first);
+ //return it->second;
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline T& map<Key, T, Compare, Allocator>::operator[](Key&& key)
+ {
+ iterator itLower(lower_bound(key)); // itLower->first is >= key.
+
+ if((itLower == end()) || compare(key, (*itLower).first))
+ {
+ itLower = base_type::DoInsertKey(true_type(), itLower, eastl::move(key));
+ }
+
+ return (*itLower).second;
+
+ // Reference implementation of this function, which may not be as fast:
+ //iterator it(base_type::insert(eastl::pair<iterator, iterator>(key, T())).first);
+ //return it->second;
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline synth_three_way_result<eastl::pair<const Key, T>> operator<=>(const map<Key, T, Compare, Allocator>& a,
+ const map<Key, T, Compare, Allocator>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+#endif
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline T& map<Key, T, Compare, Allocator>::at(const Key& key)
+ {
+ // use the use const version of ::at to remove duplication
+ return const_cast<T&>(const_cast<map<Key, T, Compare, Allocator> const*>(this)->at(key));
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline const T& map<Key, T, Compare, Allocator>::at(const Key& key) const
+ {
+ const_iterator candidate = this->find(key);
+
+ if (candidate == end())
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw std::out_of_range("map::at key does not exist");
+ #else
+ EASTL_FAIL_MSG("map::at key does not exist");
+ #endif
+ }
+
+ return candidate->second;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // erase_if
+ //
+ // https://en.cppreference.com/w/cpp/container/map/erase_if
+ ///////////////////////////////////////////////////////////////////////
+ template <class Key, class T, class Compare, class Allocator, class Predicate>
+ typename map<Key, T, Compare, Allocator>::size_type erase_if(map<Key, T, Compare, Allocator>& c, Predicate predicate)
+ {
+ auto oldSize = c.size();
+ for (auto i = c.begin(), last = c.end(); i != last;)
+ {
+ if (predicate(*i))
+ {
+ i = c.erase(i);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ return oldSize - c.size();
+ }
+
+
+ template <class Key, class T, class Compare, class Allocator>
+ template <class... Args>
+ inline eastl::pair<typename map<Key, T, Compare, Allocator>::iterator, bool>
+ map<Key, T, Compare, Allocator>::try_emplace(const key_type& key, Args&&... args)
+ {
+ return try_emplace_forward(key, eastl::forward<Args>(args)...);
+ }
+
+ template <class Key, class T, class Compare, class Allocator>
+ template <class... Args>
+ inline eastl::pair<typename map<Key, T, Compare, Allocator>::iterator, bool>
+ map<Key, T, Compare, Allocator>::try_emplace(key_type&& key, Args&&... args)
+ {
+ return try_emplace_forward(eastl::move(key), eastl::forward<Args>(args)...);
+ }
+
+ template <class Key, class T, class Compare, class Allocator>
+ template <class KFwd, class... Args>
+ inline eastl::pair<typename map<Key, T, Compare, Allocator>::iterator, bool>
+ map<Key, T, Compare, Allocator>::try_emplace_forward(KFwd&& key, Args&&... args)
+ {
+ bool canInsert;
+ node_type* const pPosition = base_type::DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+ if (!canInsert)
+ {
+ return pair<iterator, bool>(iterator(pPosition), false);
+ }
+ node_type* const pNodeNew =
+ base_type::DoCreateNode(piecewise_construct, eastl::forward_as_tuple(eastl::forward<KFwd>(key)),
+ eastl::forward_as_tuple(eastl::forward<Args>(args)...));
+ // the key might be moved above, so we can't re-use it,
+ // we need to get it back from the node's value.
+ const auto& k = extract_key{}(pNodeNew->mValue);
+ const iterator itResult(base_type::DoInsertValueImpl(pPosition, false, k, pNodeNew));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ template <class Key, class T, class Compare, class Allocator>
+ template <class... Args>
+ inline typename map<Key, T, Compare, Allocator>::iterator
+ map<Key, T, Compare, Allocator>::try_emplace(const_iterator hint, const key_type& key, Args&&... args)
+ {
+ return try_emplace_forward(hint, key, eastl::forward<Args>(args)...);
+ }
+
+ template <class Key, class T, class Compare, class Allocator>
+ template <class... Args>
+ inline typename map<Key, T, Compare, Allocator>::iterator
+ map<Key, T, Compare, Allocator>::try_emplace(const_iterator hint, key_type&& key, Args&&... args)
+ {
+ return try_emplace_forward(hint, eastl::move(key), eastl::forward<Args>(args)...);
+ }
+
+ template <class Key, class T, class Compare, class Allocator>
+ template <class KFwd, class... Args>
+ inline typename map<Key, T, Compare, Allocator>::iterator
+ map<Key, T, Compare, Allocator>::try_emplace_forward(const_iterator hint, KFwd&& key, Args&&... args)
+ {
+ bool bForceToLeft;
+ node_type* const pPosition = base_type::DoGetKeyInsertionPositionUniqueKeysHint(hint, bForceToLeft, key);
+
+ if (!pPosition)
+ {
+ // the hint didn't help, we need to do a normal insert.
+ return try_emplace_forward(eastl::forward<KFwd>(key), eastl::forward<Args>(args)...).first;
+ }
+
+ node_type* const pNodeNew =
+ base_type::DoCreateNode(piecewise_construct, eastl::forward_as_tuple(eastl::forward<KFwd>(key)),
+ eastl::forward_as_tuple(eastl::forward<Args>(args)...));
+ // the key might be moved above, so we can't re-use it,
+ // we need to get it back from the node's value.
+ return base_type::DoInsertValueImpl(pPosition, bForceToLeft, extract_key{}(pNodeNew->mValue), pNodeNew);
+ }
+
+ ///////////////////////////////////////////////////////////////////////
+ // multimap
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline multimap<Key, T, Compare, Allocator>::multimap(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline multimap<Key, T, Compare, Allocator>::multimap(const Compare& compare, const allocator_type& allocator)
+ : base_type(compare, allocator)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline multimap<Key, T, Compare, Allocator>::multimap(const this_type& x)
+ : base_type(x)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline multimap<Key, T, Compare, Allocator>::multimap(this_type&& x)
+ : base_type(eastl::move(x))
+ {
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline multimap<Key, T, Compare, Allocator>::multimap(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline multimap<Key, T, Compare, Allocator>::multimap(std::initializer_list<value_type> ilist, const Compare& compare, const allocator_type& allocator)
+ : base_type(ilist.begin(), ilist.end(), compare, allocator)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ template <typename Iterator>
+ inline multimap<Key, T, Compare, Allocator>::multimap(Iterator itBegin, Iterator itEnd)
+ : base_type(itBegin, itEnd, Compare(), EASTL_MULTIMAP_DEFAULT_ALLOCATOR)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename multimap<Key, T, Compare, Allocator>::insert_return_type
+ multimap<Key, T, Compare, Allocator>::insert(const Key& key)
+ {
+ return base_type::DoInsertKey(false_type(), key);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename multimap<Key, T, Compare, Allocator>::value_compare
+ multimap<Key, T, Compare, Allocator>::value_comp() const
+ {
+ return value_compare(get_compare());
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename multimap<Key, T, Compare, Allocator>::size_type
+ multimap<Key, T, Compare, Allocator>::erase(const Key& key)
+ {
+ const eastl::pair<iterator, iterator> range(equal_range(key));
+ const size_type n = (size_type)eastl::distance(range.first, range.second);
+ base_type::erase(range.first, range.second);
+ return n;
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename multimap<Key, T, Compare, Allocator>::size_type
+ multimap<Key, T, Compare, Allocator>::count(const Key& key) const
+ {
+ const eastl::pair<const_iterator, const_iterator> range(equal_range(key));
+ return (size_type)eastl::distance(range.first, range.second);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline eastl::pair<typename multimap<Key, T, Compare, Allocator>::iterator,
+ typename multimap<Key, T, Compare, Allocator>::iterator>
+ multimap<Key, T, Compare, Allocator>::equal_range(const Key& key)
+ {
+ // There are multiple ways to implement equal_range. The implementation mentioned
+ // in the C++ standard and which is used by most (all?) commercial STL implementations
+ // is this:
+ // return eastl::pair<iterator, iterator>(lower_bound(key), upper_bound(key));
+ //
+ // This does two tree searches -- one for the lower bound and one for the
+ // upper bound. This works well for the case whereby you have a large container
+ // and there are lots of duplicated values. We provide an alternative version
+ // of equal_range called equal_range_small for cases where the user is confident
+ // that the number of duplicated items is only a few.
+
+ return eastl::pair<iterator, iterator>(lower_bound(key), upper_bound(key));
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline eastl::pair<typename multimap<Key, T, Compare, Allocator>::const_iterator,
+ typename multimap<Key, T, Compare, Allocator>::const_iterator>
+ multimap<Key, T, Compare, Allocator>::equal_range(const Key& key) const
+ {
+ // See comments above in the non-const version of equal_range.
+ return eastl::pair<const_iterator, const_iterator>(lower_bound(key), upper_bound(key));
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline eastl::pair<typename multimap<Key, T, Compare, Allocator>::iterator,
+ typename multimap<Key, T, Compare, Allocator>::iterator>
+ multimap<Key, T, Compare, Allocator>::equal_range_small(const Key& key)
+ {
+ // We provide alternative version of equal_range here which works faster
+ // for the case where there are at most small number of potential duplicated keys.
+ const iterator itLower(lower_bound(key));
+ iterator itUpper(itLower);
+
+ while((itUpper != end()) && !compare(key, itUpper.mpNode->mValue.first))
+ ++itUpper;
+
+ return eastl::pair<iterator, iterator>(itLower, itUpper);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline eastl::pair<typename multimap<Key, T, Compare, Allocator>::const_iterator,
+ typename multimap<Key, T, Compare, Allocator>::const_iterator>
+ multimap<Key, T, Compare, Allocator>::equal_range_small(const Key& key) const
+ {
+ // We provide alternative version of equal_range here which works faster
+ // for the case where there are at most small number of potential duplicated keys.
+ const const_iterator itLower(lower_bound(key));
+ const_iterator itUpper(itLower);
+
+ while((itUpper != end()) && !compare(key, itUpper.mpNode->mValue.first))
+ ++itUpper;
+
+ return eastl::pair<const_iterator, const_iterator>(itLower, itUpper);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // erase_if
+ //
+ // https://en.cppreference.com/w/cpp/container/multimap/erase_if
+ ///////////////////////////////////////////////////////////////////////
+ template <class Key, class T, class Compare, class Allocator, class Predicate>
+ typename multimap<Key, T, Compare, Allocator>::size_type erase_if(multimap<Key, T, Compare, Allocator>& c, Predicate predicate)
+ {
+ auto oldSize = c.size();
+ // Erases all elements that satisfy the predicate pred from the container.
+ for (auto i = c.begin(), last = c.end(); i != last;)
+ {
+ if (predicate(*i))
+ {
+ i = c.erase(i);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ return oldSize - c.size();
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline synth_three_way_result<eastl::pair<const Key, T>> operator<=>(const multimap<Key, T, Compare, Allocator>& a,
+ const multimap<Key, T, Compare, Allocator>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+#endif
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline synth_three_way_result<eastl::pair<const Key, T>> operator<=>(const multimap<Key, T, Compare, Allocator>& a,
+ const multimap<Key, T, Compare, Allocator>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+#endif
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/EASTL/include/EASTL/memory.h b/EASTL/include/EASTL/memory.h
new file mode 100644
index 0000000..ab2798f
--- /dev/null
+++ b/EASTL/include/EASTL/memory.h
@@ -0,0 +1,1726 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the following functions from the C++ standard that
+// are found in the <memory> header:
+//
+// Temporary memory:
+// get_temporary_buffer
+// return_temporary_buffer
+//
+// Utility:
+// late_constructed - Extention to standard functionality.
+//
+// Uninitialized operations:
+// These are the same as the copy, fill, and fill_n algorithms, except that
+// they *construct* the destination with the source values rather than assign
+// the destination with the source values.
+//
+// uninitialized_copy
+// uninitialized_copy_n
+// uninitialized_default_construct
+// uninitialized_default_construct_n
+// uninitialized_move
+// uninitialized_move_if_noexcept - Extention to standard functionality.
+// uninitialized_move_n
+// uninitialized_fill
+// uninitialized_fill_n
+// uninitialized_value_construct
+// uninitialized_value_construct_n
+// uninitialized_default_fill - Extention to standard functionality.
+// uninitialized_default_fill_n - Extention to standard functionality.
+// uninitialized_relocate - Extention to standard functionality.
+// uninitialized_copy_ptr - Extention to standard functionality.
+// uninitialized_move_ptr - Extention to standard functionality.
+// uninitialized_move_ptr_if_noexcept- Extention to standard functionality.
+// uninitialized_fill_ptr - Extention to standard functionality.
+// uninitialized_fill_n_ptr - Extention to standard functionality.
+// uninitialized_copy_fill - Extention to standard functionality.
+// uninitialized_fill_copy - Extention to standard functionality.
+// uninitialized_copy_copy - Extention to standard functionality.
+//
+// In-place destructor helpers:
+// destruct(T*) - Non-standard extension.
+// destruct(first, last) - Non-standard extension.
+// destroy_at(T*)
+// destroy(first, last)
+// destroy_n(first, n)
+//
+// Alignment
+// align
+// align_advance - Extention to standard functionality.
+//
+// Allocator-related
+// uses_allocator
+// allocator_arg_t
+// allocator_arg
+//
+// Pointers
+// pointer_traits
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_MEMORY_H
+#define EASTL_MEMORY_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/internal/generic_iterator.h>
+#include <EASTL/internal/pair_fwd_decls.h>
+#include <EASTL/internal/functional_base.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/utility.h>
+#include <EASTL/numeric_limits.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stdlib.h>
+#include <new>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4146 - unary minus operator applied to unsigned type, result still unsigned
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4530 4146 4571);
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+namespace eastl
+{
+
+ /// EASTL_TEMP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_TEMP_DEFAULT_NAME
+ #define EASTL_TEMP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " temp" // Unless the user overrides something, this is "EASTL temp".
+ #endif
+
+
+ /// get_temporary_buffer
+ ///
+ /// From the C++ standard, section 20.4.3:
+ /// 1 Effects: Obtains a pointer to storage sufficient to store up to n adjacent T objects.
+ /// 2 Returns: A pair containing the buffer's address and capacity (in the units of sizeof(T)),
+ /// or a pair of 0 values if no storage can be obtained.
+ ///
+ /// Note: The return value is space to hold T elements, but no T elements are constructed.
+ ///
+ /// Our implementation here differs slightly in that we have alignment, alignmentOffset, and pName arguments.
+ /// Note that you can use the EASTL_NAME_VAL macro to make names go away in release builds.
+ ///
+ /// Example usage:
+ /// pair<int*, ptrdiff_t> pr = get_temporary_buffer<int>(100, 0, 0, EASTL_NAME_VAL("Temp int array"));
+ /// memset(pr.first, 0, 100 * sizeof(int));
+ /// return_temporary_buffer(pr.first);
+ ///
+ template <typename T>
+ eastl::pair<T*, ptrdiff_t> get_temporary_buffer(ptrdiff_t n, size_t alignment = 1, size_t alignmentOffset = 0, const char* pName = EASTL_TEMP_DEFAULT_NAME)
+ {
+ EASTLAllocatorType allocator(*EASTLAllocatorDefault(), pName);
+ return eastl::pair<T*, ptrdiff_t>(static_cast<T*>(EASTLAllocAligned(allocator, n * sizeof(T), alignment, alignmentOffset)), n);
+ }
+
+
+ /// return_temporary_buffer
+ ///
+ /// From the C++ standard, section 20.4.3:
+ /// 3 Effects: Deallocates the buffer to which p points.
+ /// 4 Requires: The buffer shall have been previously allocated by get_temporary_buffer.
+ ///
+ /// Note: This function merely frees space and does not destruct any T elements.
+ ///
+ /// Example usage:
+ /// pair<int*, ptrdiff_t> pr = get_temporary_buffer<int>(300);
+ /// memset(pr.first, 0, 300 * sizeof(int));
+ /// return_temporary_buffer(pr.first, pr.second);
+ ///
+ template <typename T>
+ void return_temporary_buffer(T* p, ptrdiff_t n = 0)
+ {
+ EASTLAllocatorType& allocator(*EASTLAllocatorDefault());
+ EASTLFree(allocator, p, n * sizeof(T));
+ }
+
+
+
+ /// late_constructed
+ ///
+ /// Implements a smart pointer type which separates the memory allocation of an object from
+ /// the object's construction. The primary use case is to declare a global variable of the
+ /// late_construction type, which allows the memory to be global but the constructor executes
+ /// at some point after main() begins as opposed to before main, which is often dangerous
+ /// for non-trivial types.
+ ///
+ /// The autoConstruct template parameter controls whether the object is automatically default
+ /// constructed upon first reference or must be manually constructed upon the first use of
+ /// operator * or ->. autoConstruct is convenient but it causes * and -> to be slightly slower
+ /// and may result in construction at an inconvenient time.
+ ///
+ /// The autoDestruct template parameter controls whether the object, if constructed, is automatically
+ /// destructed when ~late_constructed() is called or must be manually destructed via a call to
+ /// destruct().
+ ///
+ /// While construction can be automatic or manual, automatic destruction support is always present.
+ /// Thus you aren't required in any case to manually call destruct. However, you may safely manually
+ /// destruct the object at any time before the late_constructed destructor is executed.
+ ///
+ /// You may still use late_constructed after calling destruct(), including calling construct()
+ /// again to reconstruct the instance. destruct returns the late_constructed instance to a
+ /// state equivalent to before construct was called.
+ ///
+ /// Caveat: While late_constructed instances can be declared in global scope and initialize
+ /// prior to main() executing, you cannot otherwise use such globally declared instances prior
+ /// to main with guaranteed behavior unless you can ensure that the late_constructed instance
+ /// is itself constructed prior to your use of it.
+ ///
+ /// Example usage (demonstrating manual-construction):
+ /// late_constructed<Widget, false> gWidget;
+ ///
+ /// void main(){
+ /// gWidget.construct(kScrollbarType, kVertical, "MyScrollbar");
+ /// gWidget->SetValue(15);
+ /// gWidget.destruct();
+ /// }
+ ///
+ /// Example usage (demonstrating auto-construction):
+ /// late_constructed<Widget, true> gWidget;
+ ///
+ /// void main(){
+ /// gWidget->SetValue(15);
+ /// // You may want to call destruct here, but aren't required to do so unless the Widget type requires it.
+ /// }
+ ///
+ template <typename T, bool autoConstruct = true, bool autoDestruct = true>
+ class late_constructed
+ {
+ public:
+ using this_type = late_constructed<T, autoConstruct, autoDestruct>;
+ using value_type = T;
+ using storage_type = eastl::aligned_storage_t<sizeof(value_type), eastl::alignment_of_v<value_type>>;
+
+ late_constructed() EA_NOEXCEPT // In the case of the late_constructed instance being at global scope, we rely on the
+ : mStorage(), mpValue(nullptr) {} // compiler executing this constructor or placing the instance in auto-zeroed-at-startup memory.
+
+ ~late_constructed()
+ {
+ if (autoDestruct && mpValue)
+ (*mpValue).~value_type();
+ }
+
+ template <typename... Args>
+ void construct(Args&&... args)
+ {
+ if(!mpValue)
+ mpValue = new (&mStorage) value_type(eastl::forward<Args>(args)...);
+ }
+
+ bool is_constructed() const EA_NOEXCEPT
+ { return mpValue != nullptr; }
+
+ void destruct()
+ {
+ if(mpValue)
+ {
+ (*mpValue).~value_type();
+ mpValue = nullptr;
+ }
+ }
+
+ value_type& operator*() EA_NOEXCEPT
+ {
+ if(!mpValue)
+ construct();
+
+ EA_ANALYSIS_ASSUME(mpValue);
+ return *mpValue;
+ }
+
+ const value_type& operator*() const EA_NOEXCEPT
+ {
+ if(!mpValue)
+ construct();
+
+ EA_ANALYSIS_ASSUME(mpValue);
+ return *mpValue;
+ }
+
+ value_type* operator->() EA_NOEXCEPT
+ {
+ if(!mpValue)
+ construct();
+ return mpValue;
+ }
+
+ const value_type* operator->() const EA_NOEXCEPT
+ {
+ if(!mpValue)
+ construct();
+ return mpValue;
+ }
+
+ value_type* get() EA_NOEXCEPT
+ {
+ if(!mpValue)
+ construct();
+ return mpValue;
+ }
+
+ const value_type* get() const EA_NOEXCEPT
+ {
+ if(!mpValue)
+ construct();
+ return mpValue;
+ }
+
+ protected:
+ storage_type mStorage; // Declared first because it may have aligment requirements, and it would be more space-efficient if it was first.
+ value_type* mpValue;
+ };
+
+
+ // Specialization that doesn't auto-construct on demand.
+ template <typename T, bool autoDestruct>
+ class late_constructed<T, false, autoDestruct> : public late_constructed<T, true, autoDestruct>
+ {
+ public:
+ typedef late_constructed<T, true, autoDestruct> base_type;
+
+ typename base_type::value_type& operator*() EA_NOEXCEPT
+ { EASTL_ASSERT(base_type::mpValue); return *base_type::mpValue; }
+
+ const typename base_type::value_type& operator*() const EA_NOEXCEPT
+ { EASTL_ASSERT(base_type::mpValue); return *base_type::mpValue; }
+
+ typename base_type::value_type* operator->() EA_NOEXCEPT
+ { EASTL_ASSERT(base_type::mpValue); return base_type::mpValue; }
+
+ const typename base_type::value_type* operator->() const EA_NOEXCEPT
+ { EASTL_ASSERT(base_type::mpValue); return base_type::mpValue; }
+
+ typename base_type::value_type* get() EA_NOEXCEPT
+ { return base_type::mpValue; }
+
+ const typename base_type::value_type* get() const EA_NOEXCEPT
+ { return base_type::mpValue; }
+ };
+
+
+
+ /// raw_storage_iterator
+ ///
+ /// From the C++11 Standard, section 20.6.10 p1
+ /// raw_storage_iterator is provided to enable algorithms to store their results into uninitialized memory.
+ /// The formal template parameter OutputIterator is required to have its operator* return an object for
+ /// which operator& is defined and returns a pointer to T, and is also required to satisfy the requirements
+ /// of an output iterator (24.2.4).
+
+ template <typename OutputIterator, typename T>
+ class raw_storage_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+ {
+ protected:
+ OutputIterator mIterator;
+
+ public:
+ explicit raw_storage_iterator(OutputIterator iterator)
+ : mIterator(iterator)
+ {
+ }
+
+ raw_storage_iterator& operator*()
+ {
+ return *this;
+ }
+
+ raw_storage_iterator& operator=(const T& value)
+ {
+ ::new(eastl::addressof(*mIterator)) T(value);
+ return *this;
+ }
+
+ raw_storage_iterator<OutputIterator, T>& operator++()
+ {
+ ++mIterator;
+ return *this;
+ }
+
+ raw_storage_iterator<OutputIterator, T> operator++(int)
+ {
+ raw_storage_iterator<OutputIterator, T> tempIterator = *this;
+ ++mIterator;
+ return tempIterator;
+ }
+ };
+
+
+ /// uninitialized_relocate (formerly named uninitialized_move prior to C++11)
+ ///
+ /// This utility is deprecated in favor of C++11 rvalue move functionality.
+ ///
+ /// uninitialized_relocate takes a constructed sequence of objects and an
+ /// uninitialized destination buffer. In the case of any exception thrown
+ /// while moving the objects, any newly constructed objects are guaranteed
+ /// to be destructed and the input left fully constructed.
+ ///
+ /// In the case where you need to do multiple moves atomically, split the
+ /// calls into uninitialized_relocate_start/abort/commit.
+ ///
+ /// uninitialized_relocate_start can possibly throw an exception. If it does,
+ /// you don't need to do anything. However, if it returns without throwing
+ /// an exception you need to guarantee that either uninitialized_relocate_abort
+ /// or uninitialized_relocate_commit is called.
+ ///
+ /// Both uninitialized_relocate_abort and uninitialize_move_commit are
+ /// guaranteed to not throw C++ exceptions.
+ namespace Internal
+ {
+ template <bool hasTrivialMove, typename iteratorTag>
+ struct uninitialized_relocate_impl
+ {
+ template <typename ForwardIterator, typename ForwardIteratorDest>
+ static ForwardIteratorDest do_move_start(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ ForwardIteratorDest origDest(dest);
+ try
+ {
+ #endif
+ for(; first != last; ++first, ++dest)
+ ::new((void*)eastl::addressof(*dest)) value_type(*first);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; origDest < dest; ++origDest)
+ (*origDest).~value_type();
+ throw;
+ }
+ #endif
+
+ return dest;
+ }
+
+ template <typename ForwardIterator, typename ForwardIteratorDest>
+ static ForwardIteratorDest do_move_commit(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) //throw()
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ for(; first != last; ++first, ++dest)
+ (*first).~value_type();
+
+ return dest;
+ }
+
+ template <typename ForwardIterator, typename ForwardIteratorDest>
+ static ForwardIteratorDest do_move_abort(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) //throw()
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ for(; first != last; ++first, ++dest)
+ (*dest).~value_type();
+ return dest;
+ }
+ };
+
+ template <>
+ struct uninitialized_relocate_impl<true, EASTL_ITC_NS::random_access_iterator_tag>
+ {
+ template <typename T>
+ static T* do_move_start(T* first, T* last, T* dest)
+ {
+ if (EASTL_UNLIKELY(first == last))
+ return dest;
+
+ return (T*)memcpy(dest, first, (size_t)((uintptr_t)last - (uintptr_t)first)) + (last - first);
+ }
+
+ template <typename T>
+ static T* do_move_commit(T* first, T* last, T* dest)
+ {
+ return dest + (last - first);
+ }
+
+ template <typename T>
+ static T* do_move_abort(T* first, T* last, T* dest)
+ {
+ return dest + (last - first);
+ }
+ };
+ }
+
+
+ /// uninitialized_relocate_start, uninitialized_relocate_commit, uninitialized_relocate_abort
+ ///
+ /// This utility is deprecated in favor of C++11 rvalue move functionality.
+ ///
+ /// After calling uninitialized_relocate_start, if it doesn't throw an exception,
+ /// both the source and destination iterators point to undefined data. If it
+ /// does throw an exception, the destination remains uninitialized and the source
+ /// is as it was before.
+ ///
+ /// In order to make the iterators valid again you need to call either uninitialized_relocate_abort
+ /// or uninitialized_relocate_commit. The abort call makes the original source
+ /// iterator valid again, and commit makes the destination valid. Both abort
+ /// and commit are guaranteed to not throw C++ exceptions.
+ ///
+ /// Example usage:
+ /// iterator dest2 = uninitialized_relocate_start(first, last, dest);
+ /// try {
+ /// // some code here that might throw an exception
+ /// }
+ /// catch(...)
+ /// {
+ /// uninitialized_relocate_abort(first, last, dest);
+ /// throw;
+ /// }
+ /// uninitialized_relocate_commit(first, last, dest);
+ ///
+ template <typename ForwardIterator, typename ForwardIteratorDest>
+ inline ForwardIteratorDest uninitialized_relocate_start(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type_input;
+ typedef typename eastl::iterator_traits<ForwardIteratorDest>::value_type value_type_output;
+
+ const bool bHasTrivialMove = type_and<has_trivial_relocate<value_type_input>::value,
+ is_pointer<ForwardIterator>::value,
+ is_pointer<ForwardIteratorDest>::value,
+ is_same<value_type_input, value_type_output>::value>::value;
+
+ return Internal::uninitialized_relocate_impl<bHasTrivialMove, IC>::do_move_start(first, last, dest);
+ }
+
+ template <typename ForwardIterator, typename ForwardIteratorDest>
+ inline ForwardIteratorDest uninitialized_relocate_commit(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type_input;
+ typedef typename eastl::iterator_traits<ForwardIteratorDest>::value_type value_type_output;
+
+ const bool bHasTrivialMove = type_and<has_trivial_relocate<value_type_input>::value,
+ is_pointer<ForwardIterator>::value,
+ is_pointer<ForwardIteratorDest>::value,
+ is_same<value_type_input, value_type_output>::value>::value;
+
+ return Internal::uninitialized_relocate_impl<bHasTrivialMove, IC>::do_move_commit(first, last, dest);
+ }
+
+ template <typename ForwardIterator, typename ForwardIteratorDest>
+ inline ForwardIteratorDest uninitialized_relocate_abort(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type_input;
+ typedef typename eastl::iterator_traits<ForwardIteratorDest>::value_type value_type_output;
+
+ const bool bHasTrivialMove = type_and<has_trivial_relocate<value_type_input>::value,
+ is_pointer<ForwardIterator>::value,
+ is_pointer<ForwardIteratorDest>::value,
+ is_same<value_type_input, value_type_output>::value>::value;
+
+ return Internal::uninitialized_relocate_impl<bHasTrivialMove, IC>::do_move_abort(first, last, dest);
+ }
+
+ /// uninitialized_relocate
+ ///
+ /// See above for documentation.
+ ///
+ template <typename ForwardIterator, typename ForwardIteratorDest>
+ inline ForwardIteratorDest uninitialized_relocate(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+ {
+ ForwardIteratorDest result = uninitialized_relocate_start(first, last, dest);
+ eastl::uninitialized_relocate_commit(first, last, dest);
+
+ return result;
+ }
+
+
+
+
+
+ // uninitialized_copy
+ //
+ namespace Internal
+ {
+ template <typename InputIterator, typename ForwardIterator>
+ inline ForwardIterator uninitialized_copy_impl(InputIterator first, InputIterator last, ForwardIterator dest, true_type)
+ {
+ return eastl::copy(first, last, dest); // The copy() in turn will use memcpy for POD types.
+ }
+
+ template <typename InputIterator, typename ForwardIterator>
+ inline ForwardIterator uninitialized_copy_impl(InputIterator first, InputIterator last, ForwardIterator dest, false_type)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(dest);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; first != last; ++first, ++currentDest)
+ ::new(static_cast<void*>(eastl::addressof(*currentDest))) value_type(*first);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; dest < currentDest; ++dest)
+ (*dest).~value_type();
+ throw;
+ }
+ #endif
+
+ return currentDest;
+ }
+ }
+
+ /// uninitialized_copy
+ ///
+ /// Copies a source range to a destination, copy-constructing the destination with
+ /// the source values (and not *assigning* the destination with the source values).
+ /// Returns the end of the destination range (i.e. dest + (last - first)).
+ ///
+ /// Declaration:
+ /// template <typename InputIterator, typename ForwardIterator>
+ /// ForwardIterator uninitialized_copy(InputIterator sourceFirst, InputIterator sourceLast, ForwardIterator destination);
+ ///
+ /// Example usage:
+ /// SomeClass* pArray = malloc(10 * sizeof(SomeClass));
+ /// uninitialized_copy(pSourceDataBegin, pSourceDataBegin + 10, pArray);
+ ///
+ template <typename InputIterator, typename ForwardIterator>
+ inline ForwardIterator uninitialized_copy(InputIterator first, InputIterator last, ForwardIterator result)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+ // We use is_trivial, which in the C++11 Standard means is_trivially_copyable and is_trivially_default_constructible.
+ return Internal::uninitialized_copy_impl(first, last, result, eastl::is_trivial<value_type>());
+ }
+
+
+ /// uninitialized_copy_n
+ ///
+ /// Copies count elements from a range beginning at first to an uninitialized memory area
+ /// beginning at dest. The elements in the uninitialized area are constructed using copy constructor.
+ /// If an exception is thrown during the initialization, the function has no final effects.
+ ///
+ /// first: Beginning of the range of the elements to copy.
+ /// dest: Beginning of the destination range.
+ /// return value: Iterator of dest type to the element past the last element copied.
+ ///
+ namespace Internal
+ {
+ template <typename InputIterator, typename Count, typename ForwardIterator, typename IteratorTag>
+ struct uninitialized_copy_n_impl
+ {
+ static ForwardIterator impl(InputIterator first, Count n, ForwardIterator dest)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(dest);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; n > 0; --n, ++first, ++currentDest)
+ ::new((void*)(eastl::addressof(*currentDest))) value_type(*first);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; dest < currentDest; ++dest)
+ (*dest).~value_type();
+ throw;
+ }
+ #endif
+
+ return currentDest;
+ }
+ };
+
+ template <typename InputIterator, typename Count, typename ForwardIterator>
+ struct uninitialized_copy_n_impl<InputIterator, Count, ForwardIterator, EASTL_ITC_NS::random_access_iterator_tag>
+ {
+ static inline ForwardIterator impl(InputIterator first, Count n, ForwardIterator dest)
+ {
+ return eastl::uninitialized_copy(first, first + n, dest);
+ }
+ };
+ }
+
+ template<typename InputIterator, typename Count, typename ForwardIterator>
+ inline ForwardIterator uninitialized_copy_n(InputIterator first, Count n, ForwardIterator dest)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ return Internal::uninitialized_copy_n_impl<InputIterator, Count, ForwardIterator, IC>::impl(first, n, dest);
+ }
+
+
+
+ /// uninitialized_copy_ptr
+ ///
+ /// This is a specialization of uninitialized_copy for iterators that are pointers. We use it because
+ /// internally it uses generic_iterator to make pointers act like regular eastl::iterator.
+ ///
+ template <typename First, typename Last, typename Result>
+ inline Result uninitialized_copy_ptr(First first, Last last, Result result)
+ {
+ typedef typename eastl::iterator_traits<generic_iterator<Result, void> >::value_type value_type;
+ const generic_iterator<Result, void> i(Internal::uninitialized_copy_impl(eastl::generic_iterator<First, void>(first), // generic_iterator makes a pointer act like an iterator.
+ eastl::generic_iterator<Last, void>(last),
+ eastl::generic_iterator<Result, void>(result),
+ eastl::is_trivially_copy_assignable<value_type>()));
+ return i.base();
+ }
+
+
+
+ /// uninitialized_move_ptr
+ ///
+ /// This is a specialization of uninitialized_move for iterators that are pointers. We use it because
+ /// internally it uses generic_iterator to make pointers act like regular eastl::iterator.
+ ///
+ namespace Internal
+ {
+ template <typename InputIterator, typename ForwardIterator>
+ inline ForwardIterator uninitialized_move_impl(InputIterator first, InputIterator last, ForwardIterator dest, true_type)
+ {
+ return eastl::copy(first, last, dest); // The copy() in turn will use memcpy for is_trivially_copy_assignable (e.g. POD) types.
+ }
+
+ template <typename InputIterator, typename ForwardIterator>
+ inline ForwardIterator uninitialized_move_impl(InputIterator first, InputIterator last, ForwardIterator dest, false_type)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(dest);
+
+ // We must run a loop over every element and move-construct it at the new location.
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; first != last; ++first, ++currentDest)
+ ::new((void*)eastl::addressof(*currentDest)) value_type(eastl::move(*first)); // If value_type has a move constructor then it will be used here.
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ // We have a problem here: If an exception occurs while doing the loop below then we will
+ // have values that were moved from the source to the dest that may need to be moved back
+ // in the catch. What does the C++11 Standard say about this? And what happens if there's an
+ // exception while moving them back? We may want to trace through a conforming C++11 Standard
+ // Library to see what it does and do something similar. Given that rvalue references are
+ // objects that are going away, we may not need to move the values back, though that has the
+ // side effect of a certain kind of lost elements problem.
+ for(; dest < currentDest; ++dest)
+ (*dest).~value_type();
+ throw;
+ }
+ #endif
+
+ return currentDest;
+ }
+ }
+
+ template <typename First, typename Last, typename Result>
+ inline Result uninitialized_move_ptr(First first, Last last, Result dest)
+ {
+ typedef typename eastl::iterator_traits<generic_iterator<Result, void> >::value_type value_type;
+ const generic_iterator<Result, void> i(Internal::uninitialized_move_impl(eastl::generic_iterator<First, void>(first), // generic_iterator makes a pointer act like an iterator.
+ eastl::generic_iterator<Last, void>(last),
+ eastl::generic_iterator<Result, void>(dest),
+ eastl::is_trivially_copy_assignable<value_type>())); // is_trivially_copy_assignable identifies if copy assignment would be as valid as move assignment, which means we have the opportunity to memcpy/memmove optimization.
+ return i.base();
+ }
+
+
+
+
+ /// uninitialized_move
+ ///
+ /// Moves a source range to a destination, move-constructing the destination with
+ /// the source values (and not *assigning* the destination with the source values).
+ /// Returns the end of the destination range (i.e. dest + (last - first)).
+ ///
+ /// uninitialized_move is not part of any current C++ Standard, up to C++14.
+ ///
+ /// Declaration:
+ /// template <typename InputIterator, typename ForwardIterator>
+ /// ForwardIterator uninitialized_move(InputIterator sourceFirst, InputIterator sourceLast, ForwardIterator destination);
+ ///
+ /// Example usage:
+ /// SomeClass* pArray = malloc(10 * sizeof(SomeClass));
+ /// uninitialized_move(pSourceDataBegin, pSourceDataBegin + 10, pArray);
+ ///
+ template <typename InputIterator, typename ForwardIterator>
+ inline ForwardIterator uninitialized_move(InputIterator first, InputIterator last, ForwardIterator dest)
+ {
+ return eastl::uninitialized_copy(eastl::make_move_iterator(first), eastl::make_move_iterator(last), dest);
+ }
+
+
+ /// uninitialized_move_if_noexcept
+ ///
+ /// If the iterated type can be moved without exceptions, move construct the dest with the input. Else copy-construct
+ /// the dest witih the input. If move isn't supported by the compiler, do regular copy.
+ ///
+ template <typename InputIterator, typename ForwardIterator>
+ inline ForwardIterator uninitialized_move_if_noexcept(InputIterator first, InputIterator last, ForwardIterator dest)
+ {
+ return eastl::uninitialized_copy(eastl::make_move_if_noexcept_iterator(first), eastl::make_move_if_noexcept_iterator(last), dest);
+ }
+
+
+ /// uninitialized_move_ptr_if_noexcept
+ ///
+ template <typename First, typename Last, typename Result>
+ inline Result uninitialized_move_ptr_if_noexcept(First first, Last last, Result dest)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ return eastl::uninitialized_move_if_noexcept(first, last, dest);
+ #else
+ return eastl::uninitialized_move_ptr(first, last, dest);
+ #endif
+ }
+
+
+ /// uninitialized_move_n
+ ///
+ /// Moves count elements from a range beginning at first to an uninitialized memory area
+ /// beginning at dest. The elements in the uninitialized area are constructed using copy constructor.
+ /// If an exception is thrown during the initialization, the function has no final effects.
+ ///
+ /// first: Beginning of the range of the elements to move.
+ /// dest: Beginning of the destination range.
+ /// return value: Iterator of dest type to the element past the last element moved.
+ ///
+ template<typename InputIterator, typename Count, typename ForwardIterator>
+ inline ForwardIterator uninitialized_move_n(InputIterator first, Count n, ForwardIterator dest)
+ {
+ return eastl::uninitialized_copy_n(eastl::make_move_iterator(first), n, dest);
+ }
+
+ // Disable warning C4345 - behavior change: an object of POD type constructed with an initializer of the form ()
+ // will be default-initialized.
+ // This is the behavior we intend below.
+ EA_DISABLE_VC_WARNING(4345)
+ /// uninitialized_default_fill
+ ///
+ /// Default-constructs the elements in the destination range.
+ /// Returns void. It wouldn't be useful to return the end of the destination range,
+ /// as that is the same as the 'last' input parameter.
+ ///
+ /// Declaration:
+ /// template <typename ForwardIterator, typename T>
+ /// void uninitialized_default_fill(ForwardIterator destinationFirst, ForwardIterator destinationLast);
+ ///
+ template <typename ForwardIterator>
+ inline void uninitialized_default_fill(ForwardIterator first, ForwardIterator last)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(first);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for (; currentDest != last; ++currentDest)
+ ::new (eastl::addressof(*currentDest)) value_type();
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch (...)
+ {
+ for (; first < currentDest; ++first)
+ (*first).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+ /// uninitialized_default_fill_n
+ ///
+ /// Default-constructs the range of [first, first + n).
+ /// Returns void as per the C++ standard, though returning the end input iterator
+ /// value may be of use.
+ ///
+ /// Declaration:
+ /// template <typename ForwardIterator, typename Count, typename T>
+ /// void uninitialized_default_fill_n(ForwardIterator destination, Count n);
+ ///
+ namespace Internal
+ {
+ template <typename ForwardIterator, typename Count>
+ inline void uninitialized_default_fill_n_impl(ForwardIterator first, Count n, false_type)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(first);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for (; n > 0; --n, ++currentDest)
+ ::new (eastl::addressof(*currentDest)) value_type();
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch (...)
+ {
+ for (; first < currentDest; ++first)
+ (*first).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+ template <typename ForwardIterator, typename Count>
+ inline void uninitialized_default_fill_n_impl(ForwardIterator first, Count n, true_type)
+ {
+ if (EASTL_UNLIKELY(n == 0))
+ return;
+
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ memset(first, 0, sizeof(value_type) * n);
+ }
+ }
+
+ template <typename ForwardIterator, typename Count>
+ inline void uninitialized_default_fill_n(ForwardIterator first, Count n)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ Internal::uninitialized_default_fill_n_impl(first, n, is_scalar<value_type>());
+ }
+ EA_RESTORE_VC_WARNING()
+
+ /// uninitialized_default_construct
+ ///
+ /// Constructs objects in the uninitialized storage designated by the range [first, last) by default-initialization.
+ ///
+ /// Default-initialization:
+ /// If T is a class, the default constructor is called; otherwise, no initialization is done, resulting in
+ /// indeterminate values.
+ ///
+ /// http://en.cppreference.com/w/cpp/memory/uninitialized_default_construct
+ ///
+ template <typename ForwardIterator>
+ inline void uninitialized_default_construct(ForwardIterator first, ForwardIterator last)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(first);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for (; currentDest != last; ++currentDest)
+ ::new (eastl::addressof(*currentDest)) value_type;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch (...)
+ {
+ for (; first < currentDest; ++first)
+ (*first).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+ /// uninitialized_default_construct_n
+ ///
+ /// Constructs n objects in the uninitialized storage starting at first by default-initialization.
+ ///
+ /// http://en.cppreference.com/w/cpp/memory/uninitialized_default_construct_n
+ ///
+ template <typename ForwardIterator, typename Count>
+ inline ForwardIterator uninitialized_default_construct_n(ForwardIterator first, Count n)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(first);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for (; n > 0; --n, ++currentDest)
+ ::new (eastl::addressof(*currentDest)) value_type;
+ return currentDest;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch (...)
+ {
+ for (; first < currentDest; ++first)
+ (*first).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+ /// uninitialized_fill
+ ///
+ /// Copy-constructs the elements in the destination range with the given input value.
+ /// Returns void. It wouldn't be useful to return the end of the destination range,
+ /// as that is the same as the 'last' input parameter.
+ ///
+ /// Declaration:
+ /// template <typename ForwardIterator, typename T>
+ /// void uninitialized_fill(ForwardIterator destinationFirst, ForwardIterator destinationLast, const T& value);
+ ///
+ namespace Internal
+ {
+ template <typename ForwardIterator, typename T>
+ inline void uninitialized_fill_impl(ForwardIterator first, ForwardIterator last, const T& value, true_type)
+ {
+ eastl::fill(first, last, value);
+ }
+
+ template <typename ForwardIterator, typename T>
+ void uninitialized_fill_impl(ForwardIterator first, ForwardIterator last, const T& value, false_type)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(first);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; currentDest != last; ++currentDest)
+ ::new((void*)eastl::addressof(*currentDest)) value_type(value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; first < currentDest; ++first)
+ (*first).~value_type();
+ throw;
+ }
+ #endif
+ }
+ }
+
+ template <typename ForwardIterator, typename T>
+ inline void uninitialized_fill(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ Internal::uninitialized_fill_impl(first, last, value, eastl::is_trivially_copy_assignable<value_type>());
+ }
+
+ /// uninitialized_value_construct
+ ///
+ /// Constructs objects in the uninitialized storage range [first, last) by value-initialization.
+ ///
+ /// Value-Initialization:
+ /// If T is a class, the object is default-initialized (after being zero-initialized if T's default
+ /// constructor is not user-provided/deleted); otherwise, the object is zero-initialized.
+ ///
+ /// http://en.cppreference.com/w/cpp/memory/uninitialized_value_construct
+ ///
+ template <class ForwardIterator>
+ void uninitialized_value_construct(ForwardIterator first, ForwardIterator last)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(first);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for (; currentDest != last; ++currentDest)
+ ::new (eastl::addressof(*currentDest)) value_type();
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch (...)
+ {
+ for (; first < currentDest; ++first)
+ (*first).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+ /// uninitialized_value_construct_n
+ ///
+ /// Constructs n objects in the uninitialized storage starting at first by value-initialization.
+ ///
+ /// Value-Initialization:
+ /// If T is a class, the object is default-initialized (after being zero-initialized if T's default
+ /// constructor is not user-provided/deleted); otherwise, the object is zero-initialized.
+ ///
+ /// http://en.cppreference.com/w/cpp/memory/uninitialized_value_construct_n
+ ///
+ template <class ForwardIterator, class Count>
+ ForwardIterator uninitialized_value_construct_n(ForwardIterator first, Count n)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(first);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for (; n > 0; --n, ++currentDest)
+ ::new (eastl::addressof(*currentDest)) value_type();
+ return currentDest;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch (...)
+ {
+ for (; first < currentDest; ++first)
+ (*first).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+ /// uninitialized_fill_ptr
+ ///
+ /// This is a specialization of uninitialized_fill for iterators that are pointers.
+ /// It exists so that we can declare a value_type for the iterator, which you
+ /// can't do with a pointer by itself.
+ ///
+ template <typename T>
+ inline void uninitialized_fill_ptr(T* first, T* last, const T& value)
+ {
+ typedef typename eastl::iterator_traits<eastl::generic_iterator<T*, void> >::value_type value_type;
+ Internal::uninitialized_fill_impl(eastl::generic_iterator<T*, void>(first),
+ eastl::generic_iterator<T*, void>(last), value,
+ eastl::is_trivially_copy_assignable<value_type>());
+ }
+
+ /// uninitialized_fill_n
+ ///
+ /// Copy-constructs the range of [first, first + n) with the given input value.
+ /// Returns void as per the C++ standard, though returning the end input iterator
+ /// value may be of use.
+ ///
+ /// Declaration:
+ /// template <typename ForwardIterator, typename Count, typename T>
+ /// void uninitialized_fill_n(ForwardIterator destination, Count n, const T& value);
+ ///
+ namespace Internal
+ {
+ template <typename ForwardIterator, typename Count, typename T>
+ inline void uninitialized_fill_n_impl(ForwardIterator first, Count n, const T& value, true_type)
+ {
+ eastl::fill_n(first, n, value);
+ }
+
+ template <typename ForwardIterator, typename Count, typename T>
+ void uninitialized_fill_n_impl(ForwardIterator first, Count n, const T& value, false_type)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ ForwardIterator currentDest(first);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; n > 0; --n, ++currentDest)
+ ::new((void*)eastl::addressof(*currentDest)) value_type(value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; first < currentDest; ++first)
+ (*first).~value_type();
+ throw;
+ }
+ #endif
+ }
+ }
+
+ template <typename ForwardIterator, typename Count, typename T>
+ inline void uninitialized_fill_n(ForwardIterator first, Count n, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ Internal::uninitialized_fill_n_impl(first, n, value, eastl::is_trivially_copy_assignable<value_type>());
+ }
+
+
+
+ /// uninitialized_fill_n_ptr
+ ///
+ /// This is a specialization of uninitialized_fill_n for iterators that are pointers.
+ /// It exists so that we can declare a value_type for the iterator, which you
+ /// can't do with a pointer by itself.
+ ///
+ template <typename T, typename Count>
+ inline void uninitialized_fill_n_ptr(T* first, Count n, const T& value)
+ {
+ typedef typename eastl::iterator_traits<generic_iterator<T*, void> >::value_type value_type;
+ Internal::uninitialized_fill_n_impl(eastl::generic_iterator<T*, void>(first), n, value, eastl::is_trivially_copy_assignable<value_type>());
+ }
+
+
+
+
+ /// uninitialized_copy_fill
+ ///
+ /// Copies [first1, last1) into [first2, first2 + (last1 - first1)) then
+ /// fills [first2 + (last1 - first1), last2) with value.
+ ///
+ template <typename InputIterator, typename ForwardIterator, typename T>
+ inline void uninitialized_copy_fill(InputIterator first1, InputIterator last1,
+ ForwardIterator first2, ForwardIterator last2, const T& value)
+ {
+ const ForwardIterator mid(eastl::uninitialized_copy(first1, last1, first2));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ try
+ {
+ #endif
+ eastl::uninitialized_fill(mid, last2, value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; first2 < mid; ++first2)
+ (*first2).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+ /// uninitialized_move_fill
+ ///
+ /// Moves [first1, last1) into [first2, first2 + (last1 - first1)) then
+ /// fills [first2 + (last1 - first1), last2) with value.
+ ///
+ template <typename InputIterator, typename ForwardIterator, typename T>
+ inline void uninitialized_move_fill(InputIterator first1, InputIterator last1,
+ ForwardIterator first2, ForwardIterator last2, const T& value)
+ {
+ const ForwardIterator mid(eastl::uninitialized_move(first1, last1, first2));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ try
+ {
+ #endif
+ eastl::uninitialized_fill(mid, last2, value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; first2 < mid; ++first2)
+ (*first2).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+
+
+
+ /// uninitialized_fill_copy
+ ///
+ /// Fills [result, mid) with value then copies [first, last) into [mid, mid + (last - first)).
+ ///
+ template <typename ForwardIterator, typename T, typename InputIterator>
+ inline ForwardIterator
+ uninitialized_fill_copy(ForwardIterator result, ForwardIterator mid, const T& value, InputIterator first, InputIterator last)
+ {
+ eastl::uninitialized_fill(result, mid, value);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ try
+ {
+ #endif
+ return eastl::uninitialized_copy(first, last, mid);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; result < mid; ++result)
+ (*result).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+ /// uninitialized_fill_move
+ ///
+ /// Fills [result, mid) with value then copies [first, last) into [mid, mid + (last - first)).
+ ///
+ template <typename ForwardIterator, typename T, typename InputIterator>
+ inline ForwardIterator
+ uninitialized_fill_move(ForwardIterator result, ForwardIterator mid, const T& value, InputIterator first, InputIterator last)
+ {
+ eastl::uninitialized_fill(result, mid, value);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ try
+ {
+ #endif
+ return eastl::uninitialized_move(first, last, mid);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; result < mid; ++result)
+ (*result).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+
+ /// uninitialized_copy_copy
+ ///
+ /// Copies [first1, last1) into [result, result + (last1 - first1)) then
+ /// copies [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)).
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename ForwardIterator>
+ inline ForwardIterator
+ uninitialized_copy_copy(InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, InputIterator2 last2,
+ ForwardIterator result)
+ {
+ const ForwardIterator mid(eastl::uninitialized_copy(first1, last1, result));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ try
+ {
+ #endif
+ return eastl::uninitialized_copy(first2, last2, mid);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(; result < mid; ++result)
+ (*result).~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+
+ /// destruct
+ ///
+ /// Calls the destructor of a given object.
+ ///
+ /// Note that we don't have a specialized version of this for objects
+ /// with trivial destructors, such as integers. This is because the
+ /// compiler can already see in our version here that the destructor
+ /// is a no-op.
+ ///
+ template <typename T>
+ inline void destruct(T* p)
+ {
+ // https://msdn.microsoft.com/query/dev14.query?appId=Dev14IDEF1&l=EN-US&k=k(C4100)&rd=true
+ // "C4100 can also be issued when code calls a destructor on a otherwise unreferenced parameter
+ // of primitive type. This is a limitation of the Visual C++ compiler."
+ EA_UNUSED(p);
+ p->~T();
+ }
+
+
+
+ // destruct(first, last)
+ //
+ template <typename ForwardIterator>
+ inline void destruct_impl(ForwardIterator /*first*/, ForwardIterator /*last*/, true_type) // true means the type has a trivial destructor.
+ {
+ // Empty. The type has a trivial destructor.
+ }
+
+ template <typename ForwardIterator>
+ inline void destruct_impl(ForwardIterator first, ForwardIterator last, false_type) // false means the type has a significant destructor.
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+ for(; first != last; ++first)
+ (*first).~value_type();
+ }
+
+ /// destruct
+ ///
+ /// Calls the destructor on a range of objects.
+ ///
+ /// We have a specialization for objects with trivial destructors, such as
+ /// PODs. In this specialization the destruction of the range is a no-op.
+ ///
+ template <typename ForwardIterator>
+ inline void destruct(ForwardIterator first, ForwardIterator last)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ destruct_impl(first, last, eastl::has_trivial_destructor<value_type>());
+ }
+
+
+ /// destroy_at
+ ///
+ /// Calls the destructor of a given object.
+ ///
+ /// Note that we don't have a specialized version of this for objects
+ /// with trivial destructors, such as integers. This is because the
+ /// compiler can already see in our version here that the destructor
+ /// is a no-op.
+ ///
+ /// This is the same as eastl::destruct but we included for C++17 compliance.
+ ///
+ /// http://en.cppreference.com/w/cpp/memory/destroy_at
+ ///
+ template <typename T>
+ inline void destroy_at(T* p)
+ {
+ EA_UNUSED(p);
+ p->~T();
+ }
+
+
+ /// destroy
+ ///
+ /// Calls the destructor on a range of objects.
+ ///
+ /// http://en.cppreference.com/w/cpp/memory/destroy
+ ///
+ template <typename ForwardIterator>
+ inline void destroy(ForwardIterator first, ForwardIterator last)
+ {
+ for (; first != last; ++first)
+ eastl::destroy_at(eastl::addressof(*first));
+ }
+
+
+ /// destroy_n
+ ///
+ /// Calls the destructor on the n objects in the range.
+ ///
+ /// http://en.cppreference.com/w/cpp/memory/destroy_n
+ ///
+ template <typename ForwardIterator, typename Size>
+ ForwardIterator destroy_n(ForwardIterator first, Size n)
+ {
+ for (; n > 0; ++first, --n)
+ eastl::destroy_at(eastl::addressof(*first));
+
+ return first;
+ }
+
+
+ /// align
+ ///
+ /// Same as C++11 std::align. http://en.cppreference.com/w/cpp/memory/align
+ /// If it is possible to fit size bytes of storage aligned by alignment into the buffer pointed to by
+ /// ptr with length space, the function updates ptr to point to the first possible address of such storage,
+ /// decreases space by the number of bytes used for alignment, and returns the new ptr value. Otherwise,
+ /// the function returns NULL and leaves ptr and space unmodified.
+ ///
+ /// Example usage:
+ /// char buffer[512];
+ /// size_t space = sizeof(buffer);
+ /// void* p = buffer;
+ /// void* p1 = eastl::align(16, 3, p, space); p = (char*)p + 3; space -= 3;
+ /// void* p2 = eastl::align(32, 78, p, space); p = (char*)p + 78; space -= 78;
+ /// void* p3 = eastl::align(64, 9, p, space); p = (char*)p + 9; space -= 9;
+
+ inline void* align(size_t alignment, size_t size, void*& ptr, size_t& space)
+ {
+ if(space >= size)
+ {
+ char* ptrAligned = (char*)(((size_t)ptr + (alignment - 1)) & -alignment);
+ size_t offset = (size_t)(ptrAligned - (char*)ptr);
+
+ if((space - size) >= offset) // Have to implement this in terms of subtraction instead of addition in order to handle possible overflow.
+ {
+ ptr = ptrAligned;
+ space -= offset;
+
+ return ptrAligned;
+ }
+ }
+
+ return NULL;
+ }
+
+
+ /// align_advance
+ ///
+ /// Same as align except ptr and space can be adjusted to reflect remaining space.
+ /// Not present in the C++ Standard.
+ /// Note that the example code here is similar to align but simpler.
+ ///
+ /// Example usage:
+ /// char buffer[512];
+ /// size_t space = sizeof(buffer);
+ /// void* p = buffer;
+ /// void* p1 = eastl::align_advance(16, 3, p, space, &p, &space); // p is advanced and space reduced accordingly.
+ /// void* p2 = eastl::align_advance(32, 78, p, space, &p, &space);
+ /// void* p3 = eastl::align_advance(64, 9, p, space, &p, &space);
+ /// void* p4 = eastl::align_advance(16, 33, p, space);
+
+ inline void* align_advance(size_t alignment, size_t size, void* ptr, size_t space, void** ptrAdvanced = NULL, size_t* spaceReduced = NULL)
+ {
+ if(space >= size)
+ {
+ char* ptrAligned = (char*)(((size_t)ptr + (alignment - 1)) & -alignment);
+ size_t offset = (size_t)(ptrAligned - (char*)ptr);
+
+ if((space - size) >= offset) // Have to implement this in terms of subtraction instead of addition in order to handle possible overflow.
+ {
+ if(ptrAdvanced)
+ *ptrAdvanced = (ptrAligned + size);
+ if(spaceReduced)
+ *spaceReduced = (space - (offset + size));
+
+ return ptrAligned;
+ }
+ }
+
+ return NULL;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // uses_allocator
+ //
+ // Determines if the class T has an allocator_type member typedef
+ // which Allocator is convertible to.
+ //
+ // http://en.cppreference.com/w/cpp/memory/uses_allocator
+ //
+ // A program may specialize this template to derive from true_type for a
+ // user-defined type T that does not have a nested allocator_type but
+ // nonetheless can be constructed with an allocator where either:
+ // - the first argument of a constructor has type allocator_arg_t and
+ // the second argument has type Allocator.
+ // or
+ // - the last argument of a constructor has type Allocator.
+ //
+ // Example behavilor:
+ // uses_allocator<vector>::value => true
+ // uses_allocator<int>::value => false
+ //
+ // This is useful for writing generic code for containers when you can't
+ // know ahead of time that the container has an allocator_type.
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ struct has_allocator_type_helper
+ {
+ private:
+ template <typename>
+ static eastl::no_type test(...);
+
+ template <typename U>
+ static eastl::yes_type test(typename U::allocator_type* = NULL);
+
+ public:
+ static const bool value = sizeof(test<T>(NULL)) == sizeof(eastl::yes_type);
+ };
+
+
+ template <typename T, typename Allocator, bool = has_allocator_type_helper<T>::value>
+ struct uses_allocator_impl
+ : public integral_constant<bool, eastl::is_convertible<Allocator, typename T::allocator_type>::value>
+ {
+ };
+
+ template <typename T, typename Allocator>
+ struct uses_allocator_impl<T, Allocator, false>
+ : public eastl::false_type
+ {
+ };
+
+ template <typename T, typename Allocator>
+ struct uses_allocator
+ : public uses_allocator_impl<T, Allocator>{ };
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pointer_traits
+ //
+ // C++11 Standard section 20.6.3
+ // Provides information about a pointer type, mostly for the purpose
+ // of handling the case where the pointer type isn't a built-in T* but
+ // rather is a class that acts like a pointer.
+ //
+ // A user-defined Pointer has the following properties, by example:
+ // template <class T, class... MoreArgs>
+ // struct Pointer
+ // {
+ // typedef Pointer pointer; // required for use by pointer_traits.
+ // typedef T1 element_type; // optional for use by pointer_traits.
+ // typedef T2 difference_type; // optional for use by pointer_traits.
+ //
+ // template <class Other>
+ // using rebind = typename Ptr<Other, MoreArgs...>; // optional for use by pointer_traits.
+ //
+ // static pointer pointer_to(element_type& obj); // required for use by pointer_traits.
+ // };
+ //
+ //
+ // Example usage:
+ // template <typename Pointer>
+ // typename pointer_traits::element_type& GetElementPointedTo(Pointer p)
+ // { return *p; }
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ namespace Internal
+ {
+ // pointer_element_type
+ template <typename Pointer>
+ struct has_element_type // has_element_type<T>::value is true if T has an element_type member typedef.
+ {
+ private:
+ template <typename U> static eastl::no_type test(...);
+ template <typename U> static eastl::yes_type test(typename U::element_type* = 0);
+ public:
+ static const bool value = sizeof(test<Pointer>(0)) == sizeof(eastl::yes_type);
+ };
+
+ template <typename Pointer, bool = has_element_type<Pointer>::value>
+ struct pointer_element_type
+ {
+ using type = Pointer;
+ };
+
+ template <typename Pointer>
+ struct pointer_element_type<Pointer, true>
+ { typedef typename Pointer::element_type type; };
+
+ template <template <typename, typename...> class Pointer, typename T, typename... Args>
+ struct pointer_element_type<Pointer<T, Args...>, false>
+ { typedef T type; };
+
+
+ // pointer_difference_type
+ template <typename Pointer>
+ struct has_difference_type // has_difference_type<T>::value is true if T has an difference_type member typedef.
+ {
+ private:
+ template <typename U> static eastl::no_type test(...);
+ template <typename U> static eastl::yes_type test(typename U::difference_type* = 0);
+ public:
+ static const bool value = sizeof((test<Pointer>(0))) == sizeof(eastl::yes_type);
+ };
+
+ template <typename Pointer, bool = has_difference_type<Pointer>::value>
+ struct pointer_difference_type
+ { typedef typename Pointer::difference_type type; };
+
+ template <typename Pointer>
+ struct pointer_difference_type<Pointer, false>
+ { typedef ptrdiff_t type; };
+
+
+ // pointer_rebind
+ // The following isn't correct, as it is unilaterally requiring that Pointer typedef its
+ // own rebind. We can fix this if needed to make it optional (in which case it would return
+ // its own type), but we don't currently use rebind in EASTL (as we have a different allocator
+ // system than the C++ Standard Library has) and this is currently moot.
+ template <typename Pointer, typename U>
+ struct pointer_rebind
+ {
+ typedef typename Pointer::template rebind<U> type;
+ };
+
+
+ } // namespace Internal
+
+
+ template <typename Pointer>
+ struct pointer_traits
+ {
+ typedef Pointer pointer;
+ typedef typename Internal::pointer_element_type<pointer>::type element_type;
+ typedef typename Internal::pointer_difference_type<pointer>::type difference_type;
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename U>
+ struct rebind { typedef typename Internal::pointer_rebind<pointer, U>::type other; };
+ #else
+ template <typename U>
+ using rebind = typename Internal::pointer_rebind<pointer, U>::type;
+ #endif
+
+ public:
+ static pointer pointer_to(typename eastl::conditional<eastl::is_void<element_type>::value, void, element_type>::type& r) // 20.6.3.2: if element_type is (possibly cv-qualified) void, the type of r is unspecified; otherwise, it is T&.
+ { return pointer::pointer_to(r); } // The C++11 Standard requires that Pointer provides a static pointer_to function.
+ };
+
+
+ template <typename T>
+ struct pointer_traits<T*>
+ {
+ typedef T* pointer;
+ typedef T element_type;
+ typedef ptrdiff_t difference_type;
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename U>
+ struct rebind { typedef U* other; };
+ #else
+ template <typename U>
+ using rebind = U*;
+ #endif
+
+ public:
+ static pointer pointer_to(typename eastl::conditional<eastl::is_void<element_type>::value, void, element_type>::type& r) EA_NOEXCEPT
+ { return eastl::addressof(r); } // 20.6.3.2: if element_type is (possibly cv-qualified) void, the type of r is unspecified; otherwise, it is T&.
+ };
+
+ ///////////////////////////////////////////////////////////////////////
+ // to_address
+ //
+ // Helper that call the customization point in pointer_traits<T>::to_address for retrieving the address of a pointer.
+ // This is useful if you are using fancy-pointers.
+ ///////////////////////////////////////////////////////////////////////
+
+ namespace Internal
+ {
+ template <class T>
+ using detect_pointer_traits_to_address = decltype(eastl::pointer_traits<T>::to_address(eastl::declval<const T&>()));
+
+ template <class T>
+ using result_detect_pointer_traits_to_address = eastl::is_detected<detect_pointer_traits_to_address, T>;
+ }
+
+ template<class T>
+ EA_CPP14_CONSTEXPR T* to_address(T* p) noexcept
+ {
+ static_assert(!eastl::is_function<T>::value, "Cannot call to_address with a function pointer. C++20 20.2.4.1 - Pointer conversion.");
+ return p;
+ }
+
+ template <class Ptr, typename eastl::enable_if<Internal::result_detect_pointer_traits_to_address<Ptr>::value, int>::type = 0>
+ EA_CPP14_CONSTEXPR auto to_address(const Ptr& ptr) noexcept -> decltype(eastl::pointer_traits<Ptr>::to_address(ptr))
+ {
+ return eastl::pointer_traits<Ptr>::to_address(ptr);
+ }
+
+ template <class Ptr, typename eastl::enable_if<!Internal::result_detect_pointer_traits_to_address<Ptr>::value, int>::type = 0>
+ EA_CPP14_CONSTEXPR auto to_address(const Ptr& ptr) noexcept -> decltype(to_address(ptr.operator->()))
+ {
+ return to_address(ptr.operator->());
+ }
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/meta.h b/EASTL/include/EASTL/meta.h
new file mode 100644
index 0000000..545354d
--- /dev/null
+++ b/EASTL/include/EASTL/meta.h
@@ -0,0 +1,247 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_META_H
+#define EASTL_META_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This file contains meta programming utilities that are internal to EASTL. We reserve
+// the right to change this file at any time as it is only intended to be used internally.
+////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace eastl
+{
+ namespace meta
+ {
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // get_type_index_v
+ //
+ // Linearly searches a typelist using compile-time recursion to inspect each T in
+ // the typelist and returns its index, if the type is found. If the T isn't found
+ // in the typelist -1 is returned.
+ //
+ namespace Internal
+ {
+ template <int I, typename T, typename... Types>
+ struct get_type_index;
+
+ template <int I, typename T, typename Head, typename... Types>
+ struct get_type_index<I, T, Head, Types...>
+ {
+ static EA_CONSTEXPR_OR_CONST int value = is_same_v<T, Head> ? I : get_type_index<I + 1, T, Types...>::value;
+ };
+
+ template <int I, typename T>
+ struct get_type_index<I, T>
+ {
+ static EA_CONSTEXPR_OR_CONST int value = -1;
+ };
+ }
+
+ template<typename T, typename... Types>
+ struct get_type_index
+ {
+ static EA_CONSTEXPR_OR_CONST int value = Internal::get_type_index<0, T, Types...>::value;
+ };
+
+ template <typename T, typename... Ts>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR int get_type_index_v = get_type_index<T, Ts...>::value;
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // get_type_at
+ //
+ // This traverses the variadic type list and retrieves the type at the user provided index.
+ //
+ template <size_t I, typename... Ts>
+ struct get_type_at_helper;
+
+ template <size_t I, typename Head, typename... Tail>
+ struct get_type_at_helper<I, Head, Tail...>
+ { typedef typename get_type_at_helper<I - 1, Tail...>::type type; };
+
+ template <typename Head, typename... Tail>
+ struct get_type_at_helper<0, Head, Tail...>
+ { typedef Head type; };
+
+ template <int I, typename... Ts>
+ using get_type_at_t = typename get_type_at_helper<I, Ts...>::type;
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // type_count_v
+ //
+ // Returns the number of occurrences of type T in a typelist.
+ //
+ template <typename T, typename... Types>
+ struct type_count;
+
+ template <typename T, typename H, typename... Types>
+ struct type_count<T, H, Types...>
+ {
+ static EA_CONSTEXPR_OR_CONST int value = (is_same_v<T, H> ? 1 : 0) + type_count<T, Types...>::value;
+ };
+
+ template <typename T>
+ struct type_count<T>
+ {
+ static EA_CONSTEXPR_OR_CONST int value = 0;
+ };
+
+ template <typename T, typename... Ts>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR int type_count_v = type_count<T, Ts...>::value;
+
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // duplicate_type_check_v
+ //
+ // Checks if a type T occurs in a typelist more than once.
+ //
+ template <typename T, typename... Types>
+ struct duplicate_type_check
+ {
+ static EA_CONSTEXPR_OR_CONST bool value = (type_count<T, Types...>::value == 1);
+ };
+
+ template <typename... Ts>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool duplicate_type_check_v = duplicate_type_check<Ts...>::value;
+
+
+ //////////////////////////////////////////////////////////////////////////////////
+ // type_list
+ //
+ // type_list is a simple struct that allows us to pass template parameter packs
+ // around in a single struct, and deduce parameter packs from function arguments
+ // like so:
+ //
+ // template <typename... Ts> void foo(type_list<Ts...>);
+ // foo(type_list<A, B, C>); // deduces Ts... as A, B, C
+ //
+ template <typename...> struct type_list {};
+
+
+ //////////////////////////////////////////////////////////////////////////////////
+ // unique_type_list
+ //
+ // unique_type_list is a meta-function which takes a parameter pack as its
+ // argument, and returns a type_list with duplicate types removed, like so:
+ //
+ // unique_type_list<int, int, string>::type; // type = type_list<int, string>
+ // unique_type_list<int, string, string>::type; // type = type_list<int, string>
+ //
+ // To use unique_type_list, specialize a variadic class template for a single
+ // type parameter, which is type_list<Ts...>:
+ //
+ // template <typename... Ts> struct foo {};
+ // template <typename... Ts> struct foo<type_list<Ts...>> {};
+ //
+ // Then instantiate the template with unique_type_list_t<Ts...> as its parameter:
+ //
+ // template <typename... Ts> struct bar : public foo<unique_type_list_t<Ts...>> {}
+ //
+ // See overload_set below for examples.
+ template <typename T, typename... Ts>
+ struct unique_type_list : public unique_type_list<Ts...>
+ {
+ template <typename... Args>
+ static enable_if_t<!disjunction_v<is_same<T, Args>...>, type_list<T, Args...>>
+ types(type_list<Args...>);
+
+ template <typename... Args>
+ static enable_if_t<disjunction_v<is_same<T, Args>...>, type_list<Args...>>
+ types(type_list<Args...>);
+
+ typedef decltype(types(declval<typename unique_type_list<Ts...>::type>())) type;
+ };
+
+ template <typename T>
+ struct unique_type_list<T>
+ {
+ using type = type_list<T>;
+ };
+
+ template <typename... Ts>
+ using unique_type_list_t = typename unique_type_list<Ts...>::type;
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // overload_resolution_t
+ //
+ // Given an input type and a typelist (which is a stand-in for alternative
+ // function overloads) this traits will return the same type chosen as if
+ // overload_resolution has selected a function to run.
+ //
+
+ // a single overload of an individual type
+ template <typename T>
+ struct overload
+ {
+ // Overload is implicitly convertible to the surrogated function
+ // call for pointer to member functions (pmf). This gets around
+ // variadic pack expansion in a class using statement being a C++17
+ // language feature. It is the core mechanism of aggregating all the
+ // individual overloads into the overload_set structure.
+ using F = T (*)(T);
+ operator F() const { return nullptr; }
+ };
+
+ template <typename...> struct overload_set_impl;
+
+ template <typename... Ts>
+ struct overload_set_impl<type_list<Ts...>> : public overload<Ts>... {};
+
+ template <typename... Ts>
+ struct overload_set : public overload_set_impl<unique_type_list_t<Ts...>>
+ {
+ // encapsulates the overloads matching the types of the variadic pack
+ };
+
+ EA_DISABLE_VC_WARNING(4242 4244) // conversion from 'T' to 'U', possible loss of data.
+ template <typename T, typename OverloadSet, typename ResultT = decltype(declval<OverloadSet>()(declval<T>()))>
+ struct overload_resolution
+ {
+ // capture the return type of the function the compiler selected by
+ // performing overload resolution on the overload set parameter
+ using type = ResultT;
+ };
+
+ EA_RESTORE_VC_WARNING()
+
+ template <typename T, typename OverloadSet>
+ using overload_resolution_t = typename overload_resolution<decay_t<T>, OverloadSet>::type;
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // double_pack_expansion
+ //
+ // MSVC 2017 has a hard time expanding two packs of different lengths.
+ // This is a helper meant to coerce MSVC 2017 into doing the expansion by adding another level
+ // of indirection.
+ //
+
+ template <typename T, size_t I>
+ struct double_pack_expansion;
+
+ template <size_t... Is, size_t I>
+ struct double_pack_expansion<index_sequence<Is...>, I>
+ {
+ using type = index_sequence<Is..., I>;
+ };
+
+ template <typename IndexSequence, size_t I>
+ using double_pack_expansion_t = typename double_pack_expansion<IndexSequence, I>::type;
+
+
+ } // namespace meta
+} // namespace eastl
+
+#endif // EASTL_META_H
diff --git a/EASTL/include/EASTL/numeric.h b/EASTL/include/EASTL/numeric.h
new file mode 100644
index 0000000..200be6c
--- /dev/null
+++ b/EASTL/include/EASTL/numeric.h
@@ -0,0 +1,344 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file defines numeric algorithms just like the std C++ <numeric>
+// algorithm header does.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_NUMERIC_H
+#define EASTL_NUMERIC_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// accumulate
+ ///
+ /// Accumulates the values in the range [first, last) using operator+.
+ /// The initial value is init. The values are processed in order.
+ ///
+ template <typename InputIterator, typename T>
+ T accumulate(InputIterator first, InputIterator last, T init)
+ {
+ // The C++ standard specifies that we use (init = init + first).
+ // However, for non-built-in types, this is less efficent than
+ // operator +=, as no temporary is created. Until a serious problem
+ // is found with using operator +=, we'll use it.
+
+ for(; first != last; ++first)
+ init += *first;
+ return init;
+ }
+
+
+ /// accumulate
+ ///
+ /// Accumulates the values in the range [first, last) using binary_op.
+ /// The initial value is init. The values are processed in order.
+ ///
+ template <typename InputIterator, typename T, typename BinaryOperation>
+ T accumulate(InputIterator first, InputIterator last, T init, BinaryOperation binary_op)
+ {
+ for(; first != last; ++first)
+ init = binary_op(init, *first);
+ return init;
+ }
+
+
+
+ /// iota
+ ///
+ /// Requires: T shall be convertible to ForwardIterator's value type. The expression ++val,
+ /// where val has type T, shall be well formed.
+ /// Effects: For each element referred to by the iterator i in the range [first, last),
+ /// assigns *i = value and increments value as if by ++value.
+ /// Complexity: Exactly last - first increments and assignments.
+ /// Example usage: seeding a deck of cards with values 0-51.
+ ///
+ template <typename ForwardIterator, typename T>
+ void iota(ForwardIterator first, ForwardIterator last, T value)
+ {
+ while(first != last)
+ {
+ *first++ = value;
+ ++value;
+ }
+ }
+
+
+ /// inner_product
+ ///
+ /// Starting with an initial value of init, multiplies successive
+ /// elements from the two ranges and adds each product into the accumulated
+ /// value using operator+. The values in the ranges are processed in order.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename T>
+ T inner_product(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, T init)
+ {
+ // The C++ standard specifies that we use (init = init + (*first1 * *first2)).
+ // However, for non-built-in types, this is less efficent than
+ // operator +=, as no temporary is created. Until a serious problem
+ // is found with using operator +=, we'll use it.
+
+ for(; first1 != last1; ++first1, ++first2)
+ init += (*first1 * *first2);
+ return init;
+ }
+
+
+ /// inner_product
+ ///
+ /// Starting with an initial value of init, applies binary_op2 to
+ /// successive elements from the two ranges and accumulates each result
+ /// into the accumulated value using binary_op1. The values in the
+ /// ranges are processed in order.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename T, typename BinaryOperation1, typename BinaryOperation2>
+ T inner_product(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, T init,
+ BinaryOperation1 binary_op1, BinaryOperation2 binary_op2)
+ {
+ for(; first1 != last1; ++first1, ++first2)
+ init = binary_op1(init, binary_op2(*first1, *first2));
+ return init;
+ }
+
+
+
+
+
+ /// partial_sum
+ ///
+ /// Accumulates the values in the range [first, last) using operator+.
+ /// As each successive input value is added into the total, that partial
+ /// sum is written to result. Therefore, the first value in result is the
+ /// first value of the input, the second value in result is the sum of the
+ /// first and second input values, and so on.
+ ///
+ template <typename InputIterator, typename OutputIterator>
+ OutputIterator partial_sum(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename iterator_traits<InputIterator>::value_type value_type;
+
+ if(first != last)
+ {
+ value_type value(*first);
+
+ for(*result = value; ++first != last; *++result = value)
+ value += *first; // See discussions above on the decision use += instead of +.
+
+ ++result;
+ }
+
+ return result;
+ }
+
+
+ /// partial_sum
+ ///
+ /// Accumulates the values in the range [first,last) using binary_op.
+ /// As each successive input value is added into the total, that partial
+ /// sum is written to result. Therefore, the first value in result is the
+ /// first value of the input, the second value in result is the sum of the
+ /// first and second input values, and so on.
+
+ template <typename InputIterator, typename OutputIterator, typename BinaryOperation>
+ OutputIterator partial_sum(InputIterator first, InputIterator last, OutputIterator result, BinaryOperation binary_op)
+ {
+ typedef typename iterator_traits<InputIterator>::value_type value_type;
+
+ if(first != last)
+ {
+ value_type value(*first);
+
+ for(*result = value; ++first != last; *++result = value)
+ value = binary_op(value, *first);
+
+ ++result;
+ }
+
+ return result;
+ }
+
+
+
+
+
+ /// adjacent_difference
+ ///
+ /// Computes the difference between adjacent values in the range
+ /// [first, last) using operator- and writes the result to result.
+ ///
+ template <typename InputIterator, typename OutputIterator>
+ OutputIterator adjacent_difference(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename iterator_traits<InputIterator>::value_type value_type;
+
+ if(first != last)
+ {
+ value_type value(*first);
+
+ for(*result = value; ++first != last; )
+ {
+ const value_type temp(*first);
+
+ *++result = temp - value;
+ value = temp;
+ }
+
+ ++result;
+ }
+
+ return result;
+ }
+
+
+ /// adjacent_difference
+ ///
+ /// Computes the difference between adjacent values in the range
+ /// [first, last) using binary_op and writes the result to result.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename BinaryOperation>
+ OutputIterator adjacent_difference(InputIterator first, InputIterator last, OutputIterator result, BinaryOperation binary_op)
+ {
+ typedef typename iterator_traits<InputIterator>::value_type value_type;
+
+ if(first != last)
+ {
+ value_type value(*first);
+
+ for(*result = value; ++first != last; )
+ {
+ const value_type temp(*first);
+
+ *++result = binary_op(temp, value);
+ value = temp;
+ }
+
+ ++result;
+ }
+
+ return result;
+ }
+
+
+ #if defined(EA_COMPILER_CPP20_ENABLED)
+ /// midpoint
+ ///
+ /// Computes the midpoint between the LHS and RHS by adding them together, then dividing the sum by 2.
+ /// If the operands are of integer type and the sum is odd, the result will be rounded closer to the LHS.
+ /// If the operands are floating points, then at most one inexact operation occurs.
+ ///
+ template <typename T>
+ constexpr eastl::enable_if_t<eastl::is_arithmetic_v<T> && !eastl::is_same_v<eastl::remove_cv_t<T>, bool>, T> midpoint(const T lhs, const T rhs) EA_NOEXCEPT
+ {
+ // If T is an integral type...
+ if constexpr(eastl::is_integral_v<T>)
+ {
+ using U = eastl::make_unsigned_t<T>;
+
+ int sign = 1;
+ U m = lhs;
+ U M = rhs;
+
+ if (lhs > rhs)
+ {
+ sign = -1;
+ m = rhs;
+ M = lhs;
+ }
+
+ return lhs + static_cast<T>(sign * static_cast<T>((U(M - m)) / 2 ));
+ }
+
+ // otherwise if T is a floating point
+ else
+ {
+ const T LO = eastl::numeric_limits<T>::min() * 2;
+ const T HI = eastl::numeric_limits<T>::max() / 2;
+
+ const T lhs_abs = (lhs < 0) ? -lhs : lhs;
+ const T rhs_abs = (rhs < 0) ? -rhs : rhs;
+
+ if (lhs_abs <= HI && rhs_abs <= HI)
+ return (lhs + rhs) / 2;
+ if (lhs_abs < LO)
+ return lhs + (rhs / 2);
+ if (rhs_abs < LO)
+ return (lhs / 2) + rhs;
+ return (lhs / 2) + (rhs / 2);
+ }
+ }
+
+
+ /// midpoint
+ ///
+ /// Computes the midpoint address between pointers LHS and RHS.
+ /// The midpoint address closer to the LHS is chosen.
+ ///
+ template <typename T>
+ constexpr eastl::enable_if_t<eastl::is_object_v<T>, T*> midpoint(T* lhs, T* rhs)
+ {
+ return lhs + ((rhs - lhs) / 2);
+ }
+
+
+ template <class T>
+ constexpr T shared_lerp(const T a, const T b, const T t) EA_NOEXCEPT
+ {
+ if ((a <= 0 && b >= 0) || (a >= 0 && b <= 0))
+ {
+ return t * b + (1 - t) * a;
+ }
+
+ if (t == 1)
+ {
+ return b;
+ }
+
+ const T X = a + t * (b - a);
+
+ if ((t > 1) == (b > a))
+ {
+ return (b > X) ? b : X;
+ }
+ return (b < X) ? b : X;
+ }
+
+ /// lerp
+ ///
+ /// Calculates the linear interpolation of two points A and B expressed A + T * (B - A)
+ /// where T is some value in range [0, 1]. If T is outside this range, the linear extrapolation will be computed.
+ ///
+ /// https://en.cppreference.com/w/cpp/numeric/lerp
+ ///
+ /// C++ proposal paper:
+ /// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0811r3.html
+ ///
+ constexpr float lerp(float a, float b, float t) EA_NOEXCEPT { return shared_lerp(a, b, t); }
+ constexpr double lerp(double a, double b, double t) EA_NOEXCEPT { return shared_lerp(a, b, t); }
+ constexpr long double lerp(long double a, long double b, long double t) EA_NOEXCEPT { return shared_lerp(a, b, t); }
+ #endif
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/numeric_limits.h b/EASTL/include/EASTL/numeric_limits.h
new file mode 100644
index 0000000..0d7dc97
--- /dev/null
+++ b/EASTL/include/EASTL/numeric_limits.h
@@ -0,0 +1,1819 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// We support eastl::numeric_limits for the following types. Sized types such
+// as int32_t are covered by these basic types, with the exception of int128_t.
+//
+// bool
+// char (distinct from signed and unsigned char)
+// unsigned char,
+// signed char,
+// wchar_t
+// char16_t (when char16_t is a distict type)
+// char32_t (when char32_t is a distinct type)
+// unsigned short,
+// signed short
+// unsigned int
+// signed int
+// unsigned long
+// signed long
+// signed long long
+// unsigned long long
+// uint128_t (when supported natively by the compiler)
+// int128_t (when supported natively by the compiler)
+// float
+// double
+// long double
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_NUMERIC_LIMITS_H
+#define EASTL_NUMERIC_LIMITS_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <limits.h> // C limits.h header
+#include <float.h>
+#if defined(_CPPLIB_VER) // Dinkumware.
+ #include <ymath.h>
+#endif
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+// Disable Warnings:
+// 4310 - cast truncates constant value
+// 4296 - expression is always false
+EA_DISABLE_VC_WARNING(4310 4296)
+
+// EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED
+//
+// Defined as 0 or 1.
+// Indicates whether we need to define our own implementations of inf, nan, snan, denorm floating point constants.
+//
+#if !defined(EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED)
+ #if (defined(EA_COMPILER_GNUC) || defined(__clang__) && defined(__FLT_MIN__)) || defined(_CPPLIB_VER) // __FLT_MIN__ detects if it's really GCC/clang and not a mimic. _CPPLIB_VER (Dinkumware) covers VC++, and Microsoft platforms.
+ #define EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED 0
+ #else
+ #define EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED 1
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// min/max workaround
+//
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. The following may still not completely resolve some kinds of
+// problems with MSVC++ #defines, though it deals with most cases in production
+// game code.
+//
+#if EASTL_NOMINMAX
+ #ifdef min
+ #undef min
+ #endif
+ #ifdef max
+ #undef max
+ #endif
+#endif
+
+
+// EA_CONSTEXPR
+// EA_CONSTEXPR is defined in EABase 2.00.38 and later.
+#if !defined(EA_CONSTEXPR)
+ #define EA_CONSTEXPR
+#endif
+
+// EA_CONSTEXPR_OR_CONST
+// EA_CONSTEXPR_OR_CONST is defined in EABase 2.00.39 and later.
+#if !defined(EA_CONSTEXPR_OR_CONST)
+ #define EA_CONSTEXPR_OR_CONST const
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIMITS macros
+// These apply to integral types only.
+///////////////////////////////////////////////////////////////////////////////
+
+// true or false.
+#define EASTL_LIMITS_IS_SIGNED(T) ((T)(-1) < 0)
+
+// The min possible value of T.
+#define EASTL_LIMITS_MIN_S(T) ((T)((T)1 << EASTL_LIMITS_DIGITS_S(T)))
+#define EASTL_LIMITS_MIN_U(T) ((T)0)
+#define EASTL_LIMITS_MIN(T) ((EASTL_LIMITS_IS_SIGNED(T) ? EASTL_LIMITS_MIN_S(T) : EASTL_LIMITS_MIN_U(T)))
+
+// The max possible value of T.
+#define EASTL_LIMITS_MAX_S(T) ((T)(((((T)1 << (EASTL_LIMITS_DIGITS(T) - 1)) - 1) << 1) + 1))
+#define EASTL_LIMITS_MAX_U(T) ((T)~(T)0)
+#define EASTL_LIMITS_MAX(T) ((EASTL_LIMITS_IS_SIGNED(T) ? EASTL_LIMITS_MAX_S(T) : EASTL_LIMITS_MAX_U(T)))
+
+// The number of bits in the representation of T.
+#define EASTL_LIMITS_DIGITS_S(T) ((sizeof(T) * CHAR_BIT) - 1)
+#define EASTL_LIMITS_DIGITS_U(T) ((sizeof(T) * CHAR_BIT))
+#define EASTL_LIMITS_DIGITS(T) ((EASTL_LIMITS_IS_SIGNED(T) ? EASTL_LIMITS_DIGITS_S(T) : EASTL_LIMITS_DIGITS_U(T)))
+
+// The number of decimal digits that can be represented by T.
+#define EASTL_LIMITS_DIGITS10_S(T) ((EASTL_LIMITS_DIGITS_S(T) * 643L) / 2136) // (643 / 2136) ~= log10(2).
+#define EASTL_LIMITS_DIGITS10_U(T) ((EASTL_LIMITS_DIGITS_U(T) * 643L) / 2136)
+#define EASTL_LIMITS_DIGITS10(T) ((EASTL_LIMITS_IS_SIGNED(T) ? EASTL_LIMITS_DIGITS10_S(T) : EASTL_LIMITS_DIGITS10_U(T)))
+
+
+
+
+
+
+namespace eastl
+{
+ // See C++11 18.3.2.5
+ enum float_round_style
+ {
+ round_indeterminate = -1, /// Intermediate.
+ round_toward_zero = 0, /// To zero.
+ round_to_nearest = 1, /// To the nearest representable value.
+ round_toward_infinity = 2, /// To infinity.
+ round_toward_neg_infinity = 3 /// To negative infinity.
+ };
+
+ // See C++11 18.3.2.6
+ enum float_denorm_style
+ {
+ denorm_indeterminate = -1, /// It cannot be determined whether or not the type allows denormalized values.
+ denorm_absent = 0, /// The type does not allow denormalized values.
+ denorm_present = 1 /// The type allows denormalized values.
+ };
+
+
+ namespace Internal
+ {
+ // Defines default values for numeric_limits, which can be overridden by class specializations.
+ // See C++11 18.3.2.3
+ struct numeric_limits_base
+ {
+ // true if the type has an explicit specialization defined in the template class; false if not.
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = false;
+
+ // Integer types: the number of *bits* in the representation of T.
+ // Floating types: the number of digits in the mantissa of T (same as FLT_MANT_DIG, DBL_MANT_DIG or LDBL_MANT_DIG).
+ static EA_CONSTEXPR_OR_CONST int digits = 0;
+
+ // The number of decimal digits that can be represented by T.
+ // Equivalent to FLT_DIG, DBL_DIG or LDBL_DIG for floating types.
+ static EA_CONSTEXPR_OR_CONST int digits10 = 0;
+
+ // The number of decimal digits required to make sure that two distinct values of the type have distinct decimal representations.
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+
+ // True if the type is signed.
+ static EA_CONSTEXPR_OR_CONST bool is_signed = false;
+
+ // True if the type is integral.
+ static EA_CONSTEXPR_OR_CONST bool is_integer = false;
+
+ // True if the type uses an exact representation. All integral types are
+ // exact, but other types can be exact as well.
+ static EA_CONSTEXPR_OR_CONST bool is_exact = false;
+
+ // Integer types: the base of the representation. Always 2 for integers.
+ // Floating types: the base of the exponent representation. Always FLT_RADIX (typically 2) for float.
+ static EA_CONSTEXPR_OR_CONST int radix = 0;
+
+ // The minimum integral radix-based exponent representable by the type.
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+
+ // The minimum integral base 10 exponent representable by the type.
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+
+ // The maximum integral radix-based exponent representable by the type.
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+
+ // The maximum integral base 10 exponent representable by the type.
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+
+ // True if the type has a representation for positive infinity.
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+
+ // True if the type has a representation for a quiet (non-signaling) NaN.
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+
+ // True if the type has a representation for a signaling NaN.
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+
+ // An enumeration which identifies denormalization behavior.
+ // In practice the application can change this at runtime via hardware-specific commands.
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+
+ // True if the loss of accuracy is detected as a denormalization loss.
+ // Typically false for integer types and true for floating point types.
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+
+ // True if the type has a bounded set of representable values. Typically true for
+ // all built-in numerial types (integer and floating point).
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = false;
+
+ // True if the type has a modulo representation (if it's possible to add two
+ // positive numbers and have a result that wraps around to a third number
+ // that is less. Typically true for integers and false for floating types.
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = false;
+
+ // True if trapping (arithmetic exception generation) is implemented for this type.
+ // Typically true for integer types (div by zero) and false for floating point types,
+ // though in practice the application may be able to change floating point to trap at runtime.
+ static EA_CONSTEXPR_OR_CONST bool traps = false;
+
+ // True if tiny-ness is detected before rounding.
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+
+ // An enumeration which identifies default rounding behavior.
+ // In practice the application can change this at runtime via hardware-specific commands.
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+
+ // True if the type is floating point and follows the IEC 559 standard (IEEE 754).
+ // In practice the application or OS can change this at runtime via hardware-specific commands or via compiler optimizations.
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+ };
+
+
+ #if EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED
+ extern EASTL_API float gFloatInfinity;
+ extern EASTL_API float gFloatNaN;
+ extern EASTL_API float gFloatSNaN;
+ extern EASTL_API float gFloatDenorm;
+
+ extern EASTL_API double gDoubleInfinity;
+ extern EASTL_API double gDoubleNaN;
+ extern EASTL_API double gDoubleSNaN;
+ extern EASTL_API double gDoubleDenorm;
+
+ extern EASTL_API long double gLongDoubleInfinity;
+ extern EASTL_API long double gLongDoubleNaN;
+ extern EASTL_API long double gLongDoubleSNaN;
+ extern EASTL_API long double gLongDoubleDenorm;
+ #endif
+
+ } // namespace Internal
+
+
+ // Default numeric_limits.
+ // See C++11 18.3.2.3
+ template<typename T>
+ class numeric_limits : public Internal::numeric_limits_base
+ {
+ public:
+ typedef T value_type;
+
+ static value_type min()
+ { return value_type(0); }
+
+ static value_type max()
+ { return value_type(0); }
+
+ static value_type lowest()
+ { return min(); }
+
+ static value_type epsilon()
+ { return value_type(0); }
+
+ static value_type round_error()
+ { return value_type(0); }
+
+ static value_type denorm_min()
+ { return value_type(0); }
+
+ static value_type infinity()
+ { return value_type(0); }
+
+ static value_type quiet_NaN()
+ { return value_type(0); }
+
+ static value_type signaling_NaN()
+ { return value_type(0); }
+ };
+
+
+ // Const/volatile variations of numeric_limits.
+ template<typename T>
+ class numeric_limits<const T> : public numeric_limits<T>
+ {
+ };
+
+ template<typename T>
+ class numeric_limits<volatile T> : public numeric_limits<T>
+ {
+ };
+
+ template<typename T>
+ class numeric_limits<const volatile T> : public numeric_limits<T>
+ {
+ };
+
+
+
+ // numeric_limits<bool>
+ template<>
+ struct numeric_limits<bool>
+ {
+ typedef bool value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = 1; // In practice bool is stores as a byte, or sometimes an int.
+ static EA_CONSTEXPR_OR_CONST int digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = false; // In practice bool may be implemented as signed char.
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = false;
+ static EA_CONSTEXPR_OR_CONST bool traps = true; // Should this be true or false? Given that it's implemented in hardware as an integer type, we use true.
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return false; }
+
+ static EA_CONSTEXPR value_type max()
+ { return true; }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return false; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return false; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return false; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return value_type(); }
+ };
+
+
+ // numeric_limits<char>
+ template<>
+ struct numeric_limits<char>
+ {
+ typedef char value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = EASTL_LIMITS_IS_SIGNED(value_type);
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); } // Question: Should we return 0 here or value_type()?
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return (value_type)0; }
+ };
+
+
+ // numeric_limits<unsigned char>
+ template<>
+ struct numeric_limits<unsigned char>
+ {
+ typedef unsigned char value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = false;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_U(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return (value_type)0; }
+ };
+
+
+ // numeric_limits<signed char>
+ template<>
+ struct numeric_limits<signed char>
+ {
+ typedef signed char value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN_S(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_S(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN_S(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return (value_type)0; }
+ };
+
+
+ // numeric_limits<wchar_t>
+ // VC++ has the option of making wchar_t simply be unsigned short. If that's enabled then
+ // the code below could possibly cause compile failures due to redundancy. The best resolution
+ // may be to use __wchar_t here for VC++ instead of wchar_t, as __wchar_t is always a true
+ // unique type under VC++. http://social.msdn.microsoft.com/Forums/en-US/vclanguage/thread/9059330a-7cce-4d0d-a8e0-e1dcb63322bd/
+ template<>
+ struct numeric_limits<wchar_t>
+ {
+ typedef wchar_t value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = EASTL_LIMITS_IS_SIGNED(value_type);
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return (value_type)0; }
+ };
+
+
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ template<>
+ struct numeric_limits<char8_t>
+ {
+ typedef char8_t value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = EASTL_LIMITS_IS_SIGNED(value_type);
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return (value_type)0; }
+ };
+ #endif
+
+ #if EA_CHAR16_NATIVE // If char16_t is a true unique type (as called for by the C++11 Standard)...
+
+ // numeric_limits<char16_t>
+ template<>
+ struct numeric_limits<char16_t>
+ {
+ typedef char16_t value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = EASTL_LIMITS_IS_SIGNED(value_type);
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return (value_type)0; }
+ };
+
+ #endif
+
+
+ #if EA_CHAR32_NATIVE // If char32_t is a true unique type (as called for by the C++11 Standard)...
+
+ // numeric_limits<char32_t>
+ template<>
+ struct numeric_limits<char32_t>
+ {
+ typedef char32_t value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = EASTL_LIMITS_IS_SIGNED(value_type);
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return (value_type)0; }
+ };
+
+ #endif
+
+
+ // numeric_limits<unsigned short>
+ template<>
+ struct numeric_limits<unsigned short>
+ {
+ typedef unsigned short value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = false;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_U(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+ // numeric_limits<signed short>
+ template<>
+ struct numeric_limits<signed short>
+ {
+ typedef signed short value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN_S(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_S(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN_S(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+
+ // numeric_limits<unsigned int>
+ template<>
+ struct numeric_limits<unsigned int>
+ {
+ typedef unsigned int value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = false;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_U(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+ // numeric_limits<signed int>
+ template<>
+ struct numeric_limits<signed int>
+ {
+ typedef signed int value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return INT_MIN; } // It's hard to get EASTL_LIMITS_MIN_S to work with all compilers here.
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_S(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return INT_MIN; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+ // numeric_limits<unsigned long>
+ template<>
+ struct numeric_limits<unsigned long>
+ {
+ typedef unsigned long value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = false;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_U(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+ // numeric_limits<signed long>
+ template<>
+ struct numeric_limits<signed long>
+ {
+ typedef signed long value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return LONG_MIN; } // It's hard to get EASTL_LIMITS_MIN_S to work with all compilers here.
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_S(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return LONG_MIN; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+ // numeric_limits<unsigned long long>
+ template<>
+ struct numeric_limits<unsigned long long>
+ {
+ typedef unsigned long long value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = false;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_U(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+ // numeric_limits<signed long long>
+ template<>
+ struct numeric_limits<signed long long>
+ {
+ typedef signed long long value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN_S(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_S(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN_S(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+ #if (EA_COMPILER_INTMAX_SIZE >= 16) && (defined(EA_COMPILER_GNUC) || defined(__clang__)) // If __int128_t/__uint128_t is supported...
+ // numeric_limits<__uint128_t>
+ template<>
+ struct numeric_limits<__uint128_t>
+ {
+ typedef __uint128_t value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_U(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = false;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_U(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+
+
+ // numeric_limits<__int128_t>
+ template<>
+ struct numeric_limits<__int128_t>
+ {
+ typedef __int128_t value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = EASTL_LIMITS_DIGITS_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int digits10 = EASTL_LIMITS_DIGITS10_S(value_type);
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = true;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = true;
+ static EA_CONSTEXPR_OR_CONST int radix = 2;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = 0;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = 0;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = true;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_toward_zero;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = false;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = false;
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = false;
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_absent;
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false;
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = false;
+
+ static EA_CONSTEXPR value_type min()
+ { return EASTL_LIMITS_MIN_S(value_type); }
+
+ static EA_CONSTEXPR value_type max()
+ { return EASTL_LIMITS_MAX_S(value_type); }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return EASTL_LIMITS_MIN_S(value_type); }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return value_type(); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return static_cast<value_type>(0); }
+ };
+ #endif
+
+
+ // numeric_limits<float>
+ template<>
+ struct numeric_limits<float>
+ {
+ typedef float value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = FLT_MANT_DIG;
+ static EA_CONSTEXPR_OR_CONST int digits10 = FLT_DIG;
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = FLT_MANT_DIG;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = false;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = false;
+ static EA_CONSTEXPR_OR_CONST int radix = FLT_RADIX;
+ static EA_CONSTEXPR_OR_CONST int min_exponent = FLT_MIN_EXP;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = FLT_MIN_10_EXP;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = FLT_MAX_EXP;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = FLT_MAX_10_EXP;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = false;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_to_nearest;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = true;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = true; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = true; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_present; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = has_infinity && has_quiet_NaN && (has_denorm == denorm_present);
+
+ #if EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED
+ static value_type min()
+ { return FLT_MIN; }
+
+ static value_type max()
+ { return FLT_MAX; }
+
+ static value_type lowest()
+ { return -FLT_MAX; }
+
+ static value_type epsilon()
+ { return FLT_EPSILON; }
+
+ static value_type round_error()
+ { return 0.5f; }
+
+ static value_type infinity()
+ { return Internal::gFloatInfinity; }
+
+ static value_type quiet_NaN()
+ { return Internal::gFloatNaN; }
+
+ static value_type signaling_NaN()
+ { return Internal::gFloatSNaN; }
+
+ static value_type denorm_min()
+ { return Internal::gFloatDenorm; }
+
+ #elif (defined(EA_COMPILER_GNUC) || defined(__clang__)) && defined(__FLT_MIN__)
+ static EA_CONSTEXPR value_type min()
+ { return __FLT_MIN__; }
+
+ static EA_CONSTEXPR value_type max()
+ { return __FLT_MAX__; }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return -__FLT_MAX__; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return __FLT_EPSILON__; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0.5f; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return __builtin_huge_valf(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return __builtin_nanf(""); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return __builtin_nansf(""); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return __FLT_DENORM_MIN__; }
+
+ #elif defined(_CPPLIB_VER) // If using the Dinkumware Standard library...
+ static value_type min()
+ { return FLT_MIN; }
+
+ static value_type max()
+ { return FLT_MAX; }
+
+ static value_type lowest()
+ { return -FLT_MAX; }
+
+ static value_type epsilon()
+ { return FLT_EPSILON; }
+
+ static value_type round_error()
+ { return 0.5f; }
+
+ #if defined(_MSVC_STL_UPDATE) && _MSVC_STL_UPDATE >= 202206L // If using a recent version of MSVC's STL...
+ static value_type infinity()
+ { return __builtin_huge_valf(); }
+
+ static value_type quiet_NaN()
+ { return __builtin_nanf("0"); }
+
+ static value_type signaling_NaN()
+ { return __builtin_nansf("1"); }
+
+ static value_type denorm_min()
+ { return FLT_TRUE_MIN; }
+ #else
+ static value_type infinity()
+ { return _CSTD _FInf._Float; }
+
+ static value_type quiet_NaN()
+ { return _CSTD _FNan._Float; }
+
+ static value_type signaling_NaN()
+ { return _CSTD _FSnan._Float; }
+
+ static value_type denorm_min()
+ { return _CSTD _FDenorm._Float; }
+ #endif
+
+ #endif
+ };
+
+
+ // numeric_limits<double>
+ template<>
+ struct numeric_limits<double>
+ {
+ typedef double value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = DBL_MANT_DIG;
+ static EA_CONSTEXPR_OR_CONST int digits10 = DBL_DIG;
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = DBL_MANT_DIG;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = false;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = false;
+ static EA_CONSTEXPR_OR_CONST int radix = FLT_RADIX; // FLT_RADIX applies to all floating point types.
+ static EA_CONSTEXPR_OR_CONST int min_exponent = DBL_MIN_EXP;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = DBL_MIN_10_EXP;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = DBL_MAX_EXP;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = DBL_MAX_10_EXP;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = false;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_to_nearest;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = true;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = true; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = true; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_present; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = has_infinity && has_quiet_NaN && (has_denorm == denorm_present);
+
+ #if EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED
+ static value_type min()
+ { return DBL_MIN; }
+
+ static value_type max()
+ { return DBL_MAX; }
+
+ static value_type lowest()
+ { return -DBL_MAX; }
+
+ static value_type epsilon()
+ { return DBL_EPSILON; }
+
+ static value_type round_error()
+ { return 0.5f; }
+
+ static value_type infinity()
+ { return Internal::gDoubleInfinity; }
+
+ static value_type quiet_NaN()
+ { return Internal::gDoubleNaN; }
+
+ static value_type signaling_NaN()
+ { return Internal::gDoubleSNaN; }
+
+ static value_type denorm_min()
+ { return Internal::gDoubleDenorm; }
+
+ #elif (defined(EA_COMPILER_GNUC) || defined(__clang__)) && defined(__DBL_MIN__)
+ static EA_CONSTEXPR value_type min()
+ { return __DBL_MIN__; }
+
+ static EA_CONSTEXPR value_type max()
+ { return __DBL_MAX__; }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return -__DBL_MAX__; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return __DBL_EPSILON__; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0.5f; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return __builtin_huge_val(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return __builtin_nan(""); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return __builtin_nans(""); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return __DBL_DENORM_MIN__; }
+
+ #elif defined(_CPPLIB_VER) // If using the Dinkumware Standard library...
+ static value_type min()
+ { return DBL_MIN; }
+
+ static value_type max()
+ { return DBL_MAX; }
+
+ static value_type lowest()
+ { return -DBL_MAX; }
+
+ static value_type epsilon()
+ { return DBL_EPSILON; }
+
+ static value_type round_error()
+ { return 0.5f; }
+
+ #if defined(_MSVC_STL_UPDATE) && _MSVC_STL_UPDATE >= 202206L // If using a recent version of MSVC's STL...
+ static value_type infinity()
+ { return __builtin_huge_val(); }
+
+ static value_type quiet_NaN()
+ { return __builtin_nan("0"); }
+
+ static value_type signaling_NaN()
+ { return __builtin_nans("1"); }
+
+ static value_type denorm_min()
+ { return DBL_TRUE_MIN; }
+ #else
+ static value_type infinity()
+ { return _CSTD _Inf._Double; }
+
+ static value_type quiet_NaN()
+ { return _CSTD _Nan._Double; }
+
+ static value_type signaling_NaN()
+ { return _CSTD _Snan._Double; }
+
+ static value_type denorm_min()
+ { return _CSTD _Denorm._Double; }
+ #endif
+
+ #endif
+ };
+
+
+ // numeric_limits<long double>
+ template<>
+ struct numeric_limits<long double>
+ {
+ typedef long double value_type;
+
+ static EA_CONSTEXPR_OR_CONST bool is_specialized = true;
+ static EA_CONSTEXPR_OR_CONST int digits = LDBL_MANT_DIG;
+ static EA_CONSTEXPR_OR_CONST int digits10 = LDBL_DIG;
+ static EA_CONSTEXPR_OR_CONST int max_digits10 = LDBL_MANT_DIG;
+ static EA_CONSTEXPR_OR_CONST bool is_signed = true;
+ static EA_CONSTEXPR_OR_CONST bool is_integer = false;
+ static EA_CONSTEXPR_OR_CONST bool is_exact = false;
+ static EA_CONSTEXPR_OR_CONST int radix = FLT_RADIX; // FLT_RADIX applies to all floating point types.
+ static EA_CONSTEXPR_OR_CONST int min_exponent = LDBL_MIN_EXP;
+ static EA_CONSTEXPR_OR_CONST int min_exponent10 = LDBL_MIN_10_EXP;
+ static EA_CONSTEXPR_OR_CONST int max_exponent = LDBL_MAX_EXP;
+ static EA_CONSTEXPR_OR_CONST int max_exponent10 = LDBL_MAX_10_EXP;
+ static EA_CONSTEXPR_OR_CONST bool is_bounded = true;
+ static EA_CONSTEXPR_OR_CONST bool is_modulo = false;
+ static EA_CONSTEXPR_OR_CONST bool traps = true;
+ static EA_CONSTEXPR_OR_CONST bool tinyness_before = false;
+ static EA_CONSTEXPR_OR_CONST float_round_style round_style = round_to_nearest;
+ static EA_CONSTEXPR_OR_CONST bool has_infinity = true;
+ static EA_CONSTEXPR_OR_CONST bool has_quiet_NaN = true; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool has_signaling_NaN = true; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST float_denorm_style has_denorm = denorm_present; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool has_denorm_loss = false; // This may be wrong for some platforms.
+ static EA_CONSTEXPR_OR_CONST bool is_iec559 = has_infinity && has_quiet_NaN && (has_denorm == denorm_present);
+
+ #if EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED
+ static value_type min()
+ { return LDBL_MIN; }
+
+ static value_type max()
+ { return LDBL_MAX; }
+
+ static value_type lowest()
+ { return -LDBL_MAX; }
+
+ static value_type epsilon()
+ { return LDBL_EPSILON; }
+
+ static value_type round_error()
+ { return 0.5f; }
+
+ static value_type infinity()
+ { return Internal::gLongDoubleInfinity; }
+
+ static value_type quiet_NaN()
+ { return Internal::gLongDoubleNaN; }
+
+ static value_type signaling_NaN()
+ { return Internal::gLongDoubleSNaN; }
+
+ static value_type denorm_min()
+ { return Internal::gLongDoubleDenorm; }
+
+ #elif (defined(EA_COMPILER_GNUC) || defined(__clang__)) && defined(__LDBL_MIN__)
+ static EA_CONSTEXPR value_type min()
+ { return __LDBL_MIN__; }
+
+ static EA_CONSTEXPR value_type max()
+ { return __LDBL_MAX__; }
+
+ static EA_CONSTEXPR value_type lowest()
+ { return -__LDBL_MAX__; }
+
+ static EA_CONSTEXPR value_type epsilon()
+ { return __LDBL_EPSILON__; }
+
+ static EA_CONSTEXPR value_type round_error()
+ { return 0.5f; }
+
+ static EA_CONSTEXPR value_type infinity()
+ { return __builtin_huge_val(); }
+
+ static EA_CONSTEXPR value_type quiet_NaN()
+ { return __builtin_nan(""); }
+
+ static EA_CONSTEXPR value_type signaling_NaN()
+ { return __builtin_nans(""); }
+
+ static EA_CONSTEXPR value_type denorm_min()
+ { return __LDBL_DENORM_MIN__; }
+
+ #elif defined(_CPPLIB_VER) // If using the Dinkumware Standard library...
+ static value_type min()
+ { return LDBL_MIN; }
+
+ static value_type max()
+ { return LDBL_MAX; }
+
+ static value_type lowest()
+ { return -LDBL_MAX; }
+
+ static value_type epsilon()
+ { return LDBL_EPSILON; }
+
+ static value_type round_error()
+ { return 0.5f; }
+
+ #if defined(_MSVC_STL_UPDATE) && _MSVC_STL_UPDATE >= 202206L // If using a recent version of MSVC's STL...
+ static value_type infinity()
+ { return __builtin_huge_val(); }
+
+ static value_type quiet_NaN()
+ { return __builtin_nan("0"); }
+
+ static value_type signaling_NaN()
+ { return __builtin_nans("1"); }
+
+ static value_type denorm_min()
+ { return LDBL_TRUE_MIN; }
+ #else
+ static value_type infinity()
+ { return _CSTD _LInf._Long_double; }
+
+ static value_type quiet_NaN()
+ { return _CSTD _LNan._Long_double; }
+
+ static value_type signaling_NaN()
+ { return _CSTD _LSnan._Long_double; }
+
+ static value_type denorm_min()
+ { return _CSTD _LDenorm._Long_double; }
+ #endif
+
+ #endif
+ };
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING()
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/optional.h b/EASTL/include/EASTL/optional.h
new file mode 100644
index 0000000..15cacd0
--- /dev/null
+++ b/EASTL/include/EASTL/optional.h
@@ -0,0 +1,728 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the class template optional that represents optional objects.
+//
+// An optional object is an object that contains the storage for another object and
+// manages the lifetime of this contained object, if any. The contained object may be
+// initialized after the optional object has been initialized, and may be destroyed before
+// the optional object has been destroyed.
+//
+// Any instance of optional<T> at any given time either contains a value or does not
+// contain a value. When an instance of optional<T> contains a value, it means that an
+// object of type T, referred to as the optional object's contained value, is allocated
+// within the storage of the optional object. Implementations are not permitted to use
+// additional storage, such as dynamic memory, to allocate its contained value.
+//
+// The contained value is allocated in the optional<T> storage suitably
+// aligned for the type T. When an object of type optional<T> is contextually converted to
+// bool, the conversion returns true if the object contains a value; otherwise the
+// conversion returns false.
+//
+// T shall be an object type and satisfy the requirements of Destructible.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_OPTIONAL_H
+#define EASTL_OPTIONAL_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/memory.h> // eastl::addressof
+#include <EASTL/internal/in_place_t.h> // eastl::in_place_t
+
+#if EASTL_EXCEPTIONS_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <stdexcept> // std::logic_error.
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+#if defined(EASTL_OPTIONAL_ENABLED) && EASTL_OPTIONAL_ENABLED
+
+EA_DISABLE_VC_WARNING(4582 4583) // constructor/destructor is not implicitly called
+
+namespace eastl
+{
+ #if EASTL_EXCEPTIONS_ENABLED
+ #define EASTL_OPTIONAL_NOEXCEPT
+ #else
+ #define EASTL_OPTIONAL_NOEXCEPT EA_NOEXCEPT
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// nullopt_t
+ ///
+ /// nullopt_t is class type used to indicate eastl::optional type with uninitialized state.
+ ///
+ struct nullopt_tag_t {};
+
+ struct nullopt_t
+ {
+ EA_CONSTEXPR nullopt_t(nullopt_tag_t) {}
+ };
+
+ EA_CONSTEXPR nullopt_t nullopt{nullopt_tag_t{}};
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// bad_optional_access
+ ///
+ #if EASTL_EXCEPTIONS_ENABLED
+ struct bad_optional_access : public std::logic_error
+ {
+ bad_optional_access() : std::logic_error("eastl::bad_optional_access exception") {}
+ virtual ~bad_optional_access() EA_NOEXCEPT {}
+ };
+ #endif
+
+ namespace Internal
+ {
+ ///////////////////////////////////////////////////////////////////////////////
+ /// optional_storage
+ ///
+ template<typename T, bool IsTriviallyDestructible = eastl::is_trivially_destructible_v<T>>
+ struct optional_storage
+ {
+ typedef typename eastl::remove_const<T>::type value_type;
+
+ optional_storage() EA_NOEXCEPT = default;
+
+ inline optional_storage(const value_type& v)
+ : engaged(true)
+ {
+ ::new (eastl::addressof(val)) value_type(v);
+ }
+
+ inline optional_storage(value_type&& v)
+ : engaged(true)
+ {
+ ::new (eastl::addressof(val)) value_type(eastl::move(v));
+ }
+
+ inline ~optional_storage()
+ {
+ if (engaged)
+ destruct_value();
+ }
+
+ template <class... Args>
+ inline explicit optional_storage(in_place_t, Args&&... args)
+ : engaged(true)
+ {
+ ::new (eastl::addressof(val)) T{eastl::forward<Args>(args)...};
+ }
+
+ template <typename U,
+ typename... Args,
+ typename = eastl::enable_if_t<eastl::is_constructible_v<T, std::initializer_list<U>&, Args&&...>>>
+ inline explicit optional_storage(in_place_t, std::initializer_list<U> ilist, Args&&... args)
+ : engaged(true)
+ {
+ ::new (eastl::addressof(val)) value_type{ilist, eastl::forward<Args>(args)...};
+ }
+
+ inline void destruct_value() { (*(value_type*)eastl::addressof(val)).~value_type(); }
+
+
+ eastl::aligned_storage_t<sizeof(value_type), eastl::alignment_of_v<value_type>> val;
+ bool engaged = false;
+ };
+
+
+ /// optional_storage<T, true>
+ ///
+ /// Template specialization for trivial types to satisfy the requirement that optional<T> is trivially
+ /// destructible when T is trivially destructible.
+ ///
+ template<typename T>
+ struct optional_storage<T, true>
+ {
+ typedef eastl::remove_const_t<T> value_type;
+
+ optional_storage() EA_NOEXCEPT = default;
+
+ inline optional_storage(const value_type& v)
+ : engaged(true)
+ {
+ ::new (eastl::addressof(val)) value_type(v);
+ }
+
+ inline optional_storage(value_type&& v)
+ : engaged(true)
+ {
+ ::new (eastl::addressof(val)) value_type(eastl::move(v));
+ }
+
+ // Removed to make optional<T> trivially destructible when T is trivially destructible.
+ //
+ // inline ~optional_storage()
+ // {
+ // if (engaged)
+ // destruct_value();
+ // }
+ ~optional_storage() EA_NOEXCEPT = default;
+
+ template <class... Args>
+ inline explicit optional_storage(in_place_t, Args&&... args)
+ : engaged(true)
+ {
+ ::new (eastl::addressof(val)) value_type{eastl::forward<Args>(args)...};
+ }
+
+ template <typename U,
+ typename... Args,
+ typename = eastl::enable_if_t<eastl::is_constructible_v<T, std::initializer_list<U>&, Args&&...>>>
+ inline explicit optional_storage(in_place_t, std::initializer_list<U> ilist, Args&&... args)
+ : engaged(true)
+ {
+ ::new (eastl::addressof(val)) value_type{ilist, eastl::forward<Args>(args)...};
+ }
+
+ inline void destruct_value() {} // no implementation necessary since T is trivially destructible.
+
+
+ eastl::aligned_storage_t<sizeof(value_type), eastl::alignment_of_v<value_type>> val;
+ bool engaged = false;
+ };
+ } // namespace Internal
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// optional
+ ///
+ template <typename T>
+ class optional : private Internal::optional_storage<T>
+ {
+ typedef Internal::optional_storage<T> base_type;
+
+ using base_type::destruct_value;
+ using base_type::engaged;
+ using base_type::val;
+
+ public:
+ typedef T value_type;
+
+ // (ISOCPP 20.6.3) A program that necessitates the instantiation of template optional for a reference type, or
+ // for possibly cv-qualified types in_place_t or nullopt_t is ill-formed.
+ static_assert(!eastl::is_reference<value_type>::value, "eastl::optional of a reference type is ill-formed");
+ static_assert(!eastl::is_same<value_type, in_place_t>::value, "eastl::optional of a in_place_t type is ill-formed");
+ static_assert(!eastl::is_same<value_type, nullopt_t>::value, "eastl::optional of a nullopt_t type is ill-formed");
+
+ inline EA_CONSTEXPR optional() EA_NOEXCEPT {}
+ inline EA_CONSTEXPR optional(nullopt_t) EA_NOEXCEPT {}
+ inline EA_CONSTEXPR optional(const value_type& value) : base_type(value) {}
+ inline EA_CONSTEXPR optional(value_type&& value) EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible_v<T>)
+ : base_type(eastl::move(value))
+ {
+ }
+
+ optional(const optional& other)
+ {
+ engaged = other.engaged;
+
+ if (engaged)
+ {
+ auto* pOtherValue = reinterpret_cast<const T*>(eastl::addressof(other.val));
+ ::new (eastl::addressof(val)) value_type(*pOtherValue);
+ }
+ }
+
+ optional(optional&& other)
+ {
+ engaged = other.engaged;
+
+ if (engaged)
+ {
+ auto* pOtherValue = reinterpret_cast<T*>(eastl::addressof(other.val));
+ ::new (eastl::addressof(val)) value_type(eastl::move(*pOtherValue));
+ }
+ }
+
+ template <typename... Args>
+ inline EA_CONSTEXPR explicit optional(in_place_t, Args&&... args)
+ : base_type(in_place, eastl::forward<Args>(args)...)
+ {
+ }
+
+ template <typename U,
+ typename... Args,
+ typename = eastl::enable_if_t<eastl::is_constructible_v<T, std::initializer_list<U>&, Args&&...>>>
+ inline explicit optional(in_place_t, std::initializer_list<U> ilist, Args&&... args)
+ : base_type(in_place, ilist, eastl::forward<Args>(args)...)
+ {
+ }
+
+ template <typename U = value_type,
+ typename = eastl::enable_if_t<eastl::is_constructible_v<T, U&&> &&
+ !eastl::is_same_v<eastl::remove_cvref_t<U>, eastl::in_place_t> &&
+ !eastl::is_same_v<eastl::remove_cvref_t<U>, optional>>>
+ inline explicit EA_CONSTEXPR optional(U&& value)
+ : base_type(in_place, eastl::forward<U>(value))
+ {
+ }
+
+ inline optional& operator=(nullopt_t)
+ {
+ reset();
+ return *this;
+ }
+
+ inline optional& operator=(const optional& other)
+ {
+ auto* pOtherValue = reinterpret_cast<const T*>(eastl::addressof(other.val));
+ if (engaged == other.engaged)
+ {
+ if (engaged)
+ *get_value_address() = *pOtherValue;
+ }
+ else
+ {
+ if (engaged)
+ {
+ destruct_value();
+ engaged = false;
+ }
+ else
+ {
+ construct_value(*pOtherValue);
+ engaged = true;
+ }
+ }
+ return *this;
+ }
+
+ inline optional& operator=(optional&& other)
+ EA_NOEXCEPT_IF(EA_NOEXCEPT(eastl::is_nothrow_move_assignable<value_type>::value &&
+ eastl::is_nothrow_move_constructible<value_type>::value))
+ {
+ auto* pOtherValue = reinterpret_cast<T*>(eastl::addressof(other.val));
+ if (engaged == other.engaged)
+ {
+ if (engaged)
+ *get_value_address() = eastl::move(*pOtherValue);
+ }
+ else
+ {
+ if (engaged)
+ {
+ destruct_value();
+ engaged = false;
+ }
+ else
+ {
+ construct_value(eastl::move(*pOtherValue));
+ engaged = true;
+ }
+ }
+ return *this;
+ }
+
+ template <class U, typename = typename eastl::enable_if<eastl::is_same<eastl::decay_t<U>, T>::value>::type>
+ inline optional& operator=(U&& u)
+ {
+ if(engaged)
+ {
+ *get_value_address() = eastl::forward<U>(u);
+ }
+ else
+ {
+ engaged = true;
+ construct_value(eastl::forward<U>(u));
+ }
+
+ return *this;
+ }
+
+ EA_CONSTEXPR inline explicit operator bool() const { return engaged; }
+
+ EA_CONSTEXPR inline bool has_value() const EA_NOEXCEPT { return engaged; }
+
+ template <class U>
+ inline value_type value_or(U&& default_value) const
+ { return engaged ? *get_value_address() : static_cast<value_type>(eastl::forward<U>(default_value)); }
+
+ template <class U>
+ inline value_type value_or(U&& default_value)
+ { return engaged ? *get_value_address() : static_cast<value_type>(eastl::forward<U>(default_value)); }
+
+ inline T& value()& { return get_value_ref(); }
+ inline const T& value() const& { return get_value_ref(); }
+ inline T&& value()&& { return get_rvalue_ref(); }
+ inline const T&& value() const&& { return get_rvalue_ref(); }
+
+ inline T* operator->() { return get_value_address(); }
+ inline const T* operator->() const { return get_value_address(); }
+ inline T& operator*()& { return get_value_ref(); }
+ inline T&& operator*()&& { return get_rvalue_ref(); }
+ inline const T& operator*() const& { return get_value_ref(); }
+ inline const T&& operator*() const&& { return get_rvalue_ref(); }
+
+ template <class... Args>
+ void emplace(Args&&... args)
+ {
+ if (engaged)
+ {
+ destruct_value();
+ engaged = false;
+ }
+ construct_value(eastl::forward<Args>(args)...);
+ engaged = true;
+ }
+
+ template <class U, class... Args>
+ void emplace(std::initializer_list<U> ilist, Args&&... args)
+ {
+ if (engaged)
+ {
+ destruct_value();
+ engaged = false;
+ }
+ construct_value(ilist, eastl::forward<Args>(args)...);
+ engaged = true;
+ }
+
+ inline void swap(optional& other)
+ EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible<T>::value&& eastl::is_nothrow_swappable<T>::value)
+ {
+ using eastl::swap;
+ if (engaged == other.engaged)
+ {
+ if (engaged)
+ swap(**this, *other);
+ }
+ else
+ {
+ if (engaged)
+ {
+ other.construct_value(eastl::move(*(value_type*)eastl::addressof(val)));
+ destruct_value();
+ }
+ else
+ {
+ construct_value(eastl::move(*((value_type*)eastl::addressof(other.val))));
+ other.destruct_value();
+ }
+
+ swap(engaged, other.engaged);
+ }
+ }
+
+ inline void reset()
+ {
+ if (engaged)
+ {
+ destruct_value();
+ engaged = false;
+ }
+ }
+
+ private:
+
+ template <class... Args>
+ inline void construct_value(Args&&... args)
+ { ::new (eastl::addressof(val)) value_type(eastl::forward<Args>(args)...); }
+
+ inline T* get_value_address() EASTL_OPTIONAL_NOEXCEPT
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(!engaged)
+ throw bad_optional_access();
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_ASSERT_MSG(engaged, "no value to retrieve");
+ #endif
+ return reinterpret_cast<T*>(eastl::addressof(val));
+ }
+
+ inline const T* get_value_address() const EASTL_OPTIONAL_NOEXCEPT
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(!engaged)
+ throw bad_optional_access();
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_ASSERT_MSG(engaged, "no value to retrieve");
+ #endif
+ return reinterpret_cast<const T*>(eastl::addressof(val));
+ }
+
+ inline value_type& get_value_ref() EASTL_OPTIONAL_NOEXCEPT
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(!engaged)
+ throw bad_optional_access();
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_ASSERT_MSG(engaged, "no value to retrieve");
+ #endif
+ return *(value_type*)eastl::addressof(val);
+ }
+
+ inline const value_type& get_value_ref() const EASTL_OPTIONAL_NOEXCEPT
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(!engaged)
+ throw bad_optional_access();
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_ASSERT_MSG(engaged, "no value to retrieve");
+ #endif
+ return *(value_type*)eastl::addressof(val);
+ }
+
+ inline value_type&& get_rvalue_ref() EASTL_OPTIONAL_NOEXCEPT
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(!engaged)
+ throw bad_optional_access();
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_ASSERT_MSG(engaged, "no value to retrieve");
+ #endif
+ return eastl::move(*((value_type*)eastl::addressof(val)));
+ }
+ }; // class optional
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// global swap
+ ///
+ template <class T>
+ void swap(optional<T>& lhs, optional<T>& rhs) EA_NOEXCEPT_IF(EA_NOEXCEPT(lhs.swap(rhs)))
+ { lhs.swap(rhs); }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// global comparisions
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/optional/operator_cmp
+ ///
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // Compare two optional objects
+ //
+ template <class T>
+ inline EA_CONSTEXPR bool operator==(const optional<T>& lhs, const optional<T>& rhs)
+ {
+ // NOTE:
+ //
+ // Code collapsed onto a single line to satisfy requirements for constexpr expressions
+ // being a single line return statement.
+ //
+ // if(bool(lhs) != bool(rhs))
+ // return false;
+
+ // if(bool(lhs) == false)
+ // return true;
+
+ // return *lhs == *rhs;
+
+ return (bool(lhs) != bool(rhs)) ? false : (bool(lhs) == false) ? true : *lhs == *rhs;
+ }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<(const optional<T>& lhs, const optional<T>& rhs)
+ {
+ // NOTE:
+ //
+ // Code collapsed onto a single line to satisify requirements for constexpr expressions
+ // being a single line return statement.
+ //
+ // if (!bool(rhs))
+ // return false;
+
+ // if (!bool(lhs))
+ // return true;
+
+ // return *lhs < *rhs;
+
+ return (!bool(rhs)) ? false : (!bool(lhs)) ? true : *lhs < *rhs;
+ }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator!=(const optional<T>& lhs, const optional<T>& rhs)
+ { return !(lhs == rhs); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<=(const optional<T>& lhs, const optional<T>& rhs)
+ { return !(rhs < lhs); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>(const optional<T>& lhs, const optional<T>& rhs)
+ { return rhs < lhs; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>=(const optional<T>& lhs, const optional<T>& rhs)
+ { return !(lhs < rhs); }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <class T, class U=T> requires std::three_way_comparable_with<T, U>
+ inline EA_CONSTEXPR std::compare_three_way_result_t<T, U> operator<=>(const optional<T>& lhs, const optional<U>& rhs)
+ {
+ if (lhs && rhs)
+ {
+ return *lhs <=> *rhs;
+ }
+ return lhs.has_value() <=> rhs.has_value();
+ }
+#endif
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // Compare an optional object with a nullopt
+ //
+ template <class T>
+ inline EA_CONSTEXPR bool operator==(const optional<T>& opt, eastl::nullopt_t) EA_NOEXCEPT
+ { return !opt; }
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <class T>
+ inline EA_CONSTEXPR std::strong_ordering operator<=>(const optional<T>& opt, eastl::nullopt_t) EA_NOEXCEPT
+ { return opt.has_value() <=> false; }
+#else
+ template <class T>
+ inline EA_CONSTEXPR bool operator==(eastl::nullopt_t, const optional<T>& opt) EA_NOEXCEPT
+ { return !opt; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator!=(const optional<T>& opt, eastl::nullopt_t) EA_NOEXCEPT
+ { return bool(opt); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator!=(eastl::nullopt_t, const optional<T>& opt) EA_NOEXCEPT
+ { return bool(opt); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<(const optional<T>&, eastl::nullopt_t) EA_NOEXCEPT
+ { return false; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<(eastl::nullopt_t, const optional<T>& opt) EA_NOEXCEPT
+ { return bool(opt); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<=(const optional<T>& opt, eastl::nullopt_t) EA_NOEXCEPT
+ { return !opt; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<=(eastl::nullopt_t, const optional<T>&) EA_NOEXCEPT
+ { return true; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>(const optional<T>& opt, eastl::nullopt_t) EA_NOEXCEPT
+ { return bool(opt); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>(eastl::nullopt_t, const optional<T>&) EA_NOEXCEPT
+ { return false; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>=(const optional<T>&, eastl::nullopt_t) EA_NOEXCEPT
+ { return true; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>=(eastl::nullopt_t, const optional<T>& opt) EA_NOEXCEPT
+ { return !opt; }
+#endif
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // Compare an optional object with a T
+ //
+ template <class T>
+ inline EA_CONSTEXPR bool operator==(const optional<T>& opt, const T& value)
+ { return bool(opt) ? *opt == value : false; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator==(const T& value, const optional<T>& opt)
+ { return bool(opt) ? value == *opt : false; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator!=(const optional<T>& opt, const T& value)
+ { return bool(opt) ? !(*opt == value) : true; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator!=(const T& value, const optional<T>& opt)
+ { return bool(opt) ? !(value == *opt) : true; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<(const optional<T>& opt, const T& value)
+ { return bool(opt) ? *opt < value : true; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<(const T& value, const optional<T>& opt)
+ { return bool(opt) ? value < *opt : false; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<=(const optional<T>& opt, const T& value)
+ { return !(opt > value); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator<=(const T& value, const optional<T>& opt)
+ { return !(value > opt); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>(const optional<T>& opt, const T& value)
+ { return bool(opt) ? value < *opt : false; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>(const T& value, const optional<T>& opt)
+ { return bool(opt) ? *opt < value : true; }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>=(const optional<T>& opt, const T& value)
+ { return !(opt < value); }
+
+ template <class T>
+ inline EA_CONSTEXPR bool operator>=(const T& value, const optional<T>& opt)
+ { return !(value < opt); }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <class T, class U=T> requires std::three_way_comparable_with<T, U>
+ inline EA_CONSTEXPR std::compare_three_way_result_t<T, U> operator<=>(const optional<T>& opt, const U& value)
+ { return (opt.has_value()) ? *opt <=> value : std::strong_ordering::less; }
+#endif
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// hash
+ ///
+ template <typename T>
+ struct hash<eastl::optional<T>>
+ {
+ typedef eastl::optional<T> argument_type;
+ typedef size_t result_type;
+
+ result_type operator()(const argument_type& opt) const EA_NOEXCEPT
+ {
+ if (opt)
+ return eastl::hash<T>()(*opt);
+ else
+ return 0; // no value to generate a hash from
+ }
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// make_optional
+ ///
+ template <class T>
+ inline EA_CONSTEXPR optional<decay_t<T>> make_optional(T&& value)
+ {
+ return optional<decay_t<T>>(eastl::forward<T>(value));
+ }
+
+ template <class T, class... Args>
+ inline EA_CONSTEXPR optional<T> make_optional(Args&&... args)
+ {
+ return optional<T>(eastl::in_place, eastl::forward<Args>(args)...);
+ }
+
+ template <class T, class U, class... Args>
+ inline EA_CONSTEXPR optional<T> make_optional(std::initializer_list<U> il, Args&&... args)
+ {
+ return eastl::optional<T>(eastl::in_place, il, eastl::forward<Args>(args)...);
+ }
+
+
+ #undef EASTL_OPTIONAL_NOEXCEPT
+
+} // namespace eastl
+
+EA_RESTORE_VC_WARNING()
+
+#endif // EASTL_OPTIONAL_ENABLED
+#endif // EASTL_OPTIONAL_H
diff --git a/EASTL/include/EASTL/priority_queue.h b/EASTL/include/EASTL/priority_queue.h
new file mode 100644
index 0000000..ade625a
--- /dev/null
+++ b/EASTL/include/EASTL/priority_queue.h
@@ -0,0 +1,491 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a priority_queue that is just like the C++
+// std::priority_queue adapter class, except it has a couple extension functions.
+// The primary distinctions between this priority_queue and std::priority_queue are:
+// - priority_queue has a couple extension functions that allow you to
+// use a priority queue in extra ways. See the code for documentation.
+// - priority_queue can contain objects with alignment requirements.
+// std::priority_queue cannot do so without a bit of tedious non-portable effort.
+// - priority_queue supports debug memory naming natively.
+// - priority_queue is easier to read, debug, and visualize.
+// - priority_queue is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_PRIORITY_QUEUE_H
+#define EASTL_PRIORITY_QUEUE_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/vector.h>
+#include <EASTL/heap.h>
+#include <EASTL/functional.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4530 4571);
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_PRIORITY_QUEUE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_PRIORITY_QUEUE_DEFAULT_NAME
+ #define EASTL_PRIORITY_QUEUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " priority_queue" // Unless the user overrides something, this is "EASTL priority_queue".
+ #endif
+
+ /// EASTL_PRIORITY_QUEUE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_PRIORITY_QUEUE_DEFAULT_ALLOCATOR
+ #define EASTL_PRIORITY_QUEUE_DEFAULT_ALLOCATOR allocator_type(EASTL_PRIORITY_QUEUE_DEFAULT_NAME)
+ #endif
+
+
+
+ /// priority_queue
+ ///
+ /// The behaviour of this class is just like the std::priority_queue
+ /// class and you can refer to std documentation on it.
+ ///
+ /// A priority_queue is an adapter container which implements a
+ /// queue-like container whereby pop() returns the item of highest
+ /// priority. The entire queue isn't necessarily sorted; merely the
+ /// first item in the queue happens to be of higher priority than
+ /// other items. You can read about priority_queues in many books
+ /// on algorithms, such as "Algorithms" by Robert Sedgewick.
+ ///
+ /// The Container type is a container which is random access and
+ /// supports empty(), size(), clear(), insert(), front(),
+ /// push_back(), and pop_back(). You would typically use vector
+ /// or deque.
+ ///
+ /// Note that we don't provide functions in the priority_queue
+ /// interface for working with allocators or names. The reason for
+ /// this is that priority_queue is an adapter class which can work
+ /// with any standard sequence and not necessarily just a sequence
+ /// provided by this library. So what we do is provide a member
+ /// accessor function get_container() which allows the user to
+ /// manipulate the sequence as needed. The user needs to be careful
+ /// not to change the container's contents, however.
+ ///
+ /// Classic heaps allow for the concept of removing arbitrary items
+ /// and changing the priority of arbitrary items, though the C++
+ /// std heap (and thus priority_queue) functions don't support
+ /// these operations. We have extended the heap algorithms and the
+ /// priority_queue implementation to support these operations.
+ ///
+ ///////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container = eastl::vector<T>, typename Compare = eastl::less<typename Container::value_type> >
+ class priority_queue
+ {
+ public:
+ typedef priority_queue<T, Container, Compare> this_type;
+ typedef Container container_type;
+ typedef Compare compare_type;
+ //typedef typename Container::allocator_type allocator_type; // We can't currently declare this because the container may be a type that doesn't have an allocator.
+ typedef typename Container::value_type value_type;
+ typedef typename Container::reference reference;
+ typedef typename Container::const_reference const_reference;
+ typedef typename Container::size_type size_type;
+ typedef typename Container::difference_type difference_type;
+
+ public: // We declare public so that global comparison operators can be implemented without adding an inline level and without tripping up GCC 2.x friend declaration failures. GCC (through at least v4.0) is poor at inlining and performance wins over correctness.
+ container_type c; // The C++ standard specifies that you declare a protected member variable of type Container called 'c'.
+ compare_type comp; // The C++ standard specifies that you declare a protected member variable of type Compare called 'comp'.
+
+ public:
+ priority_queue();
+
+ // Allocator is templated here because we aren't allowed to infer the allocator_type from the Container, as some containers (e.g. array) don't
+ // have allocators. For containers that don't have allocator types, you could use void or char as the Allocator template type.
+
+ template <class Allocator>
+ explicit priority_queue(const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(allocator), comp()
+ {
+ }
+
+ template <class Allocator>
+ priority_queue(const this_type& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(x.c, allocator), comp(x.comp)
+ {
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+ template <class Allocator>
+ priority_queue(this_type&& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(eastl::move(x.c), allocator), comp(x.comp)
+ {
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+ explicit priority_queue(const compare_type& compare);
+ explicit priority_queue(const compare_type& compare, container_type&& x);
+
+ priority_queue(const compare_type& compare, const container_type& x);
+ priority_queue(std::initializer_list<value_type> ilist, const compare_type& compare = compare_type()); // C++11 doesn't specify that std::priority_queue has initializer list support.
+
+ template <typename InputIterator>
+ priority_queue(InputIterator first, InputIterator last);
+
+ template <typename InputIterator>
+ priority_queue(InputIterator first, InputIterator last, const compare_type& compare);
+
+ template <typename InputIterator>
+ priority_queue(InputIterator first, InputIterator last, const compare_type& compare, const container_type& x);
+
+ template <class InputIterator>
+ priority_queue(InputIterator first, InputIterator last, const compare_type& compare, container_type&& x);
+
+ // Additional C++11 support to consider:
+ //
+ // template <class Allocator>
+ // priority_queue(const Compare&, const Allocator&);
+ //
+ // template <class Allocator>
+ // priority_queue(const Compare&, const container_type&, const Allocator&);
+ //
+ // template <class Allocator>
+ // priority_queue(const Compare&, container_type&&, const Allocator&);
+
+ bool empty() const;
+ size_type size() const;
+
+ const_reference top() const;
+
+ void push(const value_type& value);
+
+ void push(value_type&& x);
+
+ template <class... Args>
+ void emplace(Args&&... args);
+
+ void pop();
+
+ void pop(value_type& value); // Extension to the C++11 Standard that allows popping a move-only type (e.g. unique_ptr).
+
+ void change(size_type n); /// Moves the item at the given array index to a new location based on its current priority.
+ void remove(size_type n); /// Removes the item at the given array index.
+
+ container_type& get_container();
+ const container_type& get_container() const;
+
+ void swap(this_type& x) EA_NOEXCEPT_IF((eastl::is_nothrow_swappable<this_type::container_type>::value && eastl::is_nothrow_swappable<this_type::compare_type>::value));
+
+ bool validate() const;
+
+ }; // class priority_queue
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // priority_queue
+ ///////////////////////////////////////////////////////////////////////
+
+
+ template <typename T, typename Container, typename Compare>
+ inline priority_queue<T, Container, Compare>::priority_queue()
+ : c(), // To consider: use c(EASTL_PRIORITY_QUEUE_DEFAULT_ALLOCATOR) here, though that would add the requirement that the user supplied container support this.
+ comp()
+ {
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline priority_queue<T, Container, Compare>::priority_queue(const compare_type& compare)
+ : c(), // To consider: use c(EASTL_PRIORITY_QUEUE_DEFAULT_ALLOCATOR) here, though that would add the requirement that the user supplied container support this.
+ comp(compare)
+ {
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline priority_queue<T, Container, Compare>::priority_queue(const compare_type& compare, const container_type& x)
+ : c(x), comp(compare)
+ {
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline priority_queue<T, Container, Compare>::priority_queue(const compare_type& compare, container_type&& x)
+ : c(eastl::move(x)), comp(compare)
+ {
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline priority_queue<T, Container, Compare>::priority_queue(std::initializer_list<value_type> ilist, const compare_type& compare)
+ : c(), comp(compare)
+ {
+ c.insert(c.end(), ilist.begin(), ilist.end());
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+
+
+ template <typename T, typename Container, typename Compare>
+ template <typename InputIterator>
+ inline priority_queue<T, Container, Compare>::priority_queue(InputIterator first, InputIterator last)
+ : c(first, last), comp()
+ {
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ template <typename InputIterator>
+ inline priority_queue<T, Container, Compare>::priority_queue(InputIterator first, InputIterator last, const compare_type& compare)
+ : c(first, last), comp(compare)
+ {
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ template <typename InputIterator>
+ inline priority_queue<T, Container, Compare>::priority_queue(InputIterator first, InputIterator last, const compare_type& compare, const container_type& x)
+ : c(x), comp(compare)
+ {
+ c.insert(c.end(), first, last);
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ template <typename InputIterator>
+ inline priority_queue<T, Container, Compare>::priority_queue(InputIterator first, InputIterator last, const compare_type& compare, container_type&& x)
+ : c(eastl::move(x)), comp(compare)
+ {
+ c.insert(c.end(), first, last);
+ eastl::make_heap(c.begin(), c.end(), comp);
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline bool priority_queue<T, Container, Compare>::empty() const
+ {
+ return c.empty();
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline typename priority_queue<T, Container, Compare>::size_type
+ priority_queue<T, Container, Compare>::size() const
+ {
+ return c.size();
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline typename priority_queue<T, Container, Compare>::const_reference
+ priority_queue<T, Container, Compare>::top() const
+ {
+ return c.front();
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline void priority_queue<T, Container, Compare>::push(const value_type& value)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ c.push_back(value);
+ eastl::push_heap(c.begin(), c.end(), comp);
+ }
+ catch(...)
+ {
+ c.clear();
+ throw;
+ }
+ #else
+ c.push_back(value);
+ eastl::push_heap(c.begin(), c.end(), comp);
+ #endif
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline void priority_queue<T, Container, Compare>::push(value_type&& value)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ c.push_back(eastl::move(value));
+ eastl::push_heap(c.begin(), c.end(), comp);
+ }
+ catch(...)
+ {
+ c.clear();
+ throw;
+ }
+ #else
+ c.push_back(eastl::move(value));
+ eastl::push_heap(c.begin(), c.end(), comp);
+ #endif
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ template <class... Args>
+ inline void priority_queue<T, Container, Compare>::emplace(Args&&... args)
+ {
+ push(value_type(eastl::forward<Args>(args)...)); // The C++11 Standard 23.6.4/1 states that c.emplace is used, but also declares that c doesn't need to have an emplace function.
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline void priority_queue<T, Container, Compare>::pop()
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ eastl::pop_heap(c.begin(), c.end(), comp);
+ c.pop_back();
+ }
+ catch(...)
+ {
+ c.clear();
+ throw;
+ }
+ #else
+ eastl::pop_heap(c.begin(), c.end(), comp);
+ c.pop_back();
+ #endif
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline void priority_queue<T, Container, Compare>::pop(value_type& value)
+ {
+ value = eastl::move(c.front()); // To consider: value = move_if_noexcept_assignable(c.front());
+ pop();
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline void priority_queue<T, Container, Compare>::change(size_type n) // This function is not in the STL std::priority_queue.
+ {
+ eastl::change_heap(c.begin(), c.size(), n, comp);
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline void priority_queue<T, Container, Compare>::remove(size_type n) // This function is not in the STL std::priority_queue.
+ {
+ eastl::remove_heap(c.begin(), c.size(), n, comp);
+ c.pop_back();
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline typename priority_queue<T, Container, Compare>::container_type&
+ priority_queue<T, Container, Compare>::get_container()
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline const typename priority_queue<T, Container, Compare>::container_type&
+ priority_queue<T, Container, Compare>::get_container() const
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline void priority_queue<T, Container, Compare>::swap(this_type& x) EA_NOEXCEPT_IF((eastl::is_nothrow_swappable<this_type::container_type>::value &&
+ eastl::is_nothrow_swappable<this_type::compare_type>::value))
+ {
+ using eastl::swap;
+ swap(c, x.c);
+ swap(comp, x.comp);
+ }
+
+
+ template <typename T, typename Container, typename Compare>
+ inline bool
+ priority_queue<T, Container, Compare>::validate() const
+ {
+ return c.validate() && eastl::is_heap(c.begin(), c.end(), comp);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container, typename Compare>
+ bool operator==(const priority_queue<T, Container, Compare>& a, const priority_queue<T, Container, Compare>& b)
+ {
+ return (a.c == b.c);
+ }
+
+ template <typename T, typename Container, typename Compare>
+ bool operator<(const priority_queue<T, Container, Compare>& a, const priority_queue<T, Container, Compare>& b)
+ {
+ return (a.c < b.c);
+ }
+
+ template <typename T, typename Container, typename Compare>
+ inline bool operator!=(const priority_queue<T, Container, Compare>& a, const priority_queue<T, Container, Compare>& b)
+ {
+ return !(a.c == b.c);
+ }
+
+ template <typename T, typename Container, typename Compare>
+ inline bool operator>(const priority_queue<T, Container, Compare>& a, const priority_queue<T, Container, Compare>& b)
+ {
+ return (b.c < a.c);
+ }
+
+ template <typename T, typename Container, typename Compare>
+ inline bool operator<=(const priority_queue<T, Container, Compare>& a, const priority_queue<T, Container, Compare>& b)
+ {
+ return !(b.c < a.c);
+ }
+
+ template <typename T, typename Container, typename Compare>
+ inline bool operator>=(const priority_queue<T, Container, Compare>& a, const priority_queue<T, Container, Compare>& b)
+ {
+ return !(a.c < b.c);
+ }
+
+
+ template <class T, class Container, class Compare>
+ inline void swap(priority_queue<T, Container, Compare>& a, priority_queue<T, Container, Compare>& b) EA_NOEXCEPT_IF((eastl::is_nothrow_swappable<typename priority_queue<T, Container, Compare>::container_type>::value &&
+ eastl::is_nothrow_swappable<typename priority_queue<T, Container, Compare>::compare_type>::value)) // EDG has a bug and won't let us use Container in this noexcept statement
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/queue.h b/EASTL/include/EASTL/queue.h
new file mode 100644
index 0000000..8b29555
--- /dev/null
+++ b/EASTL/include/EASTL/queue.h
@@ -0,0 +1,373 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a queue that is just like the C++ std::queue adapter class.
+// There are no significant differences between EASTL/queue and std::queue.
+// We provide this class for completeness and where std STL may not be available.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_QUEUE_H
+#define EASTL_QUEUE_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/deque.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_QUEUE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_QUEUE_DEFAULT_NAME
+ #define EASTL_QUEUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " queue" // Unless the user overrides something, this is "EASTL queue".
+ #endif
+
+ /// EASTL_QUEUE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_QUEUE_DEFAULT_ALLOCATOR
+ #define EASTL_QUEUE_DEFAULT_ALLOCATOR allocator_type(EASTL_QUEUE_DEFAULT_NAME)
+ #endif
+
+
+
+ /// queue
+ ///
+ /// queue is an adapter class provides a FIFO (first-in, first-out) interface
+ /// via wrapping a sequence that provides at least the following operations:
+ /// push_back
+ /// pop_front
+ /// front
+ /// back
+ ///
+ /// In practice this usually means deque, list, intrusive_list. vector and string
+ /// cannot be used because they don't provide pop-front. This is reasonable because
+ /// a vector or string pop_front would be inefficient and could lead to
+ /// silently poor performance.
+ ///
+ template <typename T, typename Container = eastl::deque<T, EASTLAllocatorType, DEQUE_DEFAULT_SUBARRAY_SIZE(T)> >
+ class queue
+ {
+ public:
+ typedef queue<T, Container> this_type;
+ typedef Container container_type;
+ //typedef typename Container::allocator_type allocator_type; // We can't currently declare this because the container may be a type that doesn't have an allocator.
+ typedef typename Container::value_type value_type;
+ typedef typename Container::reference reference;
+ typedef typename Container::const_reference const_reference;
+ typedef typename Container::size_type size_type;
+
+ public: // We declare public so that global comparison operators can be implemented without adding an inline level and without tripping up GCC 2.x friend declaration failures. GCC (through at least v4.0) is poor at inlining and performance wins over correctness.
+ container_type c; // The C++ standard specifies that you declare a protected member variable of type Container called 'c'.
+
+ public:
+ queue();
+
+ // Allocator is templated here because we aren't allowed to infer the allocator_type from the Container, as some containers (e.g. array) don't
+ // have allocators. For containers that don't have allocator types, you could use void or char as the Allocator template type.
+
+ template <class Allocator>
+ explicit queue(const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(allocator)
+ {
+ }
+
+ template <class Allocator>
+ queue(const this_type& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(x.c, allocator)
+ {
+ }
+
+ template <class Allocator>
+ queue(this_type&& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(eastl::move(x.c), allocator)
+ {
+ }
+
+ explicit queue(const container_type& x);
+ explicit queue(container_type&& x);
+
+ // Additional C++11 support to consider:
+ //
+ // template <class Allocator>
+ // queue(const container_type& x, const Allocator& allocator);
+ //
+ // template <class Allocator>
+ // queue(container_type&& x, const Allocator& allocator);
+
+ queue(std::initializer_list<value_type> ilist); // C++11 doesn't specify that std::queue has initializer list support.
+
+ bool empty() const;
+ size_type size() const;
+
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ void push(const value_type& value);
+ void push(value_type&& x);
+
+ template <class... Args>
+ EA_DEPRECATED void emplace_back(Args&&... args); // backwards compatibility
+
+ template <class... Args>
+ decltype(auto) emplace(Args&&... args);
+
+ void pop();
+
+ container_type& get_container();
+ const container_type& get_container() const;
+
+ void swap(this_type& x) EA_NOEXCEPT_IF((eastl::is_nothrow_swappable<this_type::container_type>::value));
+
+ bool validate() const;
+
+ }; // class queue
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // queue
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container>
+ inline queue<T, Container>::queue()
+ : c() // To consider: use c(EASTL_QUEUE_DEFAULT_ALLOCATOR) here, though that would add the requirement that the user supplied container support this.
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Container>
+ inline queue<T, Container>::queue(const Container& x)
+ : c(x)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Container>
+ inline queue<T, Container>::queue(Container&& x)
+ : c(eastl::move(x))
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Container>
+ inline queue<T, Container>::queue(std::initializer_list<value_type> ilist)
+ : c() // We could alternatively use c(ilist) here, but that would require c to have an ilist constructor.
+ {
+ // Better solution but requires an insert function.
+ // c.insert(ilist.begin(), ilist.end());
+
+ // Possibly slower solution but doesn't require an insert function.
+ for(typename std::initializer_list<value_type>::iterator it = ilist.begin(); it != ilist.end(); ++it)
+ {
+ const value_type& value = *it;
+ c.push_back(value);
+ }
+ }
+
+
+ template <typename T, typename Container>
+ inline bool queue<T, Container>::empty() const
+ {
+ return c.empty();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename queue<T, Container>::size_type
+ queue<T, Container>::size() const
+ {
+ return c.size();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename queue<T, Container>::reference
+ queue<T, Container>::front()
+ {
+ return c.front();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename queue<T, Container>::const_reference
+ queue<T, Container>::front() const
+ {
+ return c.front();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename queue<T, Container>::reference
+ queue<T, Container>::back()
+ {
+ return c.back();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename queue<T, Container>::const_reference
+ queue<T, Container>::back() const
+ {
+ return c.back();
+ }
+
+
+ template <typename T, typename Container>
+ inline void queue<T, Container>::push(const value_type& value)
+ {
+ c.push_back(const_cast<value_type&>(value)); // const_cast so that intrusive_list can work. We may revisit this.
+ }
+
+
+ template <typename T, typename Container>
+ inline void queue<T, Container>::push(value_type&& x)
+ {
+ c.push_back(eastl::move(x));
+ }
+
+
+ template <typename T, typename Container>
+ template <class... Args>
+ inline void queue<T, Container>::emplace_back(Args&&... args)
+ {
+ emplace(eastl::forward<Args>(args)...);
+ }
+
+ template <typename T, typename Container>
+ template <class... Args>
+ inline decltype(auto) queue<T, Container>::emplace(Args&&... args)
+ {
+ return c.emplace_back(eastl::forward<Args>(args)...);
+ }
+
+
+ template <typename T, typename Container>
+ inline void queue<T, Container>::pop()
+ {
+ c.pop_front();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename queue<T, Container>::container_type&
+ queue<T, Container>::get_container()
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container>
+ inline const typename queue<T, Container>::container_type&
+ queue<T, Container>::get_container() const
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container>
+ void queue<T, Container>::swap(this_type& x) EA_NOEXCEPT_IF((eastl::is_nothrow_swappable<this_type::container_type>::value))
+ {
+ using eastl::swap;
+ swap(c, x.c);
+ }
+
+
+ template <typename T, typename Container>
+ bool queue<T, Container>::validate() const
+ {
+ return c.validate();
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container>
+ inline bool operator==(const queue<T, Container>& a, const queue<T, Container>& b)
+ {
+ return (a.c == b.c);
+ }
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename Container> requires std::three_way_comparable<Container>
+
+ inline synth_three_way_result<T> operator<=>(const queue<T, Container>& a, const queue<T, Container>& b)
+ {
+ return a.c <=> b.c;
+ }
+#endif
+
+ template <typename T, typename Container>
+ inline bool operator!=(const queue<T, Container>& a, const queue<T, Container>& b)
+ {
+ return !(a.c == b.c);
+ }
+
+ template <typename T, typename Container>
+ inline bool operator<(const queue<T, Container>& a, const queue<T, Container>& b)
+ {
+ return (a.c < b.c);
+ }
+
+ template <typename T, typename Container>
+ inline bool operator>(const queue<T, Container>& a, const queue<T, Container>& b)
+ {
+ return (b.c < a.c);
+ }
+
+ template <typename T, typename Container>
+ inline bool operator<=(const queue<T, Container>& a, const queue<T, Container>& b)
+ {
+ return !(b.c < a.c);
+ }
+
+ template <typename T, typename Container>
+ inline bool operator>=(const queue<T, Container>& a, const queue<T, Container>& b)
+ {
+ return !(a.c < b.c);
+ }
+
+ template <typename T, typename Container>
+ inline void swap(queue<T, Container>& a, queue<T, Container>& b) EA_NOEXCEPT_IF((eastl::is_nothrow_swappable<typename queue<T, Container>::container_type>::value)) // EDG has a bug and won't let us use Container in this noexcept statement
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/random.h b/EASTL/include/EASTL/random.h
new file mode 100644
index 0000000..ca3e20b
--- /dev/null
+++ b/EASTL/include/EASTL/random.h
@@ -0,0 +1,254 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file defines random number generation like the std C++ <random> header.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_RANDOM_H
+#define EASTL_RANDOM_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/numeric_limits.h>
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// min/max workaround
+//
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. The following may still not completely resolve some kinds of
+// problems with MSVC++ #defines, though it deals with most cases in production
+// game code.
+//
+#if EASTL_NOMINMAX
+ #ifdef min
+ #undef min
+ #endif
+ #ifdef max
+ #undef max
+ #endif
+#endif
+
+
+namespace eastl
+{
+
+ // Implements a uniform distribution of values generated by a Generator,
+ // where Generator is typically a random or pseudo-random number generator.
+ // Note that the min/max range for this class is inclusive, so if you want
+ // random integers 0, 1, 2, and 3, then you need to init this class with (0, 3)
+ // and not (0, 4).
+ // See the C++11 Standard, section 26.5.1.6
+ template<class IntType = int>
+ class uniform_int_distribution
+ {
+ static_assert(eastl::is_integral<IntType>::value, "uniform_int_distribution: IntType must be integral.");
+
+ public:
+ typedef IntType result_type;
+
+ // For uniform_int_distribution, param_type defines simply the min and max values of
+ // the range returned by operator(). It may mean something else for other distribution types.
+ struct param_type
+ {
+ explicit param_type(IntType a = 0, IntType b = eastl::numeric_limits<IntType>::max());
+
+ result_type a() const;
+ result_type b() const;
+
+ bool operator==(const param_type& x) { return (x.mA == mA) && (x.mB == mB); }
+ bool operator!=(const param_type& x) { return (x.mA != mA) || (x.mB != mB); }
+
+ protected:
+ IntType mA;
+ IntType mB;
+ };
+
+ uniform_int_distribution(IntType a = 0, IntType b = eastl::numeric_limits<IntType>::max());
+ uniform_int_distribution(const param_type& params);
+
+ void reset();
+
+ template<class Generator>
+ result_type operator()(Generator& g);
+
+ template<class Generator>
+ result_type operator()(Generator& g, const param_type& params);
+
+ result_type a() const;
+ result_type b() const;
+
+ param_type param() const;
+ void param(const param_type& params);
+
+ result_type min() const;
+ result_type max() const;
+
+ protected:
+ param_type mParam;
+ };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // uniform_int_distribution
+ ///////////////////////////////////////////////////////////////////////
+
+ template<class IntType>
+ inline uniform_int_distribution<IntType>::param_type::param_type(IntType aValue, IntType bValue)
+ : mA(aValue), mB(bValue)
+ {
+ EASTL_ASSERT(aValue <= bValue);
+ }
+
+ template<class IntType>
+ inline typename uniform_int_distribution<IntType>::result_type
+ uniform_int_distribution<IntType>::param_type::a() const
+ {
+ return mA;
+ }
+
+ template<class IntType>
+ inline typename uniform_int_distribution<IntType>::result_type
+ uniform_int_distribution<IntType>::param_type::b() const
+ {
+ return mB;
+ }
+
+
+
+ template<class IntType>
+ inline uniform_int_distribution<IntType>::uniform_int_distribution(IntType aValue, IntType bValue)
+ : mParam(aValue, bValue)
+ {
+ // Nothing more to do.
+ }
+
+ template<class IntType>
+ inline uniform_int_distribution<IntType>::uniform_int_distribution(const param_type& params)
+ : mParam(params)
+ {
+ // Nothing more to do.
+ }
+
+ template<class IntType>
+ void uniform_int_distribution<IntType>::reset()
+ {
+ // Nothing to do.
+ }
+
+ template<class IntType>
+ template<class Generator>
+ inline typename uniform_int_distribution<IntType>::result_type
+ uniform_int_distribution<IntType>::operator()(Generator& g)
+ {
+ return operator()(g, mParam);
+ }
+
+ template<class IntType>
+ template<class Generator>
+ inline typename uniform_int_distribution<IntType>::result_type
+ uniform_int_distribution<IntType>::operator()(Generator& g, const param_type& params)
+ {
+ // This is a tricky function to implement in a generic way for all integral types.
+ // The solution will involve handling the case of signed types and 64 bit types,
+ // probably in a way that uses template metaprogramming to deal with signed ranges.
+
+ // Temporary solution while we research a full solution. It supports only uint8_t,
+ // uint16_t, and uint32_t uniform_int_distribution types.
+ static_assert(eastl::is_unsigned<result_type>::value && (sizeof(result_type) <= 4), "uniform_int_distribution currently supports only uint8_t, uint16_t, uint32_t.");
+
+ result_type v = g(); // Generates a value in the range of (numeric_limits<result_type>::min(), numeric_limits<result_type>::max()).
+ result_type r = (result_type)((v * (uint64_t)((params.b() - params.a()) + 1)) >> (sizeof(result_type) * 8)); // +1 because ranges are inclusive.
+ return params.a() + r;
+ }
+
+ template<class IntType>
+ inline typename uniform_int_distribution<IntType>::result_type
+ uniform_int_distribution<IntType>::a() const
+ {
+ return mParam.mA;
+ }
+
+ template<class IntType>
+ inline typename uniform_int_distribution<IntType>::result_type
+ uniform_int_distribution<IntType>::b() const
+ {
+ return mParam.mB;
+ }
+
+
+ template<class IntType>
+ inline typename uniform_int_distribution<IntType>::param_type
+ uniform_int_distribution<IntType>::param() const
+ {
+ return mParam;
+ }
+
+ template<class IntType>
+ inline void
+ uniform_int_distribution<IntType>::param(const param_type& params)
+ {
+ mParam = params;
+ }
+
+ template<class IntType>
+ inline typename uniform_int_distribution<IntType>::result_type
+ uniform_int_distribution<IntType>::min() const
+ {
+ return mParam.mA;
+ }
+
+ template<class IntType>
+ inline typename uniform_int_distribution<IntType>::result_type
+ uniform_int_distribution<IntType>::max() const
+ {
+ return mParam.mB;
+ }
+
+
+
+ template<class ResultType>
+ inline bool operator==(const uniform_int_distribution<ResultType>& lhs,
+ const uniform_int_distribution<ResultType>& rhs)
+ {
+ return (lhs.param() == rhs.param());
+ }
+
+ template<class ResultType>
+ inline bool operator!=(const uniform_int_distribution<ResultType>& lhs,
+ const uniform_int_distribution<ResultType>& rhs)
+ {
+ return (lhs.param() != rhs.param());
+ }
+
+
+ // EASTL doesn't currently implement IO stream-related functionality.
+ // It may be useful to forward declare these templates and let the user implement them in the meantime.
+ //
+ // template<class CharT, class Traits, class ResultType>
+ // eastl::basic_ostream<CharT, Traits>& operator<<(eastl::basic_ostream<CharT, Traits>& os, const uniform_int_distribution& uid);
+ //
+ // template<class CharT, class Traits, class ResultType>
+ // eastl::basic_istream<CharT, Traits>& operator>>(eastl::basic_istream<CharT, Traits>& is, uniform_int_distribution& uid);
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/ratio.h b/EASTL/include/EASTL/ratio.h
new file mode 100644
index 0000000..da1a7b1
--- /dev/null
+++ b/EASTL/include/EASTL/ratio.h
@@ -0,0 +1,320 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements the class template eastl::ratio that provides compile-time
+// rational arithmetic support. Each instantiation of this template exactly
+// represents any finite rational number as long as its numerator Num and
+// denominator Denom are representable as compile-time constants of type
+// intmax_t. In addition, Denom may not be zero and may not be equal to the most
+// negative value. Both numerator and denominator are automatically reduced to
+// the lowest terms.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_RATIO_H
+#define EASTL_RATIO_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EABase/eabase.h>
+
+
+//////////////////////////////////////////////////////////////////////////////
+// namespace eastl
+// {
+// template <intmax_t N, intmax_t D = 1>
+// class ratio
+// {
+// public:
+// static constexpr intmax_t num;
+// static constexpr intmax_t den;
+// typedef ratio<num, den> type;
+// };
+//
+// // ratio arithmetic
+// template <class R1, class R2> using ratio_add = ...;
+// template <class R1, class R2> using ratio_subtract = ...;
+// template <class R1, class R2> using ratio_multiply = ...;
+// template <class R1, class R2> using ratio_divide = ...;
+//
+// // ratio comparison
+// template <class R1, class R2> struct ratio_equal;
+// template <class R1, class R2> struct ratio_not_equal;
+// template <class R1, class R2> struct ratio_less;
+// template <class R1, class R2> struct ratio_less_equal;
+// template <class R1, class R2> struct ratio_greater;
+// template <class R1, class R2> struct ratio_greater_equal;
+//
+// // convenience SI typedefs
+// typedef ratio<1, 1000000000000000000000000> yocto; // not supported
+// typedef ratio<1, 1000000000000000000000> zepto; // not supported
+// typedef ratio<1, 1000000000000000000> atto;
+// typedef ratio<1, 1000000000000000> femto;
+// typedef ratio<1, 1000000000000> pico;
+// typedef ratio<1, 1000000000> nano;
+// typedef ratio<1, 1000000> micro;
+// typedef ratio<1, 1000> milli;
+// typedef ratio<1, 100> centi;
+// typedef ratio<1, 10> deci;
+// typedef ratio< 10, 1> deca;
+// typedef ratio< 100, 1> hecto;
+// typedef ratio< 1000, 1> kilo;
+// typedef ratio< 1000000, 1> mega;
+// typedef ratio< 1000000000, 1> giga;
+// typedef ratio< 1000000000000, 1> tera;
+// typedef ratio< 1000000000000000, 1> peta;
+// typedef ratio< 1000000000000000000, 1> exa;
+// typedef ratio< 1000000000000000000000, 1> zetta; // not supported
+// typedef ratio<1000000000000000000000000, 1> yotta; // not supported
+// }
+//////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////
+ // compile-time overflow helpers
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_RATIO_ABS(x) ((x) < 0 ? -(x) : (x))
+
+ template <intmax_t X, intmax_t Y>
+ struct AdditionOverFlow
+ {
+ static const bool c1 = (X <= 0 && 0 <= Y) || (Y < 0 && 0 < X); // True if digits do not have the same sign.
+ static const bool c2 = EASTL_RATIO_ABS(Y) <= INTMAX_MAX - EASTL_RATIO_ABS(X);
+ static const bool value = c1 || c2;
+ };
+
+ template <intmax_t X, intmax_t Y>
+ struct MultiplyOverFlow
+ {
+ static const bool value = (EASTL_RATIO_ABS(X) <= (INTMAX_MAX / EASTL_RATIO_ABS(Y)));
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio (C++ Standard: 20.11.3)
+ ///////////////////////////////////////////////////////////////////////
+ template <intmax_t N = 0, intmax_t D = 1>
+ class ratio
+ {
+ public:
+ static EA_CONSTEXPR_OR_CONST intmax_t num = N;
+ static EA_CONSTEXPR_OR_CONST intmax_t den = D;
+ typedef ratio<num, den> type;
+ };
+
+ namespace Internal
+ {
+ // gcd -- implementation based on euclid's algorithm
+ template <intmax_t X, intmax_t Y> struct gcd { static const intmax_t value = gcd<Y, X % Y>::value; };
+ template <intmax_t X> struct gcd<X, 0> { static const intmax_t value = X; };
+ template <> struct gcd<0, 0> { static const intmax_t value = 1; };
+
+ // lcm
+ template<intmax_t X, intmax_t Y>
+ struct lcm { static const intmax_t value = (X * (Y / gcd<X,Y>::value)); };
+
+ // ct_add
+ template <intmax_t X, intmax_t Y>
+ struct ct_add
+ {
+ static_assert(AdditionOverFlow<X,Y>::value, "compile-time addition overflow");
+ static const intmax_t value = X + Y;
+ };
+
+ // ct_sub
+ template <intmax_t X, intmax_t Y>
+ struct ct_sub
+ {
+ static_assert(AdditionOverFlow<X,-Y>::value, "compile-time addition overflow");
+ static const intmax_t value = X - Y;
+ };
+
+ // ct_multi
+ template <intmax_t X, intmax_t Y>
+ struct ct_multi
+ {
+ static_assert(MultiplyOverFlow<X,Y>::value, "compile-time multiply overflow");
+ static const intmax_t value = X * Y;
+ };
+
+ // ct_simplify
+ template <class R1>
+ struct ct_simplify
+ {
+ static const intmax_t divisor = Internal::gcd<R1::num, R1::den>::value;
+ static const intmax_t num = R1::num / divisor;
+ static const intmax_t den = R1::den / divisor;
+
+ typedef ratio<num, den> ratio_type;
+ typedef ct_simplify<R1> this_type;
+ };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <intmax_t N1, intmax_t N2> intmax_t ct_add_v = ct_add<N1, N2>::value;
+ template <intmax_t N1, intmax_t N2> intmax_t ct_multi_v = ct_multi<N1, N2>::value;
+ template <class R1, class R2> R2 ct_simplify_t = ct_simplify<R1>::ratio_type;
+ #else
+ template <intmax_t N1, intmax_t N2> struct ct_add_v : public ct_add<N1, N2>::value {};
+ template <intmax_t N1, intmax_t N2> struct ct_multi_v : public ct_multi<N1, N2>::value {};
+ template <class R1> struct ct_simplify_t : public ct_simplify<R1>::ratio_type {};
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio_add
+ ///////////////////////////////////////////////////////////////////////
+ template <class R1, class R2>
+ struct ratio_add
+ {
+ typedef typename ct_simplify
+ <
+ typename ratio
+ <
+ ct_add
+ <
+ ct_multi<R1::num, R2::den>::value,
+ ct_multi<R2::num, R1::den>::value
+ >::value,
+ ct_multi<R1::den, R2::den>::value
+ >::type
+ >::ratio_type type;
+ };
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio_subtract
+ ///////////////////////////////////////////////////////////////////////
+ template <class R1, class R2>
+ struct ratio_subtract
+ {
+ typedef typename ct_simplify
+ <
+ typename ratio
+ <
+ ct_sub
+ <
+ ct_multi<R1::num, R2::den>::value,
+ ct_multi<R2::num, R1::den>::value
+ >::value,
+ ct_multi<R1::den, R2::den>::value
+ >::type
+ >::ratio_type type;
+ };
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio_multiply
+ ///////////////////////////////////////////////////////////////////////
+ template <class R1, class R2>
+ struct ratio_multiply
+ {
+ typedef typename ct_simplify
+ <
+ typename ratio
+ <
+ ct_multi<R1::num, R2::num>::value,
+ ct_multi<R1::den, R2::den>::value
+ >::type
+ >::ratio_type type;
+ };
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio_divide
+ ///////////////////////////////////////////////////////////////////////
+ template <class R1, class R2>
+ struct ratio_divide
+ {
+ typedef typename ct_simplify
+ <
+ typename ratio
+ <
+ ct_multi<R1::num, R2::den>::value,
+ ct_multi<R1::den, R2::num>::value
+ >::type
+ >::ratio_type type;
+ };
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio_equal
+ ///////////////////////////////////////////////////////////////////////
+ template <class R1, class R2>
+ struct ratio_equal
+ {
+ typedef ct_simplify<R1> sr1_t;
+ typedef ct_simplify<R2> sr2_t;
+
+ static const bool value = (sr1_t::num == sr2_t::num) && (sr1_t::den == sr2_t::den);
+ };
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio_less
+ ///////////////////////////////////////////////////////////////////////
+ template <class R1, class R2>
+ struct ratio_less
+ {
+ static const bool value = (R1::num * R2::den) < (R2::num * R1::den);
+ };
+ } // namespace Internal
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio arithmetic (C++ Standard: 20.11.4)
+ ///////////////////////////////////////////////////////////////////////
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) || (defined(_MSC_VER) && (_MSC_VER < 1900)) // prior to VS2015
+ template <class R1, class R2> struct ratio_add : public Internal::ratio_add<R1, R2>::type {};
+ template <class R1, class R2> struct ratio_subtract : public Internal::ratio_subtract<R1, R2>::type {};
+ template <class R1, class R2> struct ratio_multiply : public Internal::ratio_multiply<R1, R2>::type {};
+ template <class R1, class R2> struct ratio_divide : public Internal::ratio_divide<R1, R2>::type {};
+ #else
+ template <class R1, class R2> using ratio_add = typename Internal::ratio_add<R1, R2>::type;
+ template <class R1, class R2> using ratio_subtract = typename Internal::ratio_subtract<R1, R2>::type;
+ template <class R1, class R2> using ratio_multiply = typename Internal::ratio_multiply<R1, R2>::type;
+ template <class R1, class R2> using ratio_divide = typename Internal::ratio_divide<R1, R2>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ratio comparison (C++ Standard: 20.11.5)
+ ///////////////////////////////////////////////////////////////////////
+ template <class R1, class R2> struct ratio_equal : public integral_constant<bool, Internal::ratio_equal<R1, R2>::value> {};
+ template <class R1, class R2> struct ratio_not_equal : public integral_constant<bool, !ratio_equal<R1, R2>::value> {};
+ template <class R1, class R2> struct ratio_less : public integral_constant<bool, Internal::ratio_less<R1, R2>::value> {};
+ template <class R1, class R2> struct ratio_less_equal : public integral_constant<bool, !ratio_less<R2, R1>::value> {};
+ template <class R1, class R2> struct ratio_greater : public integral_constant<bool, ratio_less<R2, R1>::value> {};
+ template <class R1, class R2> struct ratio_greater_equal : public integral_constant<bool, !ratio_less<R1, R2>::value> {};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // convenience SI typedefs (C++ Standard: 20.11.6)
+ ///////////////////////////////////////////////////////////////////////
+ // typedef ratio<1, 1000000000000000000000000> yocto; // not supported, too big for intmax_t
+ // typedef ratio<1, 1000000000000000000000 > zepto; // not supported, too big for intmax_t
+ typedef ratio<1, 1000000000000000000 > atto;
+ typedef ratio<1, 1000000000000000 > femto;
+ typedef ratio<1, 1000000000000 > pico;
+ typedef ratio<1, 1000000000 > nano;
+ typedef ratio<1, 1000000 > micro;
+ typedef ratio<1, 1000 > milli;
+ typedef ratio<1, 100 > centi;
+ typedef ratio<1, 10 > deci;
+ typedef ratio<10, 1 > deca;
+ typedef ratio<100, 1 > hecto;
+ typedef ratio<1000, 1 > kilo;
+ typedef ratio<1000000, 1 > mega;
+ typedef ratio<1000000000, 1 > giga;
+ typedef ratio<1000000000000, 1 > tera;
+ typedef ratio<1000000000000000, 1 > peta;
+ typedef ratio<1000000000000000000, 1 > exa;
+ // typedef ratio<1000000000000000000000, 1 > zetta; // not supported, too big for intmax_t
+ // typedef ratio<1000000000000000000000000, 1> yotta; // not supported, too big for intmax_t
+}
+
+#endif // EASTL_RATIO_H
diff --git a/EASTL/include/EASTL/safe_ptr.h b/EASTL/include/EASTL/safe_ptr.h
new file mode 100644
index 0000000..344ded8
--- /dev/null
+++ b/EASTL/include/EASTL/safe_ptr.h
@@ -0,0 +1,485 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SAFEPTR_H
+#define EASTL_SAFEPTR_H
+
+
+#include <EASTL/internal/config.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ class safe_ptr_base;
+
+
+ /// safe_object
+ ///
+ /// In order for a class to be the template argument for safe_ptr,
+ /// it must derive from safe_object.
+ ///
+ /// Example usage:
+ /// class RandomLifetimeObject : public safe_object
+ /// {
+ /// public:
+ /// RandomLifetimeObject();
+ /// Method();
+ /// ...
+ /// };
+ ///
+ class safe_object
+ {
+ public:
+ bool is_unreferenced() const; /// Returns true if there are zero references (by a smart_ptr) to this object (mpSafePtrList is NULL).
+ bool has_unique_reference() const; /// Returns true if there is at most one reference (by a smart_ptr) to us.
+
+ protected:
+ safe_object();
+ ~safe_object();
+
+ void clear_references(); /// Forcibly removes any references (by smart_ptrs) to this object. All safe_ptr mpObject values are set to NULL.
+
+ private:
+ friend class safe_ptr_base;
+
+ void add(safe_ptr_base* pBase) const; /// Link pBase into my list of safe pointers.
+ void remove(safe_ptr_base* pBase) const; /// Unlink pBase from my list of safe pointers.
+
+ mutable safe_ptr_base* mpSafePtrList;
+
+ public:
+ // Deprecated, as its name is misleading:
+ bool has_references() const; /// Returns true if there is at most one reference (by a smart_ptr) to us.
+ };
+
+
+
+ /// safe_ptr_base
+ ///
+ /// This is a non-templated base class for safe_ptr<T>, not for direct use by the user.
+ ///
+ class safe_ptr_base
+ {
+ public:
+ bool unique() const; /// Returns true if there are no other smart pointers pointing to our object except us. True if mpObject is NUll
+ bool empty() const; /// Returns true if mpObject is NULL.
+ void reset(const safe_object* pObject); /// Make this point to pObject and enlist.
+ void reset(); /// Make this point to NULL and delist.
+
+ protected:
+ // The following are protected and must be overridden in the safe_ptr<T> subclass.
+ safe_ptr_base();
+ safe_ptr_base(const safe_object* pObject);
+ safe_ptr_base(const safe_ptr_base& safePtrBase);
+ ~safe_ptr_base();
+
+ protected:
+ const safe_object* mpObject;
+
+ private:
+ friend class safe_object;
+
+ safe_ptr_base& operator=(const safe_ptr_base& safePtrBase);
+
+ safe_ptr_base* mpNext;
+ };
+
+
+ /// safe_ptr
+ ///
+ /// safe_ptr is an automatic, lightweight solution to the dangling pointer problem.
+ /// This class is an alternative to weak_ptr which has the primary benefit of not
+ /// allocating memory at the primary cost of being a tad slower and thread-unsafe.
+ ///
+ /// During normal usage, safe_ptr<T> behaves exactly as a T*. When the
+ /// raw pointer referenced by the safe_ptr is deleted, all of the SafePtrs
+ /// for the raw pointer are set to NULL.
+ ///
+ /// This works by making the raw objects derive from the class safe_object,
+ /// which maintains a linked-list of the Safe pointers that reference it.
+ /// When a safe_object is destroyed, it walks its linked list, setting the
+ /// object reference for each of its SafePtrs to NULL.
+ ///
+ /// The overhead for this is light - a single pointer is added to the
+ /// size of the pointed to object, and a safePtr is the size of a raw
+ /// pointer plus one list pointer.
+ ///
+ /// This class is not thread-safe. In particular, manipulation of safe_ptr
+ /// objects that refer to the same underlying object cannot be done safely
+ /// from multiple threads. safe_ptr objects that are unrelated can be used
+ /// safely from multiple threads.
+ ///
+ /// Example usage:
+ /// class RandomLifetimeObject : public safe_object
+ /// {
+ /// public:
+ /// RandomLifetimeObject();
+ /// Method();
+ /// ...
+ /// };
+ ///
+ /// safe_ptr<RandomLifetimeObject> pSafePtr(new RandomLifetimeObject);
+ /// safe_ptr<RandomLifetimeObject> pSafePtrCopy = pSafePtr;
+ ///
+ /// pSafePtr->Method();
+ /// delete pSafePtr;
+ /// At this point, pSafePtrCopy evaluates to NULL.
+ ///
+ template<class T>
+ class safe_ptr : public safe_ptr_base
+ {
+ public:
+ typedef T value_type;
+ typedef safe_ptr<T> this_type;
+
+ public:
+ safe_ptr(); /// Default constructor.
+ explicit safe_ptr(T* pObject); /// Construct a safeptr from a naked pointer.
+ safe_ptr(const this_type& safePtr); /// Copy constructor.
+ //~safe_ptr() {} /// No need to implement this; the compiler-generated destructor is OK.
+
+ this_type& operator=(const this_type& safePtr); /// Assignment operator.
+ this_type& operator=(T* const pObject); /// Assign this to a naked pointer.
+
+ bool operator==(const this_type& safePtr) const; /// Returns true if safePtr points to the same object as this.
+
+ public:
+ T* get() const; /// Get the naked pointer from this safe ptr.
+ operator T*() const; /// Implicit safe_ptr<T> -> T* conversion operator.
+ T* operator->() const; /// Member operator.
+ T& operator*() const; /// Dereference operator.
+ bool operator!() const; /// Boolean negation operator.
+
+ typedef T* (this_type::*bool_)() const; /// Allows for a more portable version of testing an instance of this class as a bool.
+ operator bool_() const // A bug in the CodeWarrior compiler forces us to implement this inline instead of below.
+ {
+ if(mpObject)
+ return &this_type::get;
+ return NULL;
+ }
+ };
+
+} // namespace eastl
+
+
+
+
+
+
+/////////////////////////////////////////////////////////////////////////
+// Inlines
+/////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// safe_object
+///////////////////////////////////////////////////////////////////////////////
+
+inline eastl::safe_object::safe_object()
+ : mpSafePtrList(0)
+{
+}
+
+
+inline bool eastl::safe_object::is_unreferenced() const
+{
+ return (mpSafePtrList == NULL);
+}
+
+
+inline void eastl::safe_object::clear_references()
+{
+ while(mpSafePtrList != NULL)
+ {
+ safe_ptr_base* const pNext = mpSafePtrList->mpNext;
+ mpSafePtrList->mpNext = NULL;
+ mpSafePtrList->mpObject = NULL;
+ mpSafePtrList = pNext;
+ }
+}
+
+
+inline eastl::safe_object::~safe_object()
+{
+ safe_ptr_base* pIter = mpSafePtrList;
+
+ while(pIter)
+ {
+ safe_ptr_base* const pNext = pIter->mpNext;
+ pIter->mpNext = NULL;
+ pIter->mpObject = NULL;
+ pIter = pNext;
+ }
+}
+
+
+inline void eastl::safe_object::add(safe_ptr_base* pBase) const
+{
+ pBase->mpNext = mpSafePtrList;
+ mpSafePtrList = pBase;
+}
+
+
+inline void eastl::safe_object::remove(safe_ptr_base* pBase) const
+{
+ // We have a singly-linked list (starting with mpSafePtrList) and need to
+ // remove an element from within it.
+ if(pBase == mpSafePtrList)
+ mpSafePtrList = mpSafePtrList->mpNext;
+ else
+ {
+ for(safe_ptr_base *pPrev = mpSafePtrList, *pCurrent = mpSafePtrList->mpNext;
+ pCurrent;
+ pPrev = pCurrent, pCurrent = pCurrent->mpNext)
+ {
+ if(pCurrent == pBase)
+ {
+ pPrev->mpNext = pCurrent->mpNext;
+ break;
+ }
+ }
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// safe_ptr_base
+///////////////////////////////////////////////////////////////////////////////
+
+inline eastl::safe_ptr_base::safe_ptr_base(const safe_ptr_base& safePtrBase)
+ : mpObject(safePtrBase.mpObject),
+ mpNext(NULL)
+{
+ EASTL_ASSERT(this != &safePtrBase);
+
+ if(mpObject)
+ mpObject->add(this);
+}
+
+
+inline eastl::safe_ptr_base::safe_ptr_base()
+ : mpObject(NULL),
+ mpNext(NULL)
+{
+}
+
+
+inline eastl::safe_ptr_base::safe_ptr_base(const safe_object* pObject)
+ : mpObject(pObject),
+ mpNext(NULL)
+{
+ if(mpObject)
+ mpObject->add(this);
+}
+
+
+inline eastl::safe_ptr_base::~safe_ptr_base()
+{
+ if(mpObject)
+ mpObject->remove(this);
+}
+
+
+inline void eastl::safe_ptr_base::reset()
+{
+ if(mpObject)
+ {
+ mpObject->remove(this);
+ mpObject = NULL;
+ }
+}
+
+
+inline bool eastl::safe_ptr_base::empty() const
+{
+ return (mpObject == NULL);
+}
+
+
+inline void eastl::safe_ptr_base::reset(const safe_object* pNewObject)
+{
+ if(mpObject != pNewObject)
+ {
+ if(mpObject)
+ mpObject->remove(this);
+
+ mpObject = pNewObject;
+
+ if(mpObject)
+ mpObject->add(this);
+ }
+}
+
+
+inline bool eastl::safe_ptr_base::unique() const
+{
+ return (mpNext == NULL) && ((mpObject == NULL) || (mpObject->mpSafePtrList == this));
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// safe_object
+///////////////////////////////////////////////////////////////////////////////
+
+
+// This function is defined here below safe_ptr_base because some compilers
+// (GCC in particular) generate warnings about inline functions (e.g. unique below)
+// being used before their inline implementations.
+inline bool eastl::safe_object::has_unique_reference() const
+{
+ return mpSafePtrList ? mpSafePtrList->unique() : false;
+}
+
+// Deprecated:
+inline bool eastl::safe_object::has_references() const
+{
+ return mpSafePtrList ? mpSafePtrList->unique() : false;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// safe_ptr<T>
+///////////////////////////////////////////////////////////////////////////////
+
+template<class T>
+inline eastl::safe_ptr<T>::safe_ptr()
+ : safe_ptr_base()
+{
+}
+
+
+template<class T>
+inline eastl::safe_ptr<T>::safe_ptr(T* pObject)
+ : safe_ptr_base(pObject)
+{
+}
+
+
+template<class T>
+inline eastl::safe_ptr<T>::safe_ptr(const this_type& safePtr)
+ : safe_ptr_base(safePtr)
+{
+}
+
+
+template<class T>
+inline typename eastl::safe_ptr<T>::this_type& eastl::safe_ptr<T>::operator=(const this_type& safePtr)
+{
+ if(this != &safePtr)
+ reset(safePtr.mpObject);
+ return *this;
+}
+
+
+template<class T>
+inline typename eastl::safe_ptr<T>::this_type& eastl::safe_ptr<T>::operator=(T* const pObject)
+{
+ reset(pObject);
+ return *this;
+}
+
+
+template<class T>
+inline bool eastl::safe_ptr<T>::operator==(const this_type& rhs) const
+{
+ return (mpObject == rhs.mpObject);
+}
+
+
+template<class T>
+inline T* eastl::safe_ptr<T>::get() const
+{
+ return static_cast<T*>(const_cast<safe_object*>(mpObject));
+}
+
+
+template<class T>
+inline eastl::safe_ptr<T>::operator T*() const
+{
+ return static_cast<T*>(const_cast<safe_object*>(mpObject));
+}
+
+
+template<class T>
+inline T* eastl::safe_ptr<T>::operator->() const
+{
+ return static_cast<T*>(const_cast<safe_object*>(mpObject));
+}
+
+
+template<class T>
+inline T& eastl::safe_ptr<T>::operator*() const
+{
+ return *static_cast<T*>(const_cast<safe_object*>(mpObject));
+}
+
+
+template<class T>
+inline bool eastl::safe_ptr<T>::operator!() const
+{
+ return (mpObject == NULL);
+}
+
+// A bug in the CodeWarrior compiler forces us to implement this inline in the class instead of here.
+// template<class T>
+// inline eastl::safe_ptr<T>::operator bool_() const
+// {
+// if(mpObject)
+// return &this_type::get;
+// return NULL;
+// }
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// global operators
+///////////////////////////////////////////////////////////////////////////////
+
+template<class T>
+inline bool operator==(const eastl::safe_ptr<T>& safePtr, const T* pObject)
+{
+ return (safePtr.get() == pObject);
+}
+
+
+template<class T>
+inline bool operator!=(const eastl::safe_ptr<T>& safePtr, const T* pObject)
+{
+ return (safePtr.get() != pObject);
+}
+
+
+template<class T>
+inline bool operator<(const eastl::safe_ptr<T>& safePtrA, const eastl::safe_ptr<T>& safePtrB)
+{
+ return (safePtrA.get() < safePtrB.get());
+}
+
+
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/scoped_array.h b/EASTL/include/EASTL/scoped_array.h
new file mode 100644
index 0000000..c955dba
--- /dev/null
+++ b/EASTL/include/EASTL/scoped_array.h
@@ -0,0 +1,237 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// *** Note ***
+// *** This code is deprecated in favor of the C++11-conforming ***
+// *** eastl::unique_ptr template class found in <EASTL/unique_ptr.h> ***
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SCOPED_ARRAY_H
+#define EASTL_SCOPED_ARRAY_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/smart_ptr.h> // Defines smart_array_deleter
+#include <stddef.h> // Definition of ptrdiff_t
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// class scoped_array
+ ///
+ /// A scoped_array is the same as scoped_ptr but for arrays.
+ ///
+ template <typename T, typename Deleter = smart_array_deleter<T> >
+ class scoped_array
+ {
+ protected:
+ /// this_type
+ /// This is an alias for scoped_array<T>, this class.
+ typedef scoped_array<T> this_type;
+
+ /// deleter_type
+ typedef Deleter deleter_type;
+
+ /// mpArray
+ /// The owned pointer. Points to an array of T.
+ T* mpArray;
+
+ /// scoped_array
+ /// This function is private in order to prevent copying, for safety.
+ scoped_array(const scoped_array&);
+
+ /// scoped_array
+ /// This function is private in order to prevent copying, for safety.
+ scoped_array& operator=(const scoped_array&);
+
+ /// scoped_ptr
+ /// This function is private in order to prevent copying, for safety.
+ scoped_array& operator=(T* pValue);
+
+ public:
+ typedef T element_type;
+
+ /// scoped_ptr
+ /// Construct a scoped_ptr from a pointer allocated via new.
+ /// Example usage:
+ /// scoped_array<int> ptr(new int[6]);
+ explicit scoped_array(T* pArray = NULL)
+ : mpArray(pArray) {}
+
+ /// ~scoped_array
+ /// Destroys the owned pointer. The destructors for each of the objects
+ /// in the owned array will be called.
+ ~scoped_array()
+ {
+ Deleter del;
+ del(mpArray);
+ }
+
+ /// reset
+ /// Deletes the owned pointer and takes ownership of the
+ /// passed in pointer. If the passed in pointer is the same
+ /// as the owned pointer, nothing is done.
+ /// Example usage:
+ /// scoped_array<int> ptr(new int[6]);
+ /// ptr.reset(new int[7]); // deletes int[6]
+ /// ptr.reset(NULL); // deletes int[7]
+ void reset(T* pArray = NULL)
+ {
+ if(pArray != mpArray)
+ {
+ Deleter del;
+ del(mpArray);
+ mpArray = pArray;
+ }
+ }
+
+ /// detach
+ /// This simply forgets the owned pointer. It doesn't
+ /// free it but rather assumes that the user does.
+ /// Example usage:
+ /// scoped_array<int> ptr(new int[6]);
+ /// int* pIntArray = ptr.get();
+ /// ptr.detach();
+ /// delete[] pIntArray;
+ T* detach()
+ {
+ T* const pTemp = mpArray;
+ mpArray = NULL;
+ return pTemp;
+ }
+
+ /// swap
+ /// Exchanges the owned pointer beween two scoped_array objects.
+ void swap(this_type& scopedArray)
+ {
+ // std::swap(mpArray, scopedArray.mpArray); // Not used so that we can reduce a dependency.
+ T* const pArray = scopedArray.mpArray;
+ scopedArray.mpArray = mpArray;
+ mpArray = pArray;
+ }
+
+ /// operator[]
+ /// Returns a reference to the specified item in the owned pointer
+ /// array.
+ /// Example usage:
+ /// scoped_array<int> ptr(new int[6]);
+ /// int x = ptr[2];
+ typename add_lvalue_reference<T>::type operator[](ptrdiff_t i) const
+ {
+ // assert(mpArray && (i >= 0));
+ return mpArray[i];
+ }
+
+ /// get
+ /// Returns the owned array pointer.
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// scoped_array<int> ptr(new X[8]);
+ /// X** ppX = ptr.get();
+ /// ppX[2]->DoSomething();
+ T* get() const
+ {
+ return mpArray;
+ }
+
+ /// Implicit operator bool
+ /// Allows for using a scoped_ptr as a boolean.
+ /// Example usage:
+ /// scoped_array<int> ptr(new int[8]);
+ /// if(ptr)
+ /// ++ptr[2];
+ ///
+ /// Note that below we do not use operator bool(). The reason for this
+ /// is that booleans automatically convert up to short, int, float, etc.
+ /// The result is that this: if(scopedArray == 1) would yield true (bad).
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const
+ {
+ if(mpArray)
+ return &this_type::get;
+ return NULL;
+ }
+
+ /// operator!
+ /// This returns the opposite of operator bool; it returns true if
+ /// the owned pointer is null. Some compilers require this and some don't.
+ /// scoped_array<int> ptr(new int(3));
+ /// if(!ptr)
+ /// assert(false);
+ bool operator!() const
+ {
+ return (mpArray == NULL);
+ }
+
+ }; // class scoped_array
+
+
+ /// unique_array
+ ///
+ /// Example usage:
+ /// unique_array<int> uniqueIntArray;
+ /// Example usage:
+ /// UNIQUE_ARRAY_T(int, eastl::smart_ptr_deleter<int>) uniqueIntArray;
+ ///
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define UNIQUE_ARRAY_T(T, Deleter) scoped_array<T, Deleter>
+ #else
+ template <typename T, typename Deleter = smart_ptr_deleter<T> >
+ using unique_array = scoped_array<T, Deleter>;
+ #define UNIQUE_ARRAY_T(T, Deleter) unique_array<T, Deleter>
+ #endif
+
+
+
+ /// scoped_array
+ /// returns scoped_array::get() via the input scoped_array.
+ template <typename T, typename D>
+ inline T* get_pointer(const scoped_array<T, D>& scopedArray)
+ {
+ return scopedArray.get();
+ }
+
+
+ /// swap
+ /// Exchanges the owned pointer beween two scoped_array objects.
+ /// This non-member version is useful for compatibility of scoped_array
+ /// objects with the C++ Standard Library and other libraries.
+ template <typename T, typename D>
+ inline void swap(scoped_array<T, D>& scopedArray1, scoped_array<T, D>& scopedArray2)
+ {
+ scopedArray1.swap(scopedArray2);
+ }
+
+
+ /// operator<
+ /// Returns which scoped_array is 'less' than the other. Useful when storing
+ /// sorted containers of scoped_array objects.
+ template <typename T, typename D>
+ inline bool operator<(const scoped_array<T, D>& scopedArray1, const scoped_array<T, D>& scopedArray2)
+ {
+ return (scopedArray1.get() < scopedArray2.get()); // Alternatively use: std::less<T*>(scopedArray1.get(), scopedArray2.get());
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/scoped_ptr.h b/EASTL/include/EASTL/scoped_ptr.h
new file mode 100644
index 0000000..3ba01da
--- /dev/null
+++ b/EASTL/include/EASTL/scoped_ptr.h
@@ -0,0 +1,256 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// *** Note ***
+// *** This code is deprecated in favor of the C++11-conforming ***
+// *** eastl::unique_ptr template class found in <EASTL/unique_ptr.h> ***
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SCOPED_PTR_H
+#define EASTL_SCOPED_PTR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/smart_ptr.h> // Defines smart_ptr_deleter
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// class scoped_ptr
+ ///
+ /// This class is intended to be the same as the C++11 unique_ptr class,
+ /// but was created before there was such a thing.
+ ///
+ /// This class implements a scoped_ptr template. This is a class which is
+ /// similar to the C++ auto_ptr template, except that it prohibits copying
+ /// of itself, for safety.
+ ///
+ /// More specifically, the scoped_ptr class template stores a pointer to a
+ /// dynamically allocated object. The object pointed to is automatically
+ /// deleted on destructor of scoped_ptr or can be manually deleted via the
+ /// scopted_ptr::reset function.
+ ///
+ /// scoped_ptr cannot be used in C++ Standard Library containers; you'll need
+ /// to use the shared_ptr template if you want to do this. The reason you can't
+ /// use scoped_ptr is that it prohibits copying. You can't (safely) use auto_ptr
+ /// in C++ Standard Library containers because copying of an auto_ptr will
+ /// create a situation whereby objects are multiply freed.
+ ///
+ /// scoped_ptr cannot be used with arrays of objects. The reason for this is
+ /// that it calls delete on the owned pointer and not delete[]. The latter
+ /// allows for the calling of the destructors for the objects of the owned pointer.
+ /// If you want to use scoped_ptr with a dynamically allocated array, use the
+ /// scoped_array function instead.
+ ///
+ template <typename T, typename Deleter = smart_ptr_deleter<T> >
+ class scoped_ptr
+ {
+ protected:
+ /// this_type
+ /// This is an alias for scoped_ptr<T>, this class.
+ typedef scoped_ptr<T> this_type;
+
+ /// deleter_type
+ typedef Deleter deleter_type;
+
+ /// mpValue
+ /// The owned pointer.
+ T* mpValue;
+
+ /// scoped_ptr
+ /// This function is private in order to prevent copying, for safety.
+ scoped_ptr(const scoped_ptr&);
+
+ /// scoped_ptr
+ /// This function is private in order to prevent copying, for safety.
+ scoped_ptr& operator=(const scoped_ptr&);
+
+ /// scoped_ptr
+ /// This function is private in order to prevent copying, for safety.
+ scoped_ptr& operator=(T* pValue);
+
+ public:
+ typedef T element_type;
+
+ /// scoped_ptr
+ /// Construct a scoped_ptr from a pointer allocated via new.
+ /// Example usage:
+ /// scoped_ptr<int> ptr(new int(3));
+ explicit scoped_ptr(T* pValue = NULL)
+ : mpValue(pValue) {}
+
+ /// ~scoped_ptr
+ /// Destroys the owned pointer. The destructor for the object
+ /// referred to by the owned pointer will be called.
+ ~scoped_ptr()
+ {
+ Deleter del;
+ del(mpValue);
+ }
+
+ /// reset
+ /// Deletes the owned pointer and takes ownership of the
+ /// passed in pointer. If the passed in pointer is the same
+ /// as the owned pointer, nothing is done.
+ /// Example usage:
+ /// scoped_ptr<int> ptr(new int(3));
+ /// ptr.reset(new int(4)); // deletes int(3)
+ /// ptr.reset(NULL); // deletes int(4)
+ void reset(T* pValue = NULL)
+ {
+ if(pValue != mpValue)
+ {
+ Deleter del;
+ del(mpValue);
+ mpValue = pValue;
+ }
+ }
+
+ /// detach
+ /// This simply forgets the owned pointer. It doesn't
+ /// free it but rather assumes that the user does.
+ /// Example usage:
+ /// scoped_ptr<int> ptr(new int(3));
+ /// int* pInt = ptr.detach();
+ /// delete pInt;
+ T* detach()
+ {
+ T* const pTemp = mpValue;
+ mpValue = NULL;
+ return pTemp;
+ }
+
+ /// swap
+ /// Exchanges the owned pointer beween two scoped_ptr objects.
+ void swap(this_type& scopedPtr)
+ {
+ // std::swap(mpValue, scopedPtr.mpValue); // Not used so that we can reduce a dependency.
+ T* const pValue = scopedPtr.mpValue;
+ scopedPtr.mpValue = mpValue;
+ mpValue = pValue;
+ }
+
+ /// operator*
+ /// Returns the owner pointer dereferenced.
+ /// Example usage:
+ /// scoped_ptr<int> ptr(new int(3));
+ /// int x = *ptr;
+ typename add_lvalue_reference<T>::type operator*() const
+ {
+ // assert(mpValue);
+ return *mpValue;
+ }
+
+ /// operator->
+ /// Allows access to the owned pointer via operator->()
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// scoped_ptr<int> ptr(new X);
+ /// ptr->DoSomething();
+ T* operator->() const
+ {
+ // assert(mpValue);
+ return mpValue;
+ }
+
+ /// get
+ /// Returns the owned pointer. Note that this class does
+ /// not provide an operator T() function. This is because such
+ /// a thing (automatic conversion) is deemed unsafe.
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// scoped_ptr<int> ptr(new X);
+ /// X* pX = ptr.get();
+ /// pX->DoSomething();
+ T* get() const
+ {
+ return mpValue;
+ }
+
+ /// Implicit operator bool
+ /// Allows for using a scoped_ptr as a boolean.
+ /// Example usage:
+ /// scoped_ptr<int> ptr(new int(3));
+ /// if(ptr)
+ /// ++*ptr;
+ ///
+ /// Note that below we do not use operator bool(). The reason for this
+ /// is that booleans automatically convert up to short, int, float, etc.
+ /// The result is that this: if(scopedPtr == 1) would yield true (bad).
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const
+ {
+ if(mpValue)
+ return &this_type::get;
+ return NULL;
+ }
+
+ /// operator!
+ /// This returns the opposite of operator bool; it returns true if
+ /// the owned pointer is null. Some compilers require this and some don't.
+ /// scoped_ptr<int> ptr(new int(3));
+ /// if(!ptr)
+ /// assert(false);
+ bool operator!() const
+ {
+ return (mpValue == NULL);
+ }
+
+ }; // class scoped_ptr
+
+
+
+ /// get_pointer
+ /// returns scoped_ptr::get() via the input scoped_ptr.
+ template <typename T, typename D>
+ inline T* get_pointer(const scoped_ptr<T, D>& scopedPtr)
+ {
+ return scopedPtr.get();
+ }
+
+
+ /// swap
+ /// Exchanges the owned pointer beween two scoped_ptr objects.
+ /// This non-member version is useful for compatibility of scoped_ptr
+ /// objects with the C++ Standard Library and other libraries.
+ template <typename T, typename D>
+ inline void swap(scoped_ptr<T, D>& scopedPtr1, scoped_ptr<T, D>& scopedPtr2)
+ {
+ scopedPtr1.swap(scopedPtr2);
+ }
+
+
+ /// operator<
+ /// Returns which scoped_ptr is 'less' than the other. Useful when storing
+ /// sorted containers of scoped_ptr objects.
+ template <typename T, typename D>
+ inline bool operator<(const scoped_ptr<T, D>& scopedPtr1, const scoped_ptr<T, D>& scopedPtr2)
+ {
+ return (scopedPtr1.get() < scopedPtr2.get()); // Alternatively use: std::less<T*>(scopedPtr1.get(), scopedPtr2.get());
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/segmented_vector.h b/EASTL/include/EASTL/segmented_vector.h
new file mode 100644
index 0000000..d46a942
--- /dev/null
+++ b/EASTL/include/EASTL/segmented_vector.h
@@ -0,0 +1,523 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_SEGMENTED_VECTOR_H
+#define EASTL_SEGMENTED_VECTOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+
+namespace eastl
+{
+ template<typename T, size_t Count, typename Allocator = EASTLAllocatorType>
+ class segment
+ {
+ public:
+ typedef eastl_size_t size_type;
+ typedef segment<T, Count, Allocator> this_type;
+ typedef T* iterator;
+ typedef const T* const_iterator;
+
+ const this_type* next_segment() const;
+ this_type* next_segment();
+
+ const_iterator begin() const;
+ iterator begin();
+
+ const_iterator end() const;
+ iterator end();
+
+ private:
+ static const uintptr_t kIsLastSegment = 1 << 0;
+ uintptr_t mPrev;
+
+ union
+ {
+ this_type* mNext;
+ size_type mSize;
+ };
+ T mData[Count];
+ template<typename, size_t, typename> friend class segmented_vector;
+ template<typename, size_t, typename> friend struct segmented_vector_iterator;
+ };
+
+
+ template <typename T, size_t Count, typename Allocator = EASTLAllocatorType>
+ struct segmented_vector_iterator
+ {
+ public:
+ typedef segmented_vector_iterator<T, Count, Allocator> this_type;
+ typedef segment<T, Count, Allocator> segment_type;
+
+ T* operator->() const;
+ T& operator*() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ public:
+ T* mCurrent;
+ T* mEnd;
+ segment_type* mSegment;
+ };
+
+
+ template <typename T, size_t Count, typename Allocator = EASTLAllocatorType>
+ class segmented_vector
+ {
+ public:
+ typedef eastl_size_t size_type;
+ typedef segmented_vector<T, Count, Allocator> this_type;
+ typedef segment<T, Count, Allocator> segment_type;
+ typedef Allocator allocator_type;
+ typedef segmented_vector_iterator<const T, Count, Allocator> const_iterator;
+ typedef segmented_vector_iterator<T, Count, Allocator> iterator;
+
+
+ segmented_vector(const Allocator& allocator = Allocator());
+ ~segmented_vector();
+
+ allocator_type& get_allocator();
+
+ const segment_type* first_segment() const;
+ segment_type* first_segment();
+ const_iterator begin() const;
+ iterator begin();
+
+ const_iterator end() const;
+ iterator end();
+
+ size_type size() const;
+ size_type segment_count() const;
+ T& front();
+ T& back();
+
+ bool empty() const;
+ void clear();
+
+ T& push_back();
+ T& push_back(const T& value);
+ void* push_back_uninitialized();
+
+ void pop_back();
+
+ void erase_unsorted(segment_type& segment, typename segment_type::iterator it);
+ iterator erase_unsorted(const iterator& i);
+
+ void swap(this_type& other);
+
+ protected:
+ segment_type* DoAllocSegment(segment_type* prevSegment);
+ void* DoPushBack();
+
+ allocator_type mAllocator;
+ segment_type* mFirstSegment;
+ segment_type* mLastSegment;
+ size_type mSegmentCount;
+ };
+
+
+ template<typename T, size_t Count, typename Allocator>
+ inline const segment<T, Count, Allocator>*
+ segment<T, Count, Allocator>::next_segment() const
+ {
+ if (mPrev & kIsLastSegment)
+ return 0;
+ else
+ return mNext;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ inline segment<T, Count, Allocator>*
+ segment<T, Count, Allocator>::next_segment()
+ {
+ if (mPrev & kIsLastSegment)
+ return 0;
+ else
+ return mNext;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ inline typename segment<T, Count, Allocator>::const_iterator
+ segment<T, Count, Allocator>::begin() const
+ {
+ return mData;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ inline typename segment<T, Count, Allocator>::iterator
+ segment<T, Count, Allocator>::begin()
+ {
+ return mData;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ inline typename segment<T, Count, Allocator>::const_iterator
+ segment<T, Count, Allocator>::end() const
+ {
+ if (mPrev & kIsLastSegment)
+ return mData + mSize;
+ else
+ return mData + Count;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ inline typename segment<T, Count, Allocator>::iterator
+ segment<T, Count, Allocator>::end()
+ {
+ if (mPrev & kIsLastSegment)
+ return mData + mSize;
+ else
+ return mData + Count;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ T*
+ segmented_vector_iterator<T, Count, Allocator>::operator->() const
+ {
+ return mCurrent;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ T&
+ segmented_vector_iterator<T, Count, Allocator>::operator*() const
+ {
+ return *mCurrent;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ segmented_vector_iterator<T, Count, Allocator>&
+ segmented_vector_iterator<T, Count, Allocator>::operator++()
+ {
+ ++mCurrent;
+ if(EASTL_UNLIKELY(mCurrent == mEnd))
+ {
+ if (!(mSegment->mPrev & segment_type::kIsLastSegment))
+ {
+ mSegment = mSegment->mNext;
+ mCurrent = mSegment->begin();
+ mEnd = mSegment->end();
+ }
+ else
+ mCurrent = 0;
+ }
+ return *this;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ segmented_vector_iterator<T, Count, Allocator>
+ segmented_vector_iterator<T, Count, Allocator>::operator++(int)
+ {
+ this_type i(*this);
+ operator++();
+ return i;
+ }
+
+
+ template <typename T, size_t Count, typename Allocator>
+ inline segmented_vector<T, Count, Allocator>::segmented_vector(const Allocator& allocator)
+ : mAllocator(allocator)
+ , mFirstSegment(0)
+ , mLastSegment(0)
+ , mSegmentCount(0)
+ {
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline segmented_vector<T, Count, Allocator>::~segmented_vector()
+ {
+ clear();
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::allocator_type&
+ segmented_vector<T, Count, Allocator>::get_allocator()
+ {
+ return mAllocator;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline const typename segmented_vector<T, Count, Allocator>::segment_type*
+ segmented_vector<T, Count, Allocator>::first_segment() const
+ {
+ return mFirstSegment;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::segment_type*
+ segmented_vector<T, Count, Allocator>::first_segment()
+ {
+ return mFirstSegment;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::const_iterator
+ segmented_vector<T, Count, Allocator>::begin() const
+ {
+ iterator i;
+ i.mSegment = mFirstSegment;
+ if (mFirstSegment)
+ {
+ i.mCurrent = mFirstSegment->begin();
+ i.mEnd = mFirstSegment->end();
+ }
+ else
+ i.mCurrent = 0;
+ return (const_iterator&)i;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::iterator
+ segmented_vector<T, Count, Allocator>::begin()
+ {
+ iterator i;
+ i.mSegment = mFirstSegment;
+ if (mFirstSegment)
+ {
+ i.mCurrent = mFirstSegment->begin();
+ i.mEnd = mFirstSegment->end();
+ }
+ else
+ i.mCurrent = 0;
+ return i;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::const_iterator
+ segmented_vector<T, Count, Allocator>::end() const
+ {
+ iterator i;
+ i.mCurrent = 0;
+ return (const_iterator&)i;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::iterator
+ segmented_vector<T, Count, Allocator>::end()
+ {
+ iterator i;
+ i.mCurrent = 0;
+ return i;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::size_type
+ segmented_vector<T, Count, Allocator>::size() const
+ {
+ if (segment_type* segment = mLastSegment)
+ return (mSegmentCount-1)*Count + segment->mSize;
+ return 0;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::size_type
+ segmented_vector<T, Count, Allocator>::segment_count() const
+ {
+ return mSegmentCount;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline T&
+ segmented_vector<T, Count, Allocator>::front()
+ {
+ return mFirstSegment->mData[0];
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline T&
+ segmented_vector<T, Count, Allocator>::back()
+ {
+ segment_type* lastSegment = mLastSegment;
+ return lastSegment->mData[lastSegment->mSize-1];
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline bool
+ segmented_vector<T, Count, Allocator>::empty() const
+ {
+ return mFirstSegment == 0;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline void
+ segmented_vector<T, Count, Allocator>::clear()
+ {
+ if (segment_type* segment = mFirstSegment)
+ {
+ while (segment != mLastSegment)
+ {
+ segment_type* nextSegment = segment->mNext;
+ segment->~segment_type();
+ EASTLFree(mAllocator, segment, sizeof(segment_type));
+ segment = nextSegment;
+ }
+ for (T* i = segment->mData, *e = segment->mData + segment->mSize; i!=e; ++i)
+ i->~T();
+ EASTLFree(mAllocator, segment, sizeof(segment_type));
+ mFirstSegment = 0;
+ mLastSegment = 0;
+ mSegmentCount = 0;
+ }
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline T&
+ segmented_vector<T, Count, Allocator>::push_back()
+ {
+ return *(new (DoPushBack()) T());
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline T&
+ segmented_vector<T, Count, Allocator>::push_back(const T& value)
+ {
+ return *(new (DoPushBack()) T(value));
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline void*
+ segmented_vector<T, Count, Allocator>::push_back_uninitialized()
+ {
+ return DoPushBack();
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline void
+ segmented_vector<T, Count, Allocator>::pop_back()
+ {
+ segment_type* lastSegment = mLastSegment;
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!lastSegment))
+ EASTL_FAIL_MSG("segmented_vector::pop_back -- segmented vector is empty");
+ #endif
+ --lastSegment->mSize;
+ (lastSegment->mData + lastSegment->mSize)->T::~T();
+
+ if (!lastSegment->mSize)
+ {
+ --mSegmentCount;
+ mLastSegment = (segment_type*)(lastSegment->mPrev & (~segment_type::kIsLastSegment));
+ EASTLFree(mAllocator, lastSegment, sizeof(segment_type));
+ if (mLastSegment)
+ {
+ mLastSegment->mPrev |= segment_type::kIsLastSegment;
+ mLastSegment->mSize = Count;
+ }
+ else
+ mFirstSegment = 0;
+ }
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline void
+ segmented_vector<T, Count, Allocator>::erase_unsorted(segment_type& segment, typename segment_type::iterator it)
+ {
+ EA_UNUSED(segment);
+
+ *it = back();
+ pop_back();
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline typename segmented_vector<T, Count, Allocator>::iterator
+ segmented_vector<T, Count, Allocator>::erase_unsorted(const iterator& i)
+ {
+ iterator ret(i);
+ *i = back();
+ if (i.mSegment == mLastSegment && mLastSegment->mSize == 1)
+ ret.mCurrent = 0;
+ pop_back();
+ return ret;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ void
+ segmented_vector<T, Count, Allocator>::swap(this_type& other)
+ {
+ allocator_type tempAllocator(mAllocator);
+ segment_type* tempFirstSegment = mFirstSegment;
+ segment_type* tempLastSegment = mLastSegment;
+ size_type tempSegmentCount = mSegmentCount;
+
+ mAllocator = other.mAllocator;
+ mFirstSegment = other.mFirstSegment;
+ mLastSegment = other.mLastSegment;
+ mSegmentCount = other.mSegmentCount;
+
+ other.mAllocator = tempAllocator;
+ other.mFirstSegment = tempFirstSegment;
+ other.mLastSegment = tempLastSegment;
+ other.mSegmentCount = tempSegmentCount;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ segment<T, Count, Allocator>*
+ segmented_vector<T, Count, Allocator>::DoAllocSegment(segment_type* prevSegment)
+ {
+ ++mSegmentCount;
+ segment_type* segment = (segment_type*)allocate_memory(mAllocator, sizeof(segment_type), EASTL_ALIGN_OF(segment_type), 0);
+ segment->mPrev = uintptr_t(prevSegment) | segment_type::kIsLastSegment;
+ segment->mSize = 1;
+ return segment;
+ }
+
+ template <typename T, size_t Count, typename Allocator>
+ inline void*
+ segmented_vector<T, Count, Allocator>::DoPushBack()
+ {
+ if (segment_type* segment = mLastSegment)
+ {
+ size_type size = segment->mSize;
+ if (size < Count)
+ {
+ ++segment->mSize;
+ return segment->mData + size;
+ }
+ else
+ {
+ segment_type* lastSegment = mLastSegment;
+ segment_type* newSegment = mLastSegment = DoAllocSegment(mLastSegment);
+ lastSegment->mPrev &= ~segment_type::kIsLastSegment;
+ lastSegment->mNext = newSegment;
+ return newSegment->mData;
+ }
+ }
+ else
+ {
+ segment = mFirstSegment = mLastSegment = DoAllocSegment(0);
+ return segment->mData;
+ }
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ inline bool operator==(const segmented_vector_iterator<const T, Count, Allocator>& a, const segmented_vector_iterator<const T, Count, Allocator>& b)
+ {
+ return a.mCurrent == b.mCurrent;
+ }
+
+
+ template<typename T, size_t Count, typename Allocator>
+ inline bool operator!=(const segmented_vector_iterator<const T, Count, Allocator>& a, const segmented_vector_iterator<const T, Count, Allocator>& b)
+ {
+ return a.mCurrent != b.mCurrent;
+ }
+
+ template<typename T, size_t Count, typename Allocator>
+ inline bool operator==(const segmented_vector_iterator<T, Count, Allocator>& a, const segmented_vector_iterator<T, Count, Allocator>& b)
+ {
+ return a.mCurrent == b.mCurrent;
+ }
+
+
+ template<typename T, size_t Count, typename Allocator>
+ inline bool operator!=(const segmented_vector_iterator<T, Count, Allocator>& a, const segmented_vector_iterator<T, Count, Allocator>& b)
+ {
+ return a.mCurrent != b.mCurrent;
+ }
+}
+
+#endif
diff --git a/EASTL/include/EASTL/set.h b/EASTL/include/EASTL/set.h
new file mode 100644
index 0000000..8256162
--- /dev/null
+++ b/EASTL/include/EASTL/set.h
@@ -0,0 +1,675 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SET_H
+#define EASTL_SET_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/red_black_tree.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_SET_DEFAULT_NAME
+ #define EASTL_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " set" // Unless the user overrides something, this is "EASTL set".
+ #endif
+
+
+ /// EASTL_MULTISET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_MULTISET_DEFAULT_NAME
+ #define EASTL_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " multiset" // Unless the user overrides something, this is "EASTL multiset".
+ #endif
+
+
+ /// EASTL_SET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_SET_DEFAULT_ALLOCATOR
+ #define EASTL_SET_DEFAULT_ALLOCATOR allocator_type(EASTL_SET_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_MULTISET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_MULTISET_DEFAULT_ALLOCATOR
+ #define EASTL_MULTISET_DEFAULT_ALLOCATOR allocator_type(EASTL_MULTISET_DEFAULT_NAME)
+ #endif
+
+
+
+ /// set
+ ///
+ /// Implements a canonical set.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// Note that the 'bMutableIterators' template parameter to rbtree is set to false.
+ /// This means that set::iterator is const and the same as set::const_iterator.
+ /// This is by design and it follows the C++ standard defect report recommendation.
+ /// If the user wants to modify a container element, the user needs to either use
+ /// mutable data members or use const_cast on the iterator's data member. Both of
+ /// these solutions are recommended by the C++ standard defect report.
+ /// To consider: Expose the bMutableIterators template policy here at the set level
+ /// so the user can have non-const set iterators via a template parameter.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a set container, your pool
+ /// needs to contain items of type set::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+ /// typedef set<Widget, less<Widget>, MemoryPool> WidgetSet; // Delare your WidgetSet type.
+ /// MemoryPool myPool(sizeof(WidgetSet::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetSet mySet(&myPool); // Create a map that uses the pool.
+ ///
+ template <typename Key, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+ class set
+ : public rbtree<Key, Key, Compare, Allocator, eastl::use_self<Key>, false, true>
+ {
+ public:
+ typedef rbtree<Key, Key, Compare, Allocator, eastl::use_self<Key>, false, true> base_type;
+ typedef set<Key, Compare, Allocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::reverse_iterator reverse_iterator;
+ typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef Compare value_compare;
+ // Other types are inherited from the base class.
+
+ using base_type::begin;
+ using base_type::end;
+ using base_type::find;
+ using base_type::lower_bound;
+ using base_type::upper_bound;
+
+ protected:
+ using base_type::compare;
+ using base_type::get_compare;
+
+ public:
+ set(const allocator_type& allocator = EASTL_SET_DEFAULT_ALLOCATOR);
+ set(const Compare& compare, const allocator_type& allocator = EASTL_SET_DEFAULT_ALLOCATOR);
+ set(const this_type& x);
+ set(this_type&& x);
+ set(this_type&& x, const allocator_type& allocator);
+ set(std::initializer_list<value_type> ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_SET_DEFAULT_ALLOCATOR);
+
+ template <typename Iterator>
+ set(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ // The (this_type&& x) ctor above has the side effect of forcing us to make operator= visible in this subclass.
+ this_type& operator=(const this_type& x) { return (this_type&)base_type::operator=(x); }
+ this_type& operator=(std::initializer_list<value_type> ilist) { return (this_type&)base_type::operator=(ilist); }
+ this_type& operator=(this_type&& x) { return (this_type&)base_type::operator=(eastl::move(x)); }
+
+ public:
+ value_compare value_comp() const;
+
+ size_type erase(const Key& k);
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ size_type count(const Key& k) const;
+
+ eastl::pair<iterator, iterator> equal_range(const Key& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const Key& k) const;
+
+ }; // set
+
+
+
+
+
+ /// multiset
+ ///
+ /// Implements a canonical multiset.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// See notes above in 'set' regarding multable iterators.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a multiset container, your pool
+ /// needs to contain items of type multiset::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+ /// typedef multiset<Widget, less<Widget>, MemoryPool> WidgetSet; // Delare your WidgetSet type.
+ /// MemoryPool myPool(sizeof(WidgetSet::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetSet mySet(&myPool); // Create a map that uses the pool.
+ ///
+ template <typename Key, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+ class multiset
+ : public rbtree<Key, Key, Compare, Allocator, eastl::use_self<Key>, false, false>
+ {
+ public:
+ typedef rbtree<Key, Key, Compare, Allocator, eastl::use_self<Key>, false, false> base_type;
+ typedef multiset<Key, Compare, Allocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::reverse_iterator reverse_iterator;
+ typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef Compare value_compare;
+ // Other types are inherited from the base class.
+
+ using base_type::begin;
+ using base_type::end;
+ using base_type::find;
+ using base_type::lower_bound;
+ using base_type::upper_bound;
+
+ protected:
+ using base_type::compare;
+ using base_type::get_compare;
+
+ public:
+ multiset(const allocator_type& allocator = EASTL_MULTISET_DEFAULT_ALLOCATOR);
+ multiset(const Compare& compare, const allocator_type& allocator = EASTL_MULTISET_DEFAULT_ALLOCATOR);
+ multiset(const this_type& x);
+ multiset(this_type&& x);
+ multiset(this_type&& x, const allocator_type& allocator);
+ multiset(std::initializer_list<value_type> ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_MULTISET_DEFAULT_ALLOCATOR);
+
+ template <typename Iterator>
+ multiset(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ // The (this_type&& x) ctor above has the side effect of forcing us to make operator= visible in this subclass.
+ this_type& operator=(const this_type& x) { return (this_type&)base_type::operator=(x); }
+ this_type& operator=(std::initializer_list<value_type> ilist) { return (this_type&)base_type::operator=(ilist); }
+ this_type& operator=(this_type&& x) { return (this_type&)base_type::operator=(eastl::move(x)); }
+
+ public:
+ value_compare value_comp() const;
+
+ size_type erase(const Key& k);
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ size_type count(const Key& k) const;
+
+ eastl::pair<iterator, iterator> equal_range(const Key& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const Key& k) const;
+
+ /// equal_range_small
+ /// This is a special version of equal_range which is optimized for the
+ /// case of there being few or no duplicated keys in the tree.
+ eastl::pair<iterator, iterator> equal_range_small(const Key& k);
+ eastl::pair<const_iterator, const_iterator> equal_range_small(const Key& k) const;
+
+ }; // multiset
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // set
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline set<Key, Compare, Allocator>::set(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline set<Key, Compare, Allocator>::set(const Compare& compare, const allocator_type& allocator)
+ : base_type(compare, allocator)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline set<Key, Compare, Allocator>::set(const this_type& x)
+ : base_type(x)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline set<Key, Compare, Allocator>::set(this_type&& x)
+ : base_type(eastl::move(x))
+ {
+ }
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline set<Key, Compare, Allocator>::set(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline set<Key, Compare, Allocator>::set(std::initializer_list<value_type> ilist, const Compare& compare, const allocator_type& allocator)
+ : base_type(ilist.begin(), ilist.end(), compare, allocator)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ template <typename Iterator>
+ inline set<Key, Compare, Allocator>::set(Iterator itBegin, Iterator itEnd)
+ : base_type(itBegin, itEnd, Compare(), EASTL_SET_DEFAULT_ALLOCATOR)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename set<Key, Compare, Allocator>::value_compare
+ set<Key, Compare, Allocator>::value_comp() const
+ {
+ return get_compare();
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename set<Key, Compare, Allocator>::size_type
+ set<Key, Compare, Allocator>::erase(const Key& k)
+ {
+ const iterator it(find(k));
+
+ if(it != end()) // If it exists...
+ {
+ base_type::erase(it);
+ return 1;
+ }
+ return 0;
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename set<Key, Compare, Allocator>::iterator
+ set<Key, Compare, Allocator>::erase(const_iterator position)
+ {
+ // We need to provide this version because we override another version
+ // and C++ hiding rules would make the base version of this hidden.
+ return base_type::erase(position);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename set<Key, Compare, Allocator>::iterator
+ set<Key, Compare, Allocator>::erase(const_iterator first, const_iterator last)
+ {
+ // We need to provide this version because we override another version
+ // and C++ hiding rules would make the base version of this hidden.
+ return base_type::erase(first, last);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename set<Key, Compare, Allocator>::size_type
+ set<Key, Compare, Allocator>::count(const Key& k) const
+ {
+ const const_iterator it(find(k));
+ return (it != end()) ? (size_type)1 : (size_type)0;
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename set<Key, Compare, Allocator>::reverse_iterator
+ set<Key, Compare, Allocator>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename set<Key, Compare, Allocator>::reverse_iterator
+ set<Key, Compare, Allocator>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline eastl::pair<typename set<Key, Compare, Allocator>::iterator,
+ typename set<Key, Compare, Allocator>::iterator>
+ set<Key, Compare, Allocator>::equal_range(const Key& k)
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ const iterator itLower(lower_bound(k));
+
+ if((itLower == end()) || compare(k, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<iterator, iterator>(itLower, itLower);
+
+ iterator itUpper(itLower);
+ return eastl::pair<iterator, iterator>(itLower, ++itUpper);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline eastl::pair<typename set<Key, Compare, Allocator>::const_iterator,
+ typename set<Key, Compare, Allocator>::const_iterator>
+ set<Key, Compare, Allocator>::equal_range(const Key& k) const
+ {
+ // See equal_range above for comments.
+ const const_iterator itLower(lower_bound(k));
+
+ if((itLower == end()) || compare(k, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<const_iterator, const_iterator>(itLower, itLower);
+
+ const_iterator itUpper(itLower);
+ return eastl::pair<const_iterator, const_iterator>(itLower, ++itUpper);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // erase_if
+ //
+ // https://en.cppreference.com/w/cpp/container/set/erase_if
+ ///////////////////////////////////////////////////////////////////////
+ template <class Key, class Compare, class Allocator, class Predicate>
+ typename set<Key, Compare, Allocator>::size_type erase_if(set<Key, Compare, Allocator>& c, Predicate predicate)
+ {
+ auto oldSize = c.size();
+ for (auto i = c.begin(), last = c.end(); i != last;)
+ {
+ if (predicate(*i))
+ {
+ i = c.erase(i);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ return oldSize - c.size();
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <class Key, class Compare, class Allocator>
+ synth_three_way_result<Key> operator<=>(const set<Key, Compare, Allocator>& a, const set<Key, Compare, Allocator>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+#endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // multiset
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline multiset<Key, Compare, Allocator>::multiset(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline multiset<Key, Compare, Allocator>::multiset(const Compare& compare, const allocator_type& allocator)
+ : base_type(compare, allocator)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline multiset<Key, Compare, Allocator>::multiset(const this_type& x)
+ : base_type(x)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline multiset<Key, Compare, Allocator>::multiset(this_type&& x)
+ : base_type(eastl::move(x))
+ {
+ }
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline multiset<Key, Compare, Allocator>::multiset(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline multiset<Key, Compare, Allocator>::multiset(std::initializer_list<value_type> ilist, const Compare& compare, const allocator_type& allocator)
+ : base_type(ilist.begin(), ilist.end(), compare, allocator)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ template <typename Iterator>
+ inline multiset<Key, Compare, Allocator>::multiset(Iterator itBegin, Iterator itEnd)
+ : base_type(itBegin, itEnd, Compare(), EASTL_MULTISET_DEFAULT_ALLOCATOR)
+ {
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename multiset<Key, Compare, Allocator>::value_compare
+ multiset<Key, Compare, Allocator>::value_comp() const
+ {
+ return get_compare();
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename multiset<Key, Compare, Allocator>::size_type
+ multiset<Key, Compare, Allocator>::erase(const Key& k)
+ {
+ const eastl::pair<iterator, iterator> range(equal_range(k));
+ const size_type n = (size_type)eastl::distance(range.first, range.second);
+ base_type::erase(range.first, range.second);
+ return n;
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename multiset<Key, Compare, Allocator>::iterator
+ multiset<Key, Compare, Allocator>::erase(const_iterator position)
+ {
+ // We need to provide this version because we override another version
+ // and C++ hiding rules would make the base version of this hidden.
+ return base_type::erase(position);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename multiset<Key, Compare, Allocator>::iterator
+ multiset<Key, Compare, Allocator>::erase(const_iterator first, const_iterator last)
+ {
+ // We need to provide this version because we override another version
+ // and C++ hiding rules would make the base version of this hidden.
+ return base_type::erase(first, last);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename multiset<Key, Compare, Allocator>::size_type
+ multiset<Key, Compare, Allocator>::count(const Key& k) const
+ {
+ const eastl::pair<const_iterator, const_iterator> range(equal_range(k));
+ return (size_type)eastl::distance(range.first, range.second);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename multiset<Key, Compare, Allocator>::reverse_iterator
+ multiset<Key, Compare, Allocator>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline typename multiset<Key, Compare, Allocator>::reverse_iterator
+ multiset<Key, Compare, Allocator>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline eastl::pair<typename multiset<Key, Compare, Allocator>::iterator,
+ typename multiset<Key, Compare, Allocator>::iterator>
+ multiset<Key, Compare, Allocator>::equal_range(const Key& k)
+ {
+ // There are multiple ways to implement equal_range. The implementation mentioned
+ // in the C++ standard and which is used by most (all?) commercial STL implementations
+ // is this:
+ // return eastl::pair<iterator, iterator>(lower_bound(k), upper_bound(k));
+ //
+ // This does two tree searches -- one for the lower bound and one for the
+ // upper bound. This works well for the case whereby you have a large container
+ // and there are lots of duplicated values. We provide an alternative version
+ // of equal_range called equal_range_small for cases where the user is confident
+ // that the number of duplicated items is only a few.
+
+ return eastl::pair<iterator, iterator>(lower_bound(k), upper_bound(k));
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline eastl::pair<typename multiset<Key, Compare, Allocator>::const_iterator,
+ typename multiset<Key, Compare, Allocator>::const_iterator>
+ multiset<Key, Compare, Allocator>::equal_range(const Key& k) const
+ {
+ // See comments above in the non-const version of equal_range.
+ return eastl::pair<iterator, iterator>(lower_bound(k), upper_bound(k));
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline eastl::pair<typename multiset<Key, Compare, Allocator>::iterator,
+ typename multiset<Key, Compare, Allocator>::iterator>
+ multiset<Key, Compare, Allocator>::equal_range_small(const Key& k)
+ {
+ // We provide alternative version of equal_range here which works faster
+ // for the case where there are at most small number of potential duplicated keys.
+ const iterator itLower(lower_bound(k));
+ iterator itUpper(itLower);
+
+ while((itUpper != end()) && !compare(k, itUpper.mpNode->mValue))
+ ++itUpper;
+
+ return eastl::pair<iterator, iterator>(itLower, itUpper);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator>
+ inline eastl::pair<typename multiset<Key, Compare, Allocator>::const_iterator,
+ typename multiset<Key, Compare, Allocator>::const_iterator>
+ multiset<Key, Compare, Allocator>::equal_range_small(const Key& k) const
+ {
+ // We provide alternative version of equal_range here which works faster
+ // for the case where there are at most small number of potential duplicated keys.
+ const const_iterator itLower(lower_bound(k));
+ const_iterator itUpper(itLower);
+
+ while((itUpper != end()) && !compare(k, *itUpper))
+ ++itUpper;
+
+ return eastl::pair<const_iterator, const_iterator>(itLower, itUpper);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // erase_if
+ //
+ // https://en.cppreference.com/w/cpp/container/multiset/erase_if
+ ///////////////////////////////////////////////////////////////////////
+ template <class Key, class Compare, class Allocator, class Predicate>
+ typename multiset<Key, Compare, Allocator>::size_type erase_if(multiset<Key, Compare, Allocator>& c, Predicate predicate)
+ {
+ auto oldSize = c.size();
+ // Erases all elements that satisfy the predicate pred from the container.
+ for (auto i = c.begin(), last = c.end(); i != last;)
+ {
+ if (predicate(*i))
+ {
+ i = c.erase(i);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ return oldSize - c.size();
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <class Key, class Compare, class Allocator>
+ synth_three_way_result<Key> operator<=>(const multiset<Key, Compare, Allocator>& a, const multiset<Key, Compare, Allocator>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+#endif
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/shared_array.h b/EASTL/include/EASTL/shared_array.h
new file mode 100644
index 0000000..b7d7840
--- /dev/null
+++ b/EASTL/include/EASTL/shared_array.h
@@ -0,0 +1,434 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Note (March 2014): shared_array is not a full implementation of an array version
+// of C++11 shared_ptr, and there currently are no plans to make it so. A future
+// version of shared_ptr would likely take on the ability to store arrays,
+// same as unique_ptr has array support. This class isn't deprecated, but it
+// is frozen until some future decision is made on what to do about arrays.
+//
+///////////////////////////////////////////////////////////////////////////////
+// This class implements a shared_array template. This is a class which is
+// similar to the C++ shared_ptr template, except that it works with arrays
+// instead of individual objects.
+//
+// Important notice:
+// As of this writing (9/2003), the implementation provided here has
+// limitations that you should be aware of. These limitations will be shortly
+// rectified. Most significantly, this implementation has the following
+// weaknesses:
+// - It cannot safely deal with exceptions that occur during the
+// construction of shared_ptr objects.
+// - It cannot safely deal with recursive shared_ptr objects.
+// If a shared_ptr<A> holds a pointer to an instance of A and
+// class A owns an instance of shared_ptr<A> that refers to,
+// the original instance, the memory will leak.
+// - A template of type shared_ptr<void> will not call the destructor
+// for an object that it stores. You thus must declare a shared_ptr
+// template specifically for the class type.
+// - It doesn't safely handle multiple instances of shared_ptr
+// which own the same pointer accessed from multiple threads.
+// This weakness is by design, for performance reasons. You should
+// use shared_ptr_mt for multi-thread safe access.
+//
+// The rectification of the above issues are discussed in the C++ standardization
+// documents for the next C++ standard (as of 2003):
+// http://std.dkuug.dk/jtc1/sc22/wg21/docs/papers/2003/n1450.html#Implementation-difficulty
+//
+// This current implementation will be eventually (hopefully by 1/2004) rectified
+// to be in line with the second generation C++ standard proposal.
+//
+// The intended design of this class is based somewhat on the design of the Boost
+// shared_array template. This design is also being considered for the next C++
+// standard (as of 2003). The C++ standard update proposal is currently available at:
+// http://std.dkuug.dk/jtc1/sc22/wg21/docs/papers/2003/n1450.html
+// Boost smart pointers, including shared_array are documented at:
+// http://www.boost.org/libs/smart_ptr/
+//
+// As of this writing (10/2003), this class has received approval from EA legal
+// for use. The potential issue is the similarity of the class name and class
+// interface to existing open source code.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SHARED_ARRAY_H
+#define EASTL_SHARED_ARRAY_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/smart_ptr.h> // Defines smart_array_deleter
+
+
+EA_DISABLE_ALL_VC_WARNINGS();
+
+ #include <new>
+ #include <stddef.h>
+
+EA_RESTORE_ALL_VC_WARNINGS();
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_SHARED_ARRAY_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_SHARED_ARRAY_DEFAULT_NAME
+ #define EASTL_SHARED_ARRAY_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " shared_array" // Unless the user overrides something, this is "EASTL shared_array".
+ #endif
+
+
+ /// EASTL_SHARED_ARRAY_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_SHARED_ARRAY_DEFAULT_ALLOCATOR
+ #define EASTL_SHARED_ARRAY_DEFAULT_ALLOCATOR allocator_type(EASTL_SHARED_ARRAY_DEFAULT_NAME)
+ #endif
+
+
+
+ /// class shared_array
+ /// A shared_array is the same as shared_ptr but for arrays.
+ template <typename T, typename Allocator = EASTLAllocatorType, typename Deleter = smart_array_deleter<T> >
+ class shared_array
+ {
+ protected:
+ /// this_type
+ /// This is an alias for shared_array<T>, this class.
+ typedef shared_array<T> this_type;
+
+ /// allocator_type
+ typedef Allocator allocator_type;
+
+ /// deleter_type
+ typedef Deleter deleter_type;
+
+ /// ref_count
+ /// An internal reference count type. Must be convertable to int
+ /// so that the public use_count function can work.
+ typedef int ref_count;
+
+ T* mpArray; /// The owned pointer. Points to an array of T.
+ ref_count* mpRefCount; /// Reference count for owned pointer.
+ allocator_type mAllocator; /// The allocator used to manage new/delete of mpRefCount.
+
+ public:
+ typedef T element_type;
+
+ /// shared_array
+ /// Takes ownership of the pointer and sets the reference count
+ /// to the pointer to 1. It is OK if the input pointer is null.
+ /// The shared reference count is allocated on the heap via operator new.
+ /// If an exception occurs during the allocation of the shared
+ /// reference count, the owned pointer is deleted and the exception
+ /// is rethrown. A null pointer is given a reference count of 1.
+ explicit shared_array(T* pArray = NULL, const allocator_type& allocator = EASTL_SHARED_ARRAY_DEFAULT_ALLOCATOR)
+ : mpArray(pArray),
+ mpRefCount(NULL),
+ mAllocator(allocator)
+ {
+ // Allocate memory for the reference count.
+ void* const pMemory = EASTLAlloc(mAllocator, sizeof(ref_count));
+ if(pMemory)
+ mpRefCount = ::new(pMemory) ref_count(1);
+ }
+
+
+ /// shared_array
+ /// Shares ownership of a pointer with another instance of shared_array.
+ /// This function increments the shared reference count on the pointer.
+ /// If we want a shared_array constructor that is templated on shared_array<U>,
+ /// then we need to make it in addition to this function, as otherwise
+ /// the compiler will generate this function and things will go wrong.
+ shared_array(const shared_array& sharedArray)
+ : mpArray(sharedArray.mpArray),
+ mpRefCount(sharedArray.mpRefCount),
+ mAllocator(sharedArray.mAllocator)
+ {
+ ++*mpRefCount;
+ }
+
+
+ /// ~shared_array
+ /// Decrements the reference count for the owned pointer. If the
+ /// reference count goes to zero, the owned pointer is deleted and
+ /// the shared reference count is deleted.
+ ~shared_array()
+ {
+ const ref_count newRefCount(--*mpRefCount);
+ // assert(newRefCount >= 0);
+ if(newRefCount == 0)
+ {
+ EASTLFree(mAllocator, mpRefCount, sizeof(ref_count));
+ Deleter del;
+ del(mpArray);
+ }
+ }
+
+
+ /// operator=
+ /// Copies another shared_array to this object. Note that this object
+ /// may already own a shared pointer with another different pointer
+ /// (but still of the same type) before this call. In that case,
+ /// this function releases the old pointer, decrementing its reference
+ /// count and deleting it if zero, takes shared ownership of the new
+ /// pointer and increments its reference count.
+ /// If we want a shared_array operator= that is templated on shared_array<U>,
+ /// then we need to make it in addition to this function, as otherwise
+ /// the compiler will generate this function and things will go wrong.
+ shared_array& operator=(const shared_array& sharedArray)
+ {
+ if(mpArray != sharedArray.mpArray)
+ {
+ // The easiest thing to do is to create a temporary and
+ // copy ourselves ourselves into it. This is a standard
+ // method for switching pointer ownership in systems like this.
+ shared_array(sharedArray).swap(*this);
+ }
+ return *this;
+ }
+
+
+ /// operator=
+ /// Assigns a new pointer, while decrementing the reference count on
+ /// the current pointer. The new pointer can be NULL and the current
+ /// pointer can NULL. If the new pointer is equivalent to the current
+ /// pointer, then nothing is done.
+ shared_array& operator=(T* pValue)
+ {
+ reset(pValue);
+ return *this;
+ }
+
+
+ /// reset
+ /// Releases the owned pointer and takes ownership of the
+ /// passed in pointer. If the passed in pointer is the same
+ /// as the owned pointer, nothing is done. The passed in pointer
+ /// can be null, in which case the use count is set to 1.
+ void reset(T* pArray = NULL)
+ {
+ if(pArray != mpArray)
+ {
+ // The easiest thing to do is to create a temporary and
+ // copy ourselves ourselves into it. This is a standard
+ // method for switching pointer ownership in systems like this.
+ shared_array(pArray, mAllocator).swap(*this);
+ }
+ }
+
+
+ /// swap
+ /// Exchanges the owned pointer beween two shared_array objects.
+ void swap(this_type& sharedArray)
+ {
+ // We leave mAllocator as-is.
+
+ // eastl::swap(mpArray, sharedArray.mpArray);
+ T* const pArray = sharedArray.mpArray;
+ sharedArray.mpArray = mpArray;
+ mpArray = pArray;
+
+ // eastl::swap(mpRefCount, sharedArray.mpRefCount);
+ ref_count* const pRefCount = sharedArray.mpRefCount;
+ sharedArray.mpRefCount = mpRefCount;
+ mpRefCount = pRefCount;
+ }
+
+
+ /// operator[]
+ /// Returns a reference to the specified item in the owned pointer
+ /// array.
+ /// Example usage:
+ /// shared_array<int> ptr = new int[6];
+ /// int x = ptr[2];
+ T& operator[](ptrdiff_t i) const
+ {
+ // assert(mpArray && (i >= 0));
+ return mpArray[i];
+ }
+
+ /// operator*
+ /// Returns the owner pointer dereferenced.
+ /// Example usage:
+ /// shared_array<int> ptr = new int(3);
+ /// int x = *ptr;
+ T& operator*() const
+ {
+ // assert(mpArray);
+ return *mpArray;
+ }
+
+ /// operator->
+ /// Allows access to the owned pointer via operator->()
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// shared_array<int> ptr = new X;
+ /// ptr->DoSomething();
+ T* operator->() const EA_NOEXCEPT
+ {
+ // assert(mpArray);
+ return mpArray;
+ }
+
+ /// get
+ /// Returns the owned pointer. Note that this class does
+ /// not provide an operator T() function. This is because such
+ /// a thing (automatic conversion) is deemed unsafe.
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// shared_array<int> ptr = new X;
+ /// X* pX = ptr.get();
+ /// pX->DoSomething();
+ T* get() const EA_NOEXCEPT
+ {
+ return mpArray;
+ }
+
+ /// use_count
+ /// Returns the reference count on the owned pointer.
+ /// The return value is one if the owned pointer is null.
+ int use_count() const
+ {
+ // assert(mpRefCount);
+ return (int)*mpRefCount;
+ }
+
+ /// unique
+ /// Returns true if the reference count on the owned pointer is one.
+ /// The return value is true if the owned pointer is null.
+ bool unique() const
+ {
+ // assert(mpRefCount);
+ return (*mpRefCount == 1);
+ }
+
+ /// Implicit operator bool
+ /// Allows for using a scoped_ptr as a boolean.
+ /// Example usage:
+ /// shared_array<int> ptr = new int(3);
+ /// if(ptr)
+ /// ++*ptr;
+ ///
+ /// Note that below we do not use operator bool(). The reason for this
+ /// is that booleans automatically convert up to short, int, float, etc.
+ /// The result is that this: if(sharedArray == 1) would yield true (bad).
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const EA_NOEXCEPT
+ {
+ if(mpArray)
+ return &this_type::get;
+ return NULL;
+ }
+
+ /// operator!
+ /// This returns the opposite of operator bool; it returns true if
+ /// the owned pointer is null. Some compilers require this and some don't.
+ /// shared_array<int> ptr = new int(3);
+ /// if(!ptr)
+ /// assert(false);
+ bool operator!() const EA_NOEXCEPT
+ {
+ return (mpArray == NULL);
+ }
+
+ /// get_allocator
+ /// Returns the memory allocator associated with this class.
+ const allocator_type& get_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+ allocator_type& get_allocator() EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+ /// set_allocator
+ /// Sets the memory allocator associated with this class.
+ void set_allocator(const allocator_type& allocator)
+ {
+ mAllocator = allocator;
+ }
+
+ }; // class shared_array
+
+
+
+ /// get_pointer
+ /// returns shared_array::get() via the input shared_array.
+ template <typename T, typename A, typename D>
+ inline T* get_pointer(const shared_array<T, A, D>& sharedArray)
+ {
+ return sharedArray.get();
+ }
+
+ /// swap
+ /// Exchanges the owned pointer beween two shared_array objects.
+ /// This non-member version is useful for compatibility of shared_array
+ /// objects with the C++ Standard Library and other libraries.
+ template <typename T, typename A, typename D>
+ inline void swap(shared_array<T, A, D>& sharedArray1, shared_array<T, A, D>& sharedArray2)
+ {
+ sharedArray1.swap(sharedArray2);
+ }
+
+
+ /// operator!=
+ /// Compares two shared_array objects for equality. Equality is defined as
+ /// being true when the pointer shared between two shared_array objects is equal.
+ /// It is debatable what the appropriate definition of equality is between two
+ /// shared_array objects, but we follow the current 2nd generation C++ standard proposal.
+ template <typename T, typename TA, typename TD, typename U, typename UA, typename UD>
+ inline bool operator==(const shared_array<T, TA, TD>& sharedArray1, const shared_array<U, UA, UD>& sharedArray2)
+ {
+ // assert((sharedArray1.get() != sharedArray2.get()) || (sharedArray1.use_count() == sharedArray2.use_count()));
+ return (sharedArray1.get() == sharedArray2.get());
+ }
+
+
+ /// operator!=
+ /// Compares two shared_array objects for inequality. Equality is defined as
+ /// being true when the pointer shared between two shared_array objects is equal.
+ /// It is debatable what the appropriate definition of equality is between two
+ /// shared_array objects, but we follow the current 2nd generation C++ standard proposal.
+ template <typename T, typename TA, typename TD, typename U, typename UA, typename UD>
+ inline bool operator!=(const shared_array<T, TA, TD>& sharedArray1, const shared_array<U, UA, UD>& sharedArray2)
+ {
+ // assert((sharedArray1.get() != sharedArray2.get()) || (sharedArray1.use_count() == sharedArray2.use_count()));
+ return (sharedArray1.get() != sharedArray2.get());
+ }
+
+
+ /// operator<
+ /// Returns which shared_array is 'less' than the other. Useful when storing
+ /// sorted containers of scoped_ptr objects.
+ template <typename T, typename TA, typename TD, typename U, typename UA, typename UD>
+ inline bool operator<(const shared_array<T, TA, TD>& sharedArray1, const shared_array<U, UA, UD>& sharedArray2)
+ {
+ return (sharedArray1.get() < sharedArray2.get()); // Alternatively use: std::less<T*>(a.get(), b.get());
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/shared_ptr.h b/EASTL/include/EASTL/shared_ptr.h
new file mode 100644
index 0000000..e7eb778
--- /dev/null
+++ b/EASTL/include/EASTL/shared_ptr.h
@@ -0,0 +1,1717 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This class implements the C++11 shared_ptr template. A shared_ptr is like
+// the C++ Standard Library unique_ptr except that it allows sharing of pointers
+// between instances via reference counting. shared_ptr objects can safely be
+// copied and can safely be used in containers such as vector or list.
+//
+// Notes regarding safe usage of shared_ptr:
+// - http://www.boost.org/doc/libs/1_53_0/libs/smart_ptr/shared_ptr.htm#ThreadSafety
+// - If you construct a shared_ptr with a raw pointer, you cannot construct another shared_ptr
+// with just that raw pointer. Insted you need to construct additional shared_ptrs with
+// the originally created shared_ptr. Otherwise you will get a crash.
+// - Usage of shared_ptr is thread-safe, but what it points to isn't automatically thread safe.
+// Multiple shared_ptrs that refer to the same object can be used arbitrarily by multiple threads.
+// - You can use a single shared_ptr between multiple threads in all ways except one: assigment
+// to that shared_ptr. The following is not thread-safe, and needs to be guarded by a mutex
+// or the shared_ptr atomic functions:
+// shared_ptr<Foo> pFoo;
+// // Thread 1:
+// shared_ptr<Foo> pFoo2 = pFoo;
+// // Thread 2:
+// pFoo = make_shared<Foo>();
+//
+// Compatibility note:
+// This version of shared_ptr updates the previous version to have full C++11
+// compatibility. However, in order to add C++11 compatibility there needed to
+// be a few breaking changes which may affect some users. It's likely that most
+// or all breaking changes can be rectified by doing the same thing in a slightly
+// different way. Here's a list of the primary signficant breaking changes:
+// - shared_ptr now takes just one template parameter instead of three.
+// (allocator and deleter). You now specify the allocator and deleter
+// as part of the shared_ptr constructor at runtime.
+// - shared_ptr has thread safety, which
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SHARED_PTR_H
+#define EASTL_SHARED_PTR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/smart_ptr.h>
+#include <EASTL/internal/thread_support.h>
+#include <EASTL/unique_ptr.h>
+#include <EASTL/functional.h>
+#include <EASTL/allocator.h>
+#include <EASTL/atomic.h>
+#if EASTL_RTTI_ENABLED
+ #include <typeinfo>
+#endif
+#if EASTL_EXCEPTIONS_ENABLED
+ #include <exception>
+#endif
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <new>
+#include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+EA_DISABLE_VC_WARNING(4530); // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+EA_DISABLE_VC_WARNING(4571); // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////
+ // shared_ptr
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// EASTL_SHARED_PTR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_SHARED_PTR_DEFAULT_NAME
+ #define EASTL_SHARED_PTR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " shared_ptr" // Unless the user overrides something, this is "EASTL shared_ptr".
+ #endif
+
+
+ /// EASTL_SHARED_PTR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_SHARED_PTR_DEFAULT_ALLOCATOR
+ #define EASTL_SHARED_PTR_DEFAULT_ALLOCATOR EASTLAllocatorType(EASTL_SHARED_PTR_DEFAULT_NAME)
+ #endif
+
+
+ // Forward declarations
+ template <typename T, typename Deleter> class unique_ptr;
+ template <typename T> class weak_ptr;
+ template <typename T> class enable_shared_from_this;
+
+
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ // We define eastl::bad_weak_ptr as opposed to std::bad_weak_ptr. The reason is that
+ // we can't easily know of std::bad_weak_ptr exists and we would have to #include <memory>
+ // to use it. EASTL "owns" the types that are defined in EASTL headers, and std::bad_weak_ptr
+ // is declared in <memory>.
+
+ struct bad_weak_ptr : std::exception
+ {
+ const char* what() const EA_NOEXCEPT EA_OVERRIDE
+ { return "bad weak_ptr"; }
+ };
+ #endif
+
+
+ /// ref_count_sp
+ ///
+ /// This is a small utility class used by shared_ptr and weak_ptr.
+ struct ref_count_sp
+ {
+ atomic<int32_t> mRefCount; /// Reference count on the contained pointer. Starts as 1 by default.
+ atomic<int32_t> mWeakRefCount; /// Reference count on contained pointer plus this ref_count_sp object itself. Starts as 1 by default.
+
+ public:
+ ref_count_sp(int32_t refCount = 1, int32_t weakRefCount = 1) EA_NOEXCEPT;
+ virtual ~ref_count_sp() EA_NOEXCEPT {}
+
+ int32_t use_count() const EA_NOEXCEPT;
+ void addref() EA_NOEXCEPT;
+ void release();
+ void weak_addref() EA_NOEXCEPT;
+ void weak_release();
+ ref_count_sp* lock() EA_NOEXCEPT;
+
+ virtual void free_value() EA_NOEXCEPT = 0; // Release the contained object.
+ virtual void free_ref_count_sp() EA_NOEXCEPT = 0; // Release this instance.
+
+ #if EASTL_RTTI_ENABLED
+ virtual void* get_deleter(const std::type_info& type) const EA_NOEXCEPT = 0;
+ #else
+ virtual void* get_deleter() const EA_NOEXCEPT = 0;
+ #endif
+ };
+
+
+ inline ref_count_sp::ref_count_sp(int32_t refCount, int32_t weakRefCount) EA_NOEXCEPT
+ : mRefCount(refCount), mWeakRefCount(weakRefCount) {}
+
+ inline int32_t ref_count_sp::use_count() const EA_NOEXCEPT
+ {
+ return mRefCount.load(memory_order_relaxed); // To figure out: is this right?
+ }
+
+ inline void ref_count_sp::addref() EA_NOEXCEPT
+ {
+ mRefCount.fetch_add(1, memory_order_relaxed);
+ mWeakRefCount.fetch_add(1, memory_order_relaxed);
+ }
+
+ inline void ref_count_sp::release()
+ {
+ EASTL_ASSERT((mRefCount.load(memory_order_relaxed) > 0));
+ if(mRefCount.fetch_sub(1, memory_order_release) == 1)
+ {
+ atomic_thread_fence(memory_order_acquire);
+ free_value();
+ }
+
+ weak_release();
+ }
+
+ inline void ref_count_sp::weak_addref() EA_NOEXCEPT
+ {
+ mWeakRefCount.fetch_add(1, memory_order_relaxed);
+ }
+
+ inline void ref_count_sp::weak_release()
+ {
+ EASTL_ASSERT(mWeakRefCount.load(memory_order_relaxed) > 0);
+ if(mWeakRefCount.fetch_sub(1, memory_order_release) == 1)
+ {
+ atomic_thread_fence(memory_order_acquire);
+ free_ref_count_sp();
+ }
+ }
+
+ inline ref_count_sp* ref_count_sp::lock() EA_NOEXCEPT
+ {
+ for(int32_t refCountTemp = mRefCount.load(memory_order_relaxed); refCountTemp != 0; )
+ {
+ if(mRefCount.compare_exchange_weak(refCountTemp, refCountTemp + 1, memory_order_relaxed))
+ {
+ mWeakRefCount.fetch_add(1, memory_order_relaxed);
+ return this;
+ }
+ }
+
+ return nullptr;
+ }
+
+
+
+ /// ref_count_sp_t
+ ///
+ /// This is a version of ref_count_sp which is used to delete the contained pointer.
+ template <typename T, typename Allocator, typename Deleter>
+ class ref_count_sp_t : public ref_count_sp
+ {
+ public:
+ typedef ref_count_sp_t<T, Allocator, Deleter> this_type;
+ typedef T value_type;
+ typedef Allocator allocator_type;
+ typedef Deleter deleter_type;
+
+ value_type mValue; // This is expected to be a pointer.
+ deleter_type mDeleter;
+ allocator_type mAllocator;
+
+ ref_count_sp_t(value_type value, deleter_type deleter, allocator_type allocator)
+ : ref_count_sp(), mValue(value), mDeleter(eastl::move(deleter)), mAllocator(eastl::move(allocator))
+ {}
+
+ void free_value() EA_NOEXCEPT
+ {
+ mDeleter(mValue);
+ mValue = nullptr;
+ }
+
+ void free_ref_count_sp() EA_NOEXCEPT
+ {
+ allocator_type allocator = mAllocator;
+ this->~ref_count_sp_t();
+ EASTLFree(allocator, this, sizeof(*this));
+ }
+
+ #if EASTL_RTTI_ENABLED
+ void* get_deleter(const std::type_info& type) const EA_NOEXCEPT
+ {
+ return (type == typeid(deleter_type)) ? (void*)&mDeleter : nullptr;
+ }
+ #else
+ void* get_deleter() const EA_NOEXCEPT
+ {
+ return (void*)&mDeleter;
+ }
+ #endif
+ };
+
+ /// ref_count_sp_t_inst
+ ///
+ /// This is a version of ref_count_sp which is used to actually hold an instance of
+ /// T (instead of a pointer). This is useful to allocate the object and ref count
+ /// in a single memory allocation.
+ template<typename T, typename Allocator>
+ class ref_count_sp_t_inst : public ref_count_sp
+ {
+ public:
+ typedef ref_count_sp_t_inst<T, Allocator> this_type;
+ typedef T value_type;
+ typedef Allocator allocator_type;
+ typedef typename aligned_storage<sizeof(T), eastl::alignment_of<T>::value>::type storage_type;
+
+ storage_type mMemory;
+ allocator_type mAllocator;
+
+ value_type* GetValue() { return static_cast<value_type*>(static_cast<void*>(&mMemory)); }
+
+ template <typename... Args>
+ ref_count_sp_t_inst(allocator_type allocator, Args&&... args)
+ : ref_count_sp(), mAllocator(eastl::move(allocator))
+ {
+ new (&mMemory) value_type(eastl::forward<Args>(args)...);
+ }
+
+ void free_value() EA_NOEXCEPT
+ {
+ GetValue()->~value_type();
+ }
+
+ void free_ref_count_sp() EA_NOEXCEPT
+ {
+ allocator_type allocator = mAllocator;
+ this->~ref_count_sp_t_inst();
+ EASTLFree(allocator, this, sizeof(*this));
+ }
+
+ #if EASTL_RTTI_ENABLED
+ void* get_deleter(const std::type_info&) const EA_NOEXCEPT
+ {
+ return nullptr; // Default base implementation.
+ }
+ #else
+ void* get_deleter() const EA_NOEXCEPT
+ {
+ return nullptr;
+ }
+ #endif
+ };
+
+
+ /// do_enable_shared_from_this
+ ///
+ /// If a user calls this function, it sets up mWeakPtr member of
+ /// the enable_shared_from_this parameter to point to the ref_count_sp
+ /// object that's passed in. Normally, the user doesn't need to call
+ /// this function, as the shared_ptr constructor will do it for them.
+ ///
+ template <typename T, typename U>
+ void do_enable_shared_from_this(const ref_count_sp* pRefCount,
+ const enable_shared_from_this<T>* pEnableSharedFromThis,
+ const U* pValue)
+ {
+ if (pEnableSharedFromThis)
+ pEnableSharedFromThis->mWeakPtr.assign(const_cast<U*>(pValue), const_cast<ref_count_sp*>(pRefCount));
+ }
+
+ inline void do_enable_shared_from_this(const ref_count_sp*, ...) {} // Empty specialization. This no-op version is
+ // called by shared_ptr when shared_ptr's T type
+ // is anything but an enabled_shared_from_this
+ // class.
+
+
+ /// shared_ptr_traits
+ /// This exists for the sole purpose of creating a typedef called
+ /// reference_type which is specialized for type void. The reason
+ /// for this is that shared_ptr::operator*() returns a reference
+ /// to T but if T is void, it needs to return void, not *void,
+ /// as the latter is not valid C++.
+ template <typename T> struct shared_ptr_traits
+ { typedef T& reference_type; };
+
+ template <> struct shared_ptr_traits<void>
+ { typedef void reference_type; };
+
+ template <> struct shared_ptr_traits<void const>
+ { typedef void reference_type; };
+
+ template <> struct shared_ptr_traits<void volatile>
+ { typedef void reference_type; };
+
+ template <> struct shared_ptr_traits<void const volatile>
+ { typedef void reference_type; };
+
+
+
+ /// shared_ptr
+ ///
+ /// This class implements the C++11 shared_ptr template. A shared_ptr is like the C++
+ /// Standard Library unique_ptr except that it allows sharing of pointers between
+ /// instances via reference counting. shared_ptr objects can safely be copied and
+ /// can safely be used in C++ Standard Library containers such as std::vector or
+ /// std::list.
+ ///
+ /// This class is not thread safe in that you cannot use an instance of it from
+ /// two threads at the same time and cannot use two separate instances of it, which
+ /// own the same pointer, at the same time. Use standard multithread mutex techniques
+ /// to address the former problems and use shared_ptr_mt to address the latter.
+ /// Note that this is contrary to the C++11 standard.
+ ///
+ /// As of this writing, arrays aren't supported, but they are planned in the future
+ /// based on the C++17 proposal: http://isocpp.org/files/papers/N3920.html
+ ///
+ template <typename T>
+ class shared_ptr
+ {
+ public:
+ typedef shared_ptr<T> this_type;
+ typedef T element_type;
+ typedef typename shared_ptr_traits<T>::reference_type reference_type; // This defines what a reference to a T is. It's always simply T&, except for the case where T is void, whereby the reference is also just void.
+ typedef EASTLAllocatorType default_allocator_type;
+ typedef default_delete<T> default_deleter_type;
+ typedef weak_ptr<T> weak_type;
+
+ protected:
+ element_type* mpValue;
+ ref_count_sp* mpRefCount; /// Base pointer to Reference count for owned pointer and the owned pointer.
+
+ public:
+ /// Initializes and "empty" shared_ptr.
+ /// Postcondition: use_count() == zero and get() == 0
+ shared_ptr() EA_NOEXCEPT
+ : mpValue(nullptr),
+ mpRefCount(nullptr)
+ {
+ // Intentionally leaving mpRefCount as NULL. Can't allocate here due to noexcept.
+ }
+
+ /// Takes ownership of the pointer and sets the reference count
+ /// to the pointer to 1. It is OK if the input pointer is null.
+ /// The shared reference count is allocated on the heap using the
+ /// default eastl allocator.
+ /// Throws: bad_alloc, or an implementation-defined exception when
+ /// a resource other than memory could not be obtained.
+ /// Exception safety: If an exception is thrown, delete p is called.
+ /// Postcondition in the event of no exception: use_count() == 1 && get() == p
+ template <typename U>
+ explicit shared_ptr(U* pValue,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0)
+ : mpValue(nullptr), mpRefCount(nullptr) // alloc_internal will set this.
+ {
+ // We explicitly use default_delete<U>. You can use the other version of this constructor to provide a
+ // custom version.
+ alloc_internal(pValue, default_allocator_type(),
+ default_delete<U>()); // Problem: We want to be able to use default_deleter_type() instead of
+ // default_delete<U>, but if default_deleter_type's type is void or
+ // otherwise mismatched then this will fail to compile. What we really
+ // want to be able to do is "rebind" default_allocator_type to U
+ // instead of its original type.
+ }
+
+
+ shared_ptr(std::nullptr_t) EA_NOEXCEPT
+ : mpValue(nullptr),
+ mpRefCount(nullptr)
+ {
+ // Intentionally leaving mpRefCount as NULL. Can't allocate here due to noexcept.
+ }
+
+
+ /// Takes ownership of the pointer and sets the reference count
+ /// to the pointer to 1. It is OK if the input pointer is null.
+ /// The shared reference count is allocated on the heap using the
+ /// default eastl allocator. The pointer will be disposed using the
+ /// provided deleter.
+ /// If an exception occurs during the allocation of the shared
+ /// reference count, the owned pointer is deleted and the exception
+ /// is rethrown.
+ /// Postcondition: use_count() == 1 && get() == p
+ template <typename U, typename Deleter>
+ shared_ptr(U* pValue,
+ Deleter deleter,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0)
+ : mpValue(nullptr), mpRefCount(nullptr)
+ {
+ alloc_internal(pValue, default_allocator_type(), eastl::move(deleter));
+ }
+
+ template <typename Deleter>
+ shared_ptr(std::nullptr_t, Deleter deleter)
+ : mpValue(nullptr), mpRefCount(nullptr) // alloc_internal will set this.
+ {
+ alloc_internal(nullptr, default_allocator_type(), eastl::move(deleter));
+ }
+
+
+ /// Takes ownership of the pointer and sets the reference count
+ /// to the pointer to 1. It is OK if the input pointer is null.
+ /// The shared reference count is allocated on the heap using the
+ /// supplied allocator. The pointer will be disposed using the
+ /// provided deleter.
+ /// If an exception occurs during the allocation of the shared
+ /// reference count, the owned pointer is deleted and the exception
+ /// is rethrown.
+ /// Postcondition: use_count() == 1 && get() == p
+ template <typename U, typename Deleter, typename Allocator>
+ explicit shared_ptr(U* pValue,
+ Deleter deleter,
+ const Allocator& allocator,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0)
+ : mpValue(nullptr), mpRefCount(nullptr) // alloc_internal will set this.
+ {
+ alloc_internal(pValue, eastl::move(allocator), eastl::move(deleter));
+ }
+
+ template <typename Deleter, typename Allocator>
+ shared_ptr(std::nullptr_t, Deleter deleter, Allocator allocator)
+ : mpValue(nullptr),
+ mpRefCount(nullptr) // alloc_internal will set this.
+ {
+ alloc_internal(nullptr, eastl::move(allocator), eastl::move(deleter));
+ }
+
+
+ /// shared_ptr
+ /// construction with self type.
+ /// If we want a shared_ptr constructor that is templated on shared_ptr<U>,
+ /// then we need to make it in addition to this function, as otherwise
+ /// the compiler will generate this function and things will go wrong.
+ /// To accomplish this in a thread-safe way requires use of shared_ptr atomic_store.
+ shared_ptr(const shared_ptr& sharedPtr) EA_NOEXCEPT
+ : mpValue(sharedPtr.mpValue),
+ mpRefCount(sharedPtr.mpRefCount)
+ {
+ if(mpRefCount)
+ mpRefCount->addref();
+ }
+
+
+ /// shared_ptr
+ /// Shares ownership of a pointer with another instance of shared_ptr.
+ /// This function increments the shared reference count on the pointer.
+ /// To accomplish this in a thread-safe way requires use of shared_ptr atomic_store.
+ template <typename U>
+ shared_ptr(const shared_ptr<U>& sharedPtr,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0) EA_NOEXCEPT
+ : mpValue(sharedPtr.mpValue),
+ mpRefCount(sharedPtr.mpRefCount)
+ {
+ if (mpRefCount)
+ mpRefCount->addref();
+ }
+
+
+ /// shared_ptr
+ ///
+ /// 20.7.2.2.1p13: Constructs a shared_ptr instance that stores p and shares ownership with r.
+ /// Postconditions: get() == pValue && use_count() == sharedPtr.use_count().
+ /// To avoid the possibility of a dangling pointer, the user of this constructor must
+ /// ensure that pValue remains valid at least until the ownership group of sharedPtr is destroyed.
+ /// This constructor allows creation of an empty shared_ptr instance with a non-NULL stored pointer.
+ ///
+ /// Shares ownership of a pointer with another instance of shared_ptr while storing a potentially
+ /// different pointer. This function increments the shared reference count on the sharedPtr if it exists.
+ /// If sharedPtr has no shared reference then a shared reference is not created an pValue is not
+ /// deleted in our destructor and effectively the pointer is not actually shared.
+ ///
+ /// To accomplish this in a thread-safe way requires the user to maintain the lifetime of sharedPtr
+ /// as described above.
+ ///
+ template <typename U>
+ shared_ptr(const shared_ptr<U>& sharedPtr, element_type* pValue) EA_NOEXCEPT
+ : mpValue(pValue),
+ mpRefCount(sharedPtr.mpRefCount)
+ {
+ if(mpRefCount)
+ mpRefCount->addref();
+ }
+
+
+ shared_ptr(shared_ptr&& sharedPtr) EA_NOEXCEPT
+ : mpValue(sharedPtr.mpValue),
+ mpRefCount(sharedPtr.mpRefCount)
+ {
+ sharedPtr.mpValue = nullptr;
+ sharedPtr.mpRefCount = nullptr;
+ }
+
+
+ template <typename U>
+ shared_ptr(shared_ptr<U>&& sharedPtr,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0) EA_NOEXCEPT
+ : mpValue(sharedPtr.mpValue),
+ mpRefCount(sharedPtr.mpRefCount)
+ {
+ sharedPtr.mpValue = nullptr;
+ sharedPtr.mpRefCount = nullptr;
+ }
+
+ // unique_ptr constructor
+ template <typename U, typename Deleter>
+ shared_ptr(unique_ptr<U, Deleter>&& uniquePtr,
+ typename eastl::enable_if<!eastl::is_array<U>::value && !is_lvalue_reference<Deleter>::value &&
+ eastl::is_convertible<U*, element_type*>::value>::type* = 0)
+ : mpValue(nullptr), mpRefCount(nullptr)
+ {
+ alloc_internal(uniquePtr.release(), default_allocator_type(), uniquePtr.get_deleter());
+ }
+
+ // unique_ptr constructor
+ // The following is not in the C++11 Standard.
+ template <typename U, typename Deleter, typename Allocator>
+ shared_ptr(unique_ptr<U, Deleter>&& uniquePtr,
+ const Allocator& allocator,
+ typename eastl::enable_if<!eastl::is_array<U>::value && !is_lvalue_reference<Deleter>::value &&
+ eastl::is_convertible<U*, element_type*>::value>::type* = 0)
+ : mpValue(nullptr), mpRefCount(nullptr)
+ {
+ alloc_internal(uniquePtr.release(), allocator, uniquePtr.get_deleter());
+ }
+
+
+ /// shared_ptr(weak_ptr)
+ /// Shares ownership of a pointer with an instance of weak_ptr.
+ /// This function increments the shared reference count on the pointer.
+ template <typename U>
+ explicit shared_ptr(const weak_ptr<U>& weakPtr,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0)
+ : mpValue(weakPtr.mpValue)
+ , mpRefCount(weakPtr.mpRefCount ?
+ weakPtr.mpRefCount->lock() :
+ weakPtr.mpRefCount) // mpRefCount->lock() addref's the return value for us.
+ {
+ if (!mpRefCount)
+ {
+ mpValue = nullptr; // Question: Is it right for us to NULL this or not?
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw eastl::bad_weak_ptr();
+ #else
+ EASTL_FAIL_MSG("eastl::shared_ptr -- bad_weak_ptr");
+ #endif
+ }
+ }
+
+
+ /// ~shared_ptr
+ /// Decrements the reference count for the owned pointer. If the
+ /// reference count goes to zero, the owned pointer is deleted and
+ /// the shared reference count is deleted.
+ ~shared_ptr()
+ {
+ if (mpRefCount)
+ {
+ mpRefCount->release();
+ }
+ // else if mpValue is non-NULL then we just lose it because it wasn't actually shared (can happen with
+ // shared_ptr(const shared_ptr<U>& sharedPtr, element_type* pValue) constructor).
+
+ #if EASTL_DEBUG
+ mpValue = nullptr;
+ mpRefCount = nullptr;
+ #endif
+ }
+
+
+ // The following is disabled because it is not specified by the C++11 Standard, as it leads to
+ // potential collisions. Use the reset(p) and reset() functions instead.
+ //
+ // template <typename U>
+ // typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, this_type&>::type
+ // operator=(const U* pValue) EA_NOEXCEPT
+ // {
+ // reset(pValue);
+ // return *this;
+ // }
+ //
+ // template <typename U>
+ // this_type& operator=(std::nullptr_t) EA_NOEXCEPT
+ // {
+ // reset();
+ // return *this;
+ // }
+
+
+ /// operator=
+ /// Assignment to self type.
+ /// If we want a shared_ptr operator= that is templated on shared_ptr<U>,
+ /// then we need to make it in addition to this function, as otherwise
+ /// the compiler will generate this function and things will go wrong.
+ shared_ptr& operator=(const shared_ptr& sharedPtr) EA_NOEXCEPT
+ {
+ if(&sharedPtr != this)
+ this_type(sharedPtr).swap(*this);
+
+ return *this;
+ }
+
+
+ /// operator=
+ /// Copies another shared_ptr to this object. Note that this object
+ /// may already own a shared pointer with another different pointer
+ /// (but still of the same type) before this call. In that case,
+ /// this function releases the old pointer, decrementing its reference
+ /// count and deleting it if zero, takes shared ownership of the new
+ /// pointer and increments its reference count.
+ template <typename U>
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, this_type&>::type
+ operator=(const shared_ptr<U>& sharedPtr) EA_NOEXCEPT
+ {
+ if(!equivalent_ownership(sharedPtr))
+ this_type(sharedPtr).swap(*this);
+ return *this;
+ }
+
+
+ /// operator=
+ /// Assignment to self type.
+ /// If we want a shared_ptr operator= that is templated on shared_ptr<U>,
+ /// then we need to make it in addition to this function, as otherwise
+ /// the compiler will generate this function and things will go wrong.
+ this_type& operator=(shared_ptr&& sharedPtr) EA_NOEXCEPT
+ {
+ if(&sharedPtr != this)
+ this_type(eastl::move(sharedPtr)).swap(*this);
+
+ return *this;
+ }
+
+
+ /// operator=
+ /// Moves another shared_ptr to this object. Note that this object
+ /// may already own a shared pointer with another different pointer
+ /// (but still of the same type) before this call. In that case,
+ /// this function releases the old pointer, decrementing its reference
+ /// count and deleting it if zero, takes shared ownership of the new
+ /// pointer and increments its reference count.
+ template <typename U>
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, this_type&>::type
+ operator=(shared_ptr<U>&& sharedPtr) EA_NOEXCEPT
+ {
+ if(!equivalent_ownership(sharedPtr))
+ shared_ptr(eastl::move(sharedPtr)).swap(*this);
+ return *this;
+ }
+
+
+ // unique_ptr operator=
+ template <typename U, typename Deleter>
+ typename eastl::enable_if<!eastl::is_array<U>::value && eastl::is_convertible<U*, element_type*>::value, this_type&>::type
+ operator=(unique_ptr<U, Deleter>&& uniquePtr)
+ {
+ // Note that this will use the default EASTL allocator
+ this_type(eastl::move(uniquePtr)).swap(*this);
+ return *this;
+ }
+
+
+ /// reset
+ /// Releases the owned pointer.
+ void reset() EA_NOEXCEPT
+ {
+ this_type().swap(*this);
+ }
+
+
+ /// reset
+ /// Releases the owned pointer and takes ownership of the
+ /// passed in pointer.
+ template <typename U>
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, void>::type
+ reset(U* pValue)
+ {
+ this_type(pValue).swap(*this);
+ }
+
+
+ /// reset
+ /// Releases the owned pointer and takes ownership of the
+ /// passed in pointer.
+ template <typename U, typename Deleter>
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, void>::type
+ reset(U* pValue, Deleter deleter)
+ {
+ shared_ptr(pValue, deleter).swap(*this);
+ }
+
+
+ /// reset
+ /// Resets the shared_ptr
+ template <typename U, typename Deleter, typename Allocator>
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, void>::type
+ reset(U* pValue, Deleter deleter, const Allocator& allocator)
+ {
+ shared_ptr(pValue, deleter, allocator).swap(*this);
+ }
+
+
+ /// swap
+ /// Exchanges the owned pointer between two shared_ptr objects.
+ /// This function is not intrinsically thread-safe. You must use atomic_exchange(shared_ptr<T>*, shared_ptr<T>)
+ /// or manually coordinate the swap.
+ void swap(this_type& sharedPtr) EA_NOEXCEPT
+ {
+ element_type* const pValue = sharedPtr.mpValue;
+ sharedPtr.mpValue = mpValue;
+ mpValue = pValue;
+
+ ref_count_sp* const pRefCount = sharedPtr.mpRefCount;
+ sharedPtr.mpRefCount = mpRefCount;
+ mpRefCount = pRefCount;
+ }
+
+
+ /// operator*
+ /// Returns the owner pointer dereferenced.
+ /// Example usage:
+ /// shared_ptr<int> ptr(new int(3));
+ /// int x = *ptr;
+ reference_type operator*() const EA_NOEXCEPT
+ {
+ return *mpValue;
+ }
+
+ /// operator->
+ /// Allows access to the owned pointer via operator->()
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// shared_ptr<int> ptr(new X);
+ /// ptr->DoSomething();
+ element_type* operator->() const EA_NOEXCEPT
+ {
+ // assert(mpValue);
+ return mpValue;
+ }
+
+ /// operator[]
+ /// Index into the array pointed to by the owned pointer.
+ /// The behaviour is undefined if the owned pointer is nullptr, if the user specified index is negative, or if
+ /// the index is outside the referred array bounds.
+ ///
+ /// When T is not an array type, it is unspecified whether this function is declared. If the function is declared,
+ /// it is unspecified what its return type is, except that the declaration (although not necessarily the
+ /// definition) of the function is guaranteed to be legal.
+ //
+ // TODO(rparolin): This is disabled because eastl::shared_ptr needs array support.
+ // element_type& operator[](ptrdiff_t idx)
+ // {
+ // return get()[idx];
+ // }
+
+ /// get
+ /// Returns the owned pointer. Note that this class does
+ /// not provide an operator T() function. This is because such
+ /// a thing (automatic conversion) is deemed unsafe.
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// shared_ptr<int> ptr(new X);
+ /// X* pX = ptr.get();
+ /// pX->DoSomething();
+ element_type* get() const EA_NOEXCEPT
+ {
+ return mpValue;
+ }
+
+ /// use_count
+ /// Returns: the number of shared_ptr objects, *this included, that share ownership with *this, or 0 when *this is empty.
+ int use_count() const EA_NOEXCEPT
+ {
+ return mpRefCount ? mpRefCount->use_count() : 0;
+ }
+
+ /// unique
+ /// Returns: use_count() == 1.
+ bool unique() const EA_NOEXCEPT
+ {
+ return (mpRefCount && (mpRefCount->use_count() == 1));
+ }
+
+
+ /// owner_before
+ /// C++11 function for ordering.
+ template <typename U>
+ bool owner_before(const shared_ptr<U>& sharedPtr) const EA_NOEXCEPT
+ {
+ return (mpRefCount < sharedPtr.mpRefCount);
+ }
+
+ template <typename U>
+ bool owner_before(const weak_ptr<U>& weakPtr) const EA_NOEXCEPT
+ {
+ return (mpRefCount < weakPtr.mpRefCount);
+ }
+
+
+ template <typename Deleter>
+ Deleter* get_deleter() const EA_NOEXCEPT
+ {
+ #if EASTL_RTTI_ENABLED
+ return mpRefCount ? static_cast<Deleter*>(mpRefCount->get_deleter(typeid(typename remove_cv<Deleter>::type))) : nullptr;
+ #else
+ // This is probably unsafe but without typeid there is no way to ensure that the
+ // stored deleter is actually of the templated Deleter type.
+ return nullptr;
+
+ // Alternatively:
+ // return mpRefCount ? static_cast<Deleter*>(mpRefCount->get_deleter()) : nullptr;
+ #endif
+ }
+
+ #ifdef EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS
+ /// Note that below we do not use operator bool(). The reason for this
+ /// is that booleans automatically convert up to short, int, float, etc.
+ /// The result is that this: if(sharedPtr == 1) would yield true (bad).
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const EA_NOEXCEPT
+ {
+ if(mpValue)
+ return &this_type::get;
+ return nullptr;
+ }
+
+ bool operator!() const EA_NOEXCEPT
+ {
+ return (mpValue == nullptr);
+ }
+ #else
+ /// Explicit operator bool
+ /// Allows for using a shared_ptr as a boolean.
+ /// Example usage:
+ /// shared_ptr<int> ptr(new int(3));
+ /// if(ptr)
+ /// ++*ptr;
+ explicit operator bool() const EA_NOEXCEPT
+ {
+ return (mpValue != nullptr);
+ }
+ #endif
+
+ /// Returns true if the given shared_ptr ows the same T pointer that we do.
+ template <typename U>
+ bool equivalent_ownership(const shared_ptr<U>& sharedPtr) const
+ {
+ // We compare mpRefCount instead of mpValue, because it's feasible that there are two sets of shared_ptr
+ // objects that are unconnected to each other but happen to own the same value pointer.
+ return (mpRefCount == sharedPtr.mpRefCount);
+ }
+
+ protected:
+ // Friend declarations.
+ template <typename U> friend class shared_ptr;
+ template <typename U> friend class weak_ptr;
+ template <typename U> friend void allocate_shared_helper(shared_ptr<U>&, ref_count_sp*, U*);
+
+ // Handles the allocating of mpRefCount, while assigning mpValue.
+ // The provided pValue may be NULL, as with constructing with a deleter and allocator but NULL pointer.
+ template <typename U, typename Allocator, typename Deleter>
+ void alloc_internal(U pValue, Allocator allocator, Deleter deleter)
+ {
+ typedef ref_count_sp_t<U, Allocator, Deleter> ref_count_type;
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ void* const pMemory = EASTLAlloc(allocator, sizeof(ref_count_type));
+ if(!pMemory)
+ throw std::bad_alloc();
+ mpRefCount = ::new(pMemory) ref_count_type(pValue, eastl::move(deleter), eastl::move(allocator));
+ mpValue = pValue;
+ do_enable_shared_from_this(mpRefCount, pValue, pValue);
+ }
+ catch(...) // The exception would usually be std::bad_alloc.
+ {
+ deleter(pValue); // 20.7.2.2.1 p7: If an exception is thrown, delete p is called.
+ throw; // Throws: bad_alloc, or an implementation-defined exception when a resource other than memory could not be obtained.
+ }
+ #else
+ void* const pMemory = EASTLAlloc(allocator, sizeof(ref_count_type));
+ if(pMemory)
+ {
+ mpRefCount = ::new(pMemory) ref_count_type(pValue, eastl::move(deleter), eastl::move(allocator));
+ mpValue = pValue;
+ do_enable_shared_from_this(mpRefCount, pValue, pValue);
+ }
+ else
+ {
+ deleter(pValue); // We act the same as we do above with exceptions enabled.
+ }
+ #endif
+ }
+
+ }; // class shared_ptr
+
+
+ /// get_pointer
+ /// returns shared_ptr::get() via the input shared_ptr.
+ template <typename T>
+ inline typename shared_ptr<T>::element_type* get_pointer(const shared_ptr<T>& sharedPtr) EA_NOEXCEPT
+ {
+ return sharedPtr.get();
+ }
+
+ /// get_deleter
+ /// returns the deleter in the input shared_ptr.
+ template <typename Deleter, typename T>
+ Deleter* get_deleter(const shared_ptr<T>& sharedPtr) EA_NOEXCEPT
+ {
+ return sharedPtr.template get_deleter<Deleter>();
+ }
+
+ /// swap
+ /// Exchanges the owned pointer beween two shared_ptr objects.
+ /// This non-member version is useful for compatibility of shared_ptr
+ /// objects with the C++ Standard Library and other libraries.
+ template <typename T>
+ inline void swap(shared_ptr<T>& a, shared_ptr<T>& b) EA_NOEXCEPT
+ {
+ a.swap(b);
+ }
+
+
+ /// shared_ptr comparison operators
+ template <typename T, typename U>
+ inline bool operator==(const shared_ptr<T>& a, const shared_ptr<U>& b) EA_NOEXCEPT
+ {
+ // assert((a.get() != b.get()) || (a.use_count() == b.use_count()));
+ return (a.get() == b.get());
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename U>
+ std::strong_ordering operator<=>(const shared_ptr<T>& a, const shared_ptr<U>& b) EA_NOEXCEPT
+ {
+ return a.get() <=> b.get();
+ }
+#else
+ template <typename T, typename U>
+ inline bool operator!=(const shared_ptr<T>& a, const shared_ptr<U>& b) EA_NOEXCEPT
+ {
+ // assert((a.get() != b.get()) || (a.use_count() == b.use_count()));
+ return (a.get() != b.get());
+ }
+
+ template <typename T, typename U>
+ inline bool operator<(const shared_ptr<T>& a, const shared_ptr<U>& b) EA_NOEXCEPT
+ {
+ //typedef typename eastl::common_type<T*, U*>::type CPointer;
+ //return less<CPointer>()(a.get(), b.get());
+
+ typedef typename eastl::common_type<T*, U*>::type CPointer; // We currently need to make these temporary variables, as otherwise clang complains about CPointer being int*&&&.
+ CPointer pT = a.get(); // I wonder if there's something wrong with our common_type type trait implementation.
+ CPointer pU = b.get(); // "in instantiation of function template specialization 'eastl::operator<<int, int>, no known conversion from 'element_type *' (aka 'int *') to 'int *&&&' for 1st argument"
+ return less<CPointer>()(pT, pU); // It looks like common_type is making CPointer be (e.g.) int*&& instead of int*, though the problem may be in how less<> deals with that.
+ }
+
+ template <typename T, typename U>
+ inline bool operator>(const shared_ptr<T>& a, const shared_ptr<U>& b) EA_NOEXCEPT
+ {
+ return (b < a);
+ }
+
+ template <typename T, typename U>
+ inline bool operator<=(const shared_ptr<T>& a, const shared_ptr<U>& b) EA_NOEXCEPT
+ {
+ return !(b < a);
+ }
+
+ template <typename T, typename U>
+ inline bool operator>=(const shared_ptr<T>& a, const shared_ptr<U>& b) EA_NOEXCEPT
+ {
+ return !(a < b);
+ }
+#endif
+
+ template <typename T>
+ inline bool operator==(const shared_ptr<T>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !a;
+ }
+
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T>
+ inline std::strong_ordering operator<=>(const shared_ptr<T>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return a.get() <=> nullptr;
+ }
+ #else
+ template <typename T>
+ inline bool operator==(std::nullptr_t, const shared_ptr<T>& b) EA_NOEXCEPT
+ {
+ return !b;
+ }
+
+ template <typename T>
+ inline bool operator!=(const shared_ptr<T>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return static_cast<bool>(a);
+ }
+
+ template <typename T>
+ inline bool operator!=(std::nullptr_t, const shared_ptr<T>& b) EA_NOEXCEPT
+ {
+ return static_cast<bool>(b);
+ }
+
+ template <typename T>
+ inline bool operator<(const shared_ptr<T>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return less<T*>()(a.get(), nullptr);
+ }
+
+ template <typename T>
+ inline bool operator<(std::nullptr_t, const shared_ptr<T>& b) EA_NOEXCEPT
+ {
+ return less<T*>()(nullptr, b.get());
+ }
+
+ template <typename T>
+ inline bool operator>(const shared_ptr<T>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return (nullptr < a);
+ }
+
+ template <typename T>
+ inline bool operator>(std::nullptr_t, const shared_ptr<T>& b) EA_NOEXCEPT
+ {
+ return (b < nullptr);
+ }
+
+ template <typename T>
+ inline bool operator<=(const shared_ptr<T>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !(nullptr < a);
+ }
+
+ template <typename T>
+ inline bool operator<=(std::nullptr_t, const shared_ptr<T>& b) EA_NOEXCEPT
+ {
+ return !(b < nullptr);
+ }
+
+ template <typename T>
+ inline bool operator>=(const shared_ptr<T>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !(a < nullptr);
+ }
+
+ template <typename T>
+ inline bool operator>=(std::nullptr_t, const shared_ptr<T>& b) EA_NOEXCEPT
+ {
+ return !(nullptr < b);
+ }
+#endif
+
+
+
+ /// reinterpret_pointer_cast
+ ///
+ /// Returns a shared_ptr<T> reinterpret-casted from a const shared_ptr<U>&.
+ /// http://isocpp.org/files/papers/N3920.html
+ ///
+ /// Requires: The expression reinterpret_cast<T*>(sharedPtr.get()) shall be well formed.
+ /// Returns: If sharedPtr is empty, an empty shared_ptr<T>; otherwise, a shared_ptr<T>
+ /// object that stores const_cast<T*>(sharedPtr.get()) and shares ownership with sharedPtr.
+ /// Postconditions: w.get() == const_cast<T*>(sharedPtr.get()) and w.use_count() == sharedPtr.use_count(),
+ /// where w is the return value.
+ template <typename T, typename U>
+ inline shared_ptr<T> reinterpret_pointer_cast(shared_ptr<U> const& sharedPtr) EA_NOEXCEPT
+ {
+ return shared_ptr<T>(sharedPtr, reinterpret_cast<T*>(sharedPtr.get()));
+ }
+
+
+ /// static_pointer_cast
+ ///
+ /// Returns a shared_ptr<T> static-casted from a shared_ptr<U>&.
+ ///
+ /// Requires: The expression const_cast<T*>(sharedPtr.get()) shall be well formed.
+ /// Returns: If sharedPtr is empty, an empty shared_ptr<T>; otherwise, a shared_ptr<T>
+ /// object that stores const_cast<T*>(sharedPtr.get()) and shares ownership with sharedPtr.
+ /// Postconditions: w.get() == const_cast<T*>(sharedPtr.get()) and w.use_count() == sharedPtr.use_count(),
+ /// where w is the return value.
+ template <typename T, typename U>
+ inline shared_ptr<T> static_pointer_cast(const shared_ptr<U>& sharedPtr) EA_NOEXCEPT
+ {
+ return shared_ptr<T>(sharedPtr, static_cast<T*>(sharedPtr.get()));
+ }
+
+ template <typename T, typename U> // Retained for support for pre-C++11 shared_ptr.
+ inline shared_ptr<T> static_shared_pointer_cast(const shared_ptr<U>& sharedPtr) EA_NOEXCEPT
+ { return static_pointer_cast<T, U>(sharedPtr); }
+
+
+
+ /// const_pointer_cast
+ ///
+ /// Returns a shared_ptr<T> const-casted from a const shared_ptr<U>&.
+ /// Normally, this means that the source shared_ptr holds a const data type.
+ //
+ /// Requires: The expression const_cast<T*>(sharedPtr.get()) shall be well formed.
+ /// Returns: If sharedPtr is empty, an empty shared_ptr<T>; otherwise, a shared_ptr<T>
+ /// object that stores const_cast<T*>(sharedPtr.get()) and shares ownership with sharedPtr.
+ /// Postconditions: w.get() == const_cast<T*>(sharedPtr.get()) and w.use_count() == sharedPtr.use_count(),
+ /// where w is the return value.
+ template <typename T, typename U>
+ inline shared_ptr<T> const_pointer_cast(const shared_ptr<U>& sharedPtr) EA_NOEXCEPT
+ {
+ return shared_ptr<T>(sharedPtr, const_cast<T*>(sharedPtr.get()));
+ }
+
+ template <typename T, typename U> // Retained for support for pre-C++11 shared_ptr.
+ inline shared_ptr<T> const_shared_pointer_cast(const shared_ptr<U>& sharedPtr) EA_NOEXCEPT
+ { return const_pointer_cast<T, U>(sharedPtr); }
+
+
+
+ #if EASTL_RTTI_ENABLED
+ /// dynamic_pointer_cast
+ ///
+ /// Returns a shared_ptr<T> dynamic-casted from a const shared_ptr<U>&.
+ ///
+ /// Requires: The expression dynamic_cast<T*>(sharedPtr.get()) shall be well formed and shall have well defined behavior.
+ /// Returns: When dynamic_cast<T*>(sharedPtr.get()) returns a nonzero value, a shared_ptr<T> object that stores
+ /// a copy of it and shares ownership with sharedPtr; Otherwise, an empty shared_ptr<T> object.
+ /// Postcondition: w.get() == dynamic_cast<T*>(sharedPtr.get()), where w is the return value
+ ///
+ template <typename T, typename U>
+ inline shared_ptr<T> dynamic_pointer_cast(const shared_ptr<U>& sharedPtr) EA_NOEXCEPT
+ {
+ if(T* p = dynamic_cast<T*>(sharedPtr.get()))
+ return shared_ptr<T>(sharedPtr, p);
+ return shared_ptr<T>();
+ }
+
+ template <typename T, typename U> // Retained for support for pre-C++11 shared_ptr.
+ inline typename eastl::enable_if<!eastl::is_array<T>::value && !eastl::is_array<U>::value, shared_ptr<T> >::type
+ dynamic_shared_pointer_cast(const shared_ptr<U>& sharedPtr) EA_NOEXCEPT
+ { return dynamic_pointer_cast<T, U>(sharedPtr); }
+ #endif
+
+
+ /// hash specialization for shared_ptr.
+ /// It simply returns eastl::hash(x.get()). If your unique_ptr pointer type (the return value of shared_ptr<T>::get) is
+ /// a custom type and not a built-in pointer type then you will need to independently define eastl::hash for that type.
+ template <typename T>
+ struct hash< shared_ptr<T> >
+ {
+ size_t operator()(const shared_ptr<T>& x) const EA_NOEXCEPT
+ { return eastl::hash<T*>()(x.get()); }
+ };
+
+
+ template <typename T>
+ void allocate_shared_helper(eastl::shared_ptr<T>& sharedPtr, ref_count_sp* pRefCount, T* pValue)
+ {
+ sharedPtr.mpRefCount = pRefCount;
+ sharedPtr.mpValue = pValue;
+ do_enable_shared_from_this(pRefCount, pValue, pValue);
+ }
+
+ template <typename T, typename Allocator, typename... Args>
+ shared_ptr<T> allocate_shared(const Allocator& allocator, Args&&... args)
+ {
+ typedef ref_count_sp_t_inst<T, Allocator> ref_count_type;
+ shared_ptr<T> ret;
+ void* const pMemory = EASTLAlloc(const_cast<Allocator&>(allocator), sizeof(ref_count_type));
+ if(pMemory)
+ {
+ ref_count_type* pRefCount = ::new(pMemory) ref_count_type(allocator, eastl::forward<Args>(args)...);
+ allocate_shared_helper(ret, pRefCount, pRefCount->GetValue());
+ }
+ return ret;
+ }
+
+ template <typename T, typename... Args>
+ shared_ptr<T> make_shared(Args&&... args)
+ {
+ // allocate with the default allocator.
+ return eastl::allocate_shared<T>(EASTL_SHARED_PTR_DEFAULT_ALLOCATOR, eastl::forward<Args>(args)...);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // shared_ptr atomic access
+ //
+ // These functions allow shared_ptr to act like other C++11 atomic operations.
+ // So the same way you can use atomic_load on a raw pointer, you can also
+ // use it on a shared_ptr. This allows for transparent use of shared_ptr in
+ // place of raw pointers (e.g. in templates). You do not need to use these
+ // functions for regular thread-safe direct usage of shared_ptr construction
+ // and copying, as it's intrinsically thread-safe for that already.
+ //
+ // That being said, the following is not thread-safe and needs to be guarded by
+ // a mutex or the following atomic functions, as it's assigning the *same*
+ // shared_ptr object from multiple threads as opposed to different shared_ptr
+ // objects underlying object:
+ // shared_ptr<Foo> pFoo;
+ // // Thread 1:
+ // shared_ptr<Foo> pFoo2 = pFoo;
+ // // Thread 2:
+ // pFoo = make_shared<Foo>();
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ inline bool atomic_is_lock_free(const shared_ptr<T>*)
+ {
+ // Return true if atomic access to the provided shared_ptr instance is lock-free, false otherwise.
+ // For this to be lock-free, we would have to be able to copy shared_ptr objects in an atomic way
+ // as opposed to wrapping it with a mutex like we do below. Given the nature of shared_ptr, it's
+ // probably not feasible to implement these operations without a mutex. atomic_is_lock_free exists
+ // in the C++11 Standard because it also applies to other types such as built-in types which can
+ // be lock-free in their access.
+ return false;
+ }
+
+ template <typename T>
+ inline shared_ptr<T> atomic_load(const shared_ptr<T>* pSharedPtr)
+ {
+ Internal::shared_ptr_auto_mutex autoMutex(pSharedPtr);
+ return *pSharedPtr;
+ }
+
+ template <typename T>
+ inline shared_ptr<T> atomic_load_explicit(const shared_ptr<T>* pSharedPtr, ... /*std::memory_order memoryOrder*/)
+ {
+ return atomic_load(pSharedPtr);
+ }
+
+ template <typename T>
+ inline void atomic_store(shared_ptr<T>* pSharedPtrA, shared_ptr<T> sharedPtrB)
+ {
+ Internal::shared_ptr_auto_mutex autoMutex(pSharedPtrA);
+ pSharedPtrA->swap(sharedPtrB);
+ }
+
+ template <typename T>
+ inline void atomic_store_explicit(shared_ptr<T>* pSharedPtrA, shared_ptr<T> sharedPtrB, ... /*std::memory_order memoryOrder*/)
+ {
+ atomic_store(pSharedPtrA, sharedPtrB);
+ }
+
+ template <typename T>
+ shared_ptr<T> atomic_exchange(shared_ptr<T>* pSharedPtrA, shared_ptr<T> sharedPtrB)
+ {
+ Internal::shared_ptr_auto_mutex autoMutex(pSharedPtrA);
+ pSharedPtrA->swap(sharedPtrB);
+ return sharedPtrB;
+ }
+
+ template <typename T>
+ inline shared_ptr<T> atomic_exchange_explicit(shared_ptr<T>* pSharedPtrA, shared_ptr<T> sharedPtrB, ... /*std::memory_order memoryOrder*/)
+ {
+ return atomic_exchange(pSharedPtrA, sharedPtrB);
+ }
+
+ // Compares the shared pointers pointed-to by p and expected. If they are equivalent (share ownership of the
+ // same pointer and refer to the same pointer), assigns sharedPtrNew into *pSharedPtr using the memory ordering constraints
+ // specified by success and returns true. If they are not equivalent, assigns *pSharedPtr into *pSharedPtrCondition using the
+ // memory ordering constraints specified by failure and returns false.
+ template <typename T>
+ bool atomic_compare_exchange_strong(shared_ptr<T>* pSharedPtr, shared_ptr<T>* pSharedPtrCondition, shared_ptr<T> sharedPtrNew)
+ {
+ Internal::shared_ptr_auto_mutex autoMutex(pSharedPtr);
+
+ if(pSharedPtr->equivalent_ownership(*pSharedPtrCondition))
+ {
+ *pSharedPtr = sharedPtrNew;
+ return true;
+ }
+
+ *pSharedPtrCondition = *pSharedPtr;
+ return false;
+ }
+
+ template <typename T>
+ inline bool atomic_compare_exchange_weak(shared_ptr<T>* pSharedPtr, shared_ptr<T>* pSharedPtrCondition, shared_ptr<T> sharedPtrNew)
+ {
+ return atomic_compare_exchange_strong(pSharedPtr, pSharedPtrCondition, sharedPtrNew);
+ }
+
+ template <typename T> // Returns true if pSharedPtr was equivalent to *pSharedPtrCondition.
+ inline bool atomic_compare_exchange_strong_explicit(shared_ptr<T>* pSharedPtr, shared_ptr<T>* pSharedPtrCondition, shared_ptr<T> sharedPtrNew, ... /*memory_order memoryOrderSuccess, memory_order memoryOrderFailure*/)
+ {
+ return atomic_compare_exchange_strong(pSharedPtr, pSharedPtrCondition, sharedPtrNew);
+ }
+
+ template <typename T>
+ inline bool atomic_compare_exchange_weak_explicit(shared_ptr<T>* pSharedPtr, shared_ptr<T>* pSharedPtrCondition, shared_ptr<T> sharedPtrNew, ... /*memory_order memoryOrderSuccess, memory_order memoryOrderFailure*/)
+ {
+ return atomic_compare_exchange_weak(pSharedPtr, pSharedPtrCondition, sharedPtrNew);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // weak_ptr
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// EASTL_WEAK_PTR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_WEAK_PTR_DEFAULT_NAME
+ #define EASTL_WEAK_PTR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " weak_ptr" // Unless the user overrides something, this is "EASTL weak_ptr".
+ #endif
+
+
+ /// EASTL_WEAK_PTR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_WEAK_PTR_DEFAULT_ALLOCATOR
+ #define EASTL_WEAK_PTR_DEFAULT_ALLOCATOR allocator_type(EASTL_WEAK_PTR_DEFAULT_NAME)
+ #endif
+
+
+ /// weak_ptr
+ ///
+ /// The weak_ptr class template stores a "weak reference" to an object
+ /// that's already managed by a shared_ptr. To access the object, a weak_ptr
+ /// can be converted to a shared_ptr using the shared_ptr constructor or
+ /// the lock() member function. When the last shared_ptr to the object goes
+ /// away and the object is deleted, the attempt to obtain a shared_ptr
+ /// from the weak_ptr instances that refer to the deleted object will fail via
+ /// lock() returning an empty shared_ptr.
+ ///
+ /// The Allocator template argument manages the memory of the shared reference
+ /// count and not the stored object. weak_ptr will not delete the stored object
+ /// but instead can only delete the reference count on that object.
+ ///
+ template <typename T>
+ class weak_ptr
+ {
+ public:
+ typedef weak_ptr<T> this_type;
+ typedef T element_type;
+
+ public:
+ /// weak_ptr
+ weak_ptr() EA_NOEXCEPT
+ : mpValue(nullptr),
+ mpRefCount(nullptr)
+ {
+ }
+
+
+ /// weak_ptr
+ /// Construction with self type.
+ weak_ptr(const this_type& weakPtr) EA_NOEXCEPT
+ : mpValue(weakPtr.mpValue),
+ mpRefCount(weakPtr.mpRefCount)
+ {
+ if(mpRefCount)
+ mpRefCount->weak_addref();
+ }
+
+
+ /// weak_ptr
+ /// Move construction with self type.
+ weak_ptr(this_type&& weakPtr) EA_NOEXCEPT
+ : mpValue(weakPtr.mpValue),
+ mpRefCount(weakPtr.mpRefCount)
+ {
+ weakPtr.mpValue = nullptr;
+ weakPtr.mpRefCount = nullptr;
+ }
+
+
+ /// weak_ptr
+ /// Constructs a weak_ptr from another weak_ptr.
+ template <typename U>
+ weak_ptr(const weak_ptr<U>& weakPtr, typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0) EA_NOEXCEPT
+ : mpValue(weakPtr.mpValue),
+ mpRefCount(weakPtr.mpRefCount)
+ {
+ if(mpRefCount)
+ mpRefCount->weak_addref();
+ }
+
+
+ /// weak_ptr
+ /// Move constructs a weak_ptr from another weak_ptr.
+ template <typename U>
+ weak_ptr(weak_ptr<U>&& weakPtr,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0) EA_NOEXCEPT
+ : mpValue(weakPtr.mpValue),
+ mpRefCount(weakPtr.mpRefCount)
+ {
+ weakPtr.mpValue = nullptr;
+ weakPtr.mpRefCount = nullptr;
+ }
+
+
+ /// weak_ptr
+ /// Constructs a weak_ptr from a shared_ptr.
+ template <typename U>
+ weak_ptr(const shared_ptr<U>& sharedPtr,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0) EA_NOEXCEPT
+ : mpValue(sharedPtr.mpValue),
+ mpRefCount(sharedPtr.mpRefCount)
+ {
+ if (mpRefCount)
+ mpRefCount->weak_addref();
+ }
+
+
+ /// ~weak_ptr
+ ~weak_ptr()
+ {
+ if(mpRefCount)
+ mpRefCount->weak_release();
+ }
+
+
+ /// operator=(weak_ptr)
+ /// assignment to self type.
+ this_type& operator=(const this_type& weakPtr) EA_NOEXCEPT
+ {
+ assign(weakPtr);
+ return *this;
+ }
+
+
+ this_type& operator=(this_type&& weakPtr) EA_NOEXCEPT
+ {
+ weak_ptr(eastl::move(weakPtr)).swap(*this);
+ return *this;
+ }
+
+
+ /// operator=(weak_ptr)
+ template <typename U>
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, this_type&>::type
+ operator=(const weak_ptr<U>& weakPtr) EA_NOEXCEPT
+ {
+ assign(weakPtr);
+ return *this;
+ }
+
+
+ template <typename U>
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, this_type&>::type
+ operator=(weak_ptr<U>&& weakPtr) EA_NOEXCEPT
+ {
+ weak_ptr(eastl::move(weakPtr)).swap(*this);
+ return *this;
+ }
+
+
+ /// operator=(shared_ptr)
+ /// Assigns to a weak_ptr from a shared_ptr.
+ template <typename U>
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value, this_type&>::type
+ operator=(const shared_ptr<U>& sharedPtr) EA_NOEXCEPT
+ {
+ if(mpRefCount != sharedPtr.mpRefCount) // This check encompasses assignment to self.
+ {
+ // Release old reference
+ if(mpRefCount)
+ mpRefCount->weak_release();
+
+ mpValue = sharedPtr.mpValue;
+ mpRefCount = sharedPtr.mpRefCount;
+ if(mpRefCount)
+ mpRefCount->weak_addref();
+ }
+ return *this;
+ }
+
+ shared_ptr<T> lock() const EA_NOEXCEPT
+ {
+ // We can't just return shared_ptr<T>(*this), as the object may go stale while we are doing this.
+ shared_ptr<T> temp;
+ temp.mpRefCount = mpRefCount ? mpRefCount->lock() : mpRefCount; // mpRefCount->lock() addref's the return value for us.
+ if(temp.mpRefCount)
+ temp.mpValue = mpValue;
+ return temp;
+ }
+
+ // Returns: 0 if *this is empty ; otherwise, the number of shared_ptr instances that share ownership with *this.
+ int use_count() const EA_NOEXCEPT
+ {
+ return mpRefCount ? mpRefCount->use_count() : 0;
+ }
+
+ // Returns: use_count() == 0
+ bool expired() const EA_NOEXCEPT
+ {
+ return (!mpRefCount || (mpRefCount->use_count() == 0));
+ }
+
+ void reset()
+ {
+ if(mpRefCount)
+ mpRefCount->weak_release();
+
+ mpValue = nullptr;
+ mpRefCount = nullptr;
+ }
+
+ void swap(this_type& weakPtr)
+ {
+ T* const pValue = weakPtr.mpValue;
+ weakPtr.mpValue = mpValue;
+ mpValue = pValue;
+
+ ref_count_sp* const pRefCount = weakPtr.mpRefCount;
+ weakPtr.mpRefCount = mpRefCount;
+ mpRefCount = pRefCount;
+ }
+
+
+ /// assign
+ ///
+ /// Assignment via another weak_ptr.
+ ///
+ template <typename U>
+ void assign(const weak_ptr<U>& weakPtr,
+ typename eastl::enable_if<eastl::is_convertible<U*, element_type*>::value>::type* = 0) EA_NOEXCEPT
+ {
+ if(mpRefCount != weakPtr.mpRefCount) // This check encompasses assignment to self.
+ {
+ // Release old reference
+ if(mpRefCount)
+ mpRefCount->weak_release();
+
+ // Add new reference
+ mpValue = weakPtr.mpValue;
+ mpRefCount = weakPtr.mpRefCount;
+ if(mpRefCount)
+ mpRefCount->weak_addref();
+ }
+ }
+
+
+ /// owner_before
+ /// C++11 function for ordering.
+ template <typename U>
+ bool owner_before(const weak_ptr<U>& weakPtr) const EA_NOEXCEPT
+ {
+ return (mpRefCount < weakPtr.mpRefCount);
+ }
+
+ /// owner_before
+ template <typename U>
+ bool owner_before(const shared_ptr<U>& sharedPtr) const EA_NOEXCEPT
+ {
+ return (mpRefCount < sharedPtr.mpRefCount);
+ }
+
+
+ /// less_than
+ /// For compatibility with pre-C++11 weak_ptr. Use owner_before instead.
+ template <typename U>
+ bool less_than(const weak_ptr<U>& weakPtr) const EA_NOEXCEPT
+ {
+ return (mpRefCount < weakPtr.mpRefCount);
+ }
+
+
+ /// assign
+ ///
+ /// Assignment through a T/ref_count_sp pair. This is used by
+ /// external utility functions.
+ ///
+ void assign(element_type* pValue, ref_count_sp* pRefCount)
+ {
+ mpValue = pValue;
+
+ if(pRefCount != mpRefCount)
+ {
+ if(mpRefCount)
+ mpRefCount->weak_release();
+
+ mpRefCount = pRefCount;
+
+ if(mpRefCount)
+ mpRefCount->weak_addref();
+ }
+ }
+
+ protected:
+ element_type* mpValue; /// The (weakly) owned pointer.
+ ref_count_sp* mpRefCount; /// Reference count for owned pointer.
+
+ // Friend declarations
+ template <typename U> friend class shared_ptr;
+ template <typename U> friend class weak_ptr;
+
+ }; // class weak_ptr
+
+
+
+ /// Note that the C++11 Standard does not specify that weak_ptr has comparison operators,
+ /// though it does specify that the owner_before function exists in weak_ptr.
+ template <typename T, typename U>
+ inline bool operator<(const weak_ptr<T>& weakPtr1, const weak_ptr<U>& weakPtr2)
+ {
+ return weakPtr1.owner_before(weakPtr2);
+ }
+
+
+ template <typename T>
+ void swap(weak_ptr<T>& weakPtr1, weak_ptr<T>& weakPtr2)
+ {
+ weakPtr1.swap(weakPtr2);
+ }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // owner_less
+ //
+ // Implements less (operator <) for shared_ptr and thus allows it to participate
+ // in algorithms and containers that use strict weak ordering, such as map.
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ struct owner_less;
+
+ template <typename T>
+ struct owner_less< shared_ptr<T> >
+ : public eastl::binary_function<shared_ptr<T>, shared_ptr<T>, bool>
+ {
+ typedef bool result_type;
+
+ bool operator()(shared_ptr<T> const& a, shared_ptr<T> const& b) const
+ { return a.owner_before(b); }
+
+ bool operator()(shared_ptr<T> const& a, weak_ptr<T> const& b) const
+ { return a.owner_before(b); }
+
+ bool operator()(weak_ptr<T> const& a, shared_ptr<T> const& b) const
+ { return a.owner_before(b); }
+ };
+
+ template <typename T>
+ struct owner_less< weak_ptr<T> >
+ : public eastl::binary_function<weak_ptr<T>, weak_ptr<T>, bool>
+ {
+ typedef bool result_type;
+
+ bool operator()(weak_ptr<T> const& a, weak_ptr<T> const& b) const
+ { return a.owner_before(b); }
+
+ bool operator()(weak_ptr<T> const& a, shared_ptr<T> const& b) const
+ { return a.owner_before(b); }
+
+ bool operator()(shared_ptr<T> const& a, weak_ptr<T> const& b) const
+ { return a.owner_before(b); }
+ };
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+EA_RESTORE_VC_WARNING();
+
+
+// We have to either #include enable_shared.h here or we need to move the enable_shared source code to here.
+#include <EASTL/internal/enable_shared.h>
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/slist.h b/EASTL/include/EASTL/slist.h
new file mode 100644
index 0000000..dc3c447
--- /dev/null
+++ b/EASTL/include/EASTL/slist.h
@@ -0,0 +1,1946 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// An slist is a singly-linked list. The C++ standard library doesn't define
+// such a thing as an slist, nor does the C++ TR1. Our implementation of slist
+// largely follows the design of the SGI STL slist container, which is also
+// found in STLPort. Singly-linked lists use less memory than doubly-linked
+// lists, but are less flexible.
+//
+// In looking at slist, you will notice a lot of references to things like
+// 'before first', 'before last', 'insert after', and 'erase after'. This is
+// due to the fact that std::list insert and erase works on the node before
+// the referenced node, whereas slist is singly linked and operations are only
+// efficient if they work on the node after the referenced node. This is because
+// with an slist node you know the node after it but not the node before it.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_SLIST_H
+#define EASTL_SLIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/sort.h>
+#include <EASTL/bonus/compressed_pair.h>
+#include <stddef.h>
+
+EA_DISABLE_ALL_VC_WARNINGS();
+
+ #include <new>
+
+EA_RESTORE_ALL_VC_WARNINGS();
+
+EA_DISABLE_SN_WARNING(828); // The EDG SN compiler has a bug in its handling of variadic template arguments and mistakenly reports "parameter "args" was never referenced"
+
+
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4345 - Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4530 4345 4571);
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_SLIST_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_SLIST_DEFAULT_NAME
+ #define EASTL_SLIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " slist" // Unless the user overrides something, this is "EASTL slist".
+ #endif
+
+
+ /// EASTL_SLIST_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_SLIST_DEFAULT_ALLOCATOR
+ #define EASTL_SLIST_DEFAULT_ALLOCATOR allocator_type(EASTL_SLIST_DEFAULT_NAME)
+ #endif
+
+
+
+ /// SListNodeBase
+ ///
+ /// This is a standalone struct so that operations on it can be done without templates
+ /// and so that an empty slist can have an SListNodeBase and thus not create any
+ /// instances of T.
+ ///
+ struct SListNodeBase
+ {
+ SListNodeBase* mpNext;
+ } EASTL_LIST_PROXY_MAY_ALIAS;
+
+
+ #if EASTL_LIST_PROXY_ENABLED
+
+ /// SListNodeBaseProxy
+ ///
+ /// In debug builds, we define SListNodeBaseProxy to be the same thing as
+ /// SListNodeBase, except it is templated on the parent SListNode class.
+ /// We do this because we want users in debug builds to be able to easily
+ /// view the slist's contents in a debugger GUI. We do this only in a debug
+ /// build for the reasons described above: that SListNodeBase needs to be
+ /// as efficient as possible and not cause code bloat or extra function
+ /// calls (inlined or not).
+ ///
+ /// SListNodeBaseProxy *must* be separate from its parent class SListNode
+ /// because the slist class must have a member node which contains no T value.
+ /// It is thus incorrect for us to have one single SListNode class which
+ /// has both mpNext and mValue. So we do a recursive template trick in the
+ /// definition and use of SListNodeBaseProxy.
+ ///
+ template <typename SLN>
+ struct SListNodeBaseProxy
+ {
+ SLN* mpNext;
+ };
+
+ template <typename T>
+ struct SListNode : public SListNodeBaseProxy< SListNode<T> >
+ {
+ T mValue;
+ };
+
+ #else
+ template <typename T>
+ struct SListNode : public SListNodeBase
+ {
+ T mValue;
+ };
+ #endif
+
+
+ /// SListIterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct SListIterator
+ {
+ typedef SListIterator<T, Pointer, Reference> this_type;
+ typedef SListIterator<T, T*, T&> iterator;
+ typedef SListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef SListNode<T> node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ SListIterator();
+ SListIterator(const SListNodeBase* pNode);
+ SListIterator(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+ };
+
+
+
+ /// SListBase
+ ///
+ /// See VectorBase (class vector) for an explanation of why we
+ /// create this separate base class.
+ ///
+ template <typename T, typename Allocator>
+ struct SListBase
+ {
+ public:
+ typedef Allocator allocator_type;
+ typedef SListNode<T> node_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ #if EASTL_LIST_PROXY_ENABLED
+ typedef SListNodeBaseProxy< SListNode<T> > base_node_type;
+ #else
+ typedef SListNodeBase base_node_type; // We use SListNodeBase instead of SListNode<T> because we don't want to create a T.
+ #endif
+
+ protected:
+ eastl::compressed_pair<base_node_type, allocator_type> mNodeAllocator;
+ #if EASTL_SLIST_SIZE_CACHE
+ size_type mSize;
+ #endif
+
+ base_node_type& internalNode() EA_NOEXCEPT { return mNodeAllocator.first(); }
+ base_node_type const& internalNode() const EA_NOEXCEPT { return mNodeAllocator.first(); }
+ allocator_type& internalAllocator() EA_NOEXCEPT { return mNodeAllocator.second(); }
+ const allocator_type& internalAllocator() const EA_NOEXCEPT { return mNodeAllocator.second(); }
+
+ public:
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ protected:
+ SListBase();
+ SListBase(const allocator_type& a);
+ ~SListBase();
+
+ node_type* DoAllocateNode();
+ void DoFreeNode(node_type* pNode);
+
+ SListNodeBase* DoEraseAfter(SListNodeBase* pNode);
+ SListNodeBase* DoEraseAfter(SListNodeBase* pNode, SListNodeBase* pNodeLast);
+
+ }; // class SListBase
+
+
+
+ /// slist
+ ///
+ /// This is the equivalent of C++11's forward_list.
+ ///
+ /// -- size() is O(n) --
+ /// Note that as of this writing, list::size() is an O(n) operation when EASTL_SLIST_SIZE_CACHE is disabled.
+ /// That is, getting the size of the list is not a fast operation, as it requires traversing the list and
+ /// counting the nodes. We could make list::size() be fast by having a member mSize variable. There are reasons
+ /// for having such functionality and reasons for not having such functionality. We currently choose
+ /// to not have a member mSize variable as it would add four bytes to the class, add a tiny amount
+ /// of processing to functions such as insert and erase, and would only serve to improve the size
+ /// function, but no others. The alternative argument is that the C++ standard states that std::list
+ /// should be an O(1) operation (i.e. have a member size variable), most C++ standard library list
+ /// implementations do so, the size is but an integer which is quick to update, and many users
+ /// expect to have a fast size function. The EASTL_SLIST_SIZE_CACHE option changes this.
+ /// To consider: Make size caching an optional template parameter.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a list container, your pool
+ /// needs to contain items of type slist::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+ /// typedef slist<Widget, MemoryPool> WidgetList; // Delare your WidgetList type.
+ /// MemoryPool myPool(sizeof(WidgetList::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetList myList(&myPool); // Create a list that uses the pool.
+ ///
+ template <typename T, typename Allocator = EASTLAllocatorType >
+ class slist : public SListBase<T, Allocator>
+ {
+ typedef SListBase<T, Allocator> base_type;
+ typedef slist<T, Allocator> this_type;
+
+ public:
+ typedef T value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef SListIterator<T, T*, T&> iterator;
+ typedef SListIterator<T, const T*, const T&> const_iterator;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::base_node_type base_node_type;
+
+ using base_type::mNodeAllocator;
+ using base_type::DoEraseAfter;
+ using base_type::DoAllocateNode;
+ using base_type::DoFreeNode;
+ #if EASTL_SLIST_SIZE_CACHE
+ using base_type::mSize;
+ #endif
+ using base_type::internalNode;
+ using base_type::internalAllocator;
+
+ public:
+ slist();
+ slist(const allocator_type& allocator);
+ explicit slist(size_type n, const allocator_type& allocator = EASTL_SLIST_DEFAULT_ALLOCATOR);
+ slist(size_type n, const value_type& value, const allocator_type& allocator = EASTL_SLIST_DEFAULT_ALLOCATOR);
+ slist(const this_type& x);
+ slist(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_SLIST_DEFAULT_ALLOCATOR);
+ slist(this_type&& x);
+ slist(this_type&& x, const allocator_type& allocator);
+
+ template <typename InputIterator>
+ slist(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type>);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ void assign(size_type n, const value_type& value);
+ void assign(std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void assign(InputIterator first, InputIterator last);
+
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ iterator before_begin() EA_NOEXCEPT;
+ const_iterator before_begin() const EA_NOEXCEPT;
+ const_iterator cbefore_begin() const EA_NOEXCEPT;
+
+ iterator previous(const_iterator position);
+ const_iterator previous(const_iterator position) const;
+
+ reference front();
+ const_reference front() const;
+
+ template <class... Args>
+ void emplace_front(Args&&... args);
+
+ void push_front(const value_type& value);
+ reference push_front();
+ void push_front(value_type&& value);
+
+ void pop_front();
+
+ bool empty() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+
+ void resize(size_type n, const value_type& value);
+ void resize(size_type n);
+
+ iterator insert(const_iterator position);
+ iterator insert(const_iterator position, const value_type& value);
+ void insert(const_iterator position, size_type n, const value_type& value);
+
+ template <typename InputIterator>
+ void insert(const_iterator position, InputIterator first, InputIterator last);
+
+ // Returns an iterator pointing to the last inserted element, or position if insertion count is zero.
+ iterator insert_after(const_iterator position);
+ iterator insert_after(const_iterator position, const value_type& value);
+ iterator insert_after(const_iterator position, size_type n, const value_type& value);
+ iterator insert_after(const_iterator position, std::initializer_list<value_type> ilist);
+ iterator insert_after(const_iterator position, value_type&& value);
+
+ template <class... Args>
+ iterator emplace_after(const_iterator position, Args&&... args);
+
+ template <typename InputIterator>
+ iterator insert_after(const_iterator position, InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+
+ iterator erase_after(const_iterator position);
+ iterator erase_after(const_iterator before_first, const_iterator last);
+
+ void clear() EA_NOEXCEPT;
+ void reset_lose_memory() EA_NOEXCEPT; // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ size_type remove(const value_type& value);
+
+ template <typename Predicate>
+ size_type remove_if(Predicate predicate);
+
+ void reverse() EA_NOEXCEPT;
+
+ // splice splices to before position, like with the list container. However, in order to do so
+ // it must walk the list from beginning to position, which is an O(n) operation that can thus
+ // be slow. It's recommended that the splice_after functions be used whenever possible as they are O(1).
+ void splice(const_iterator position, this_type& x);
+ void splice(const_iterator position, this_type& x, const_iterator i);
+ void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last);
+ void splice(const_iterator position, this_type&& x);
+ void splice(const_iterator position, this_type&& x, const_iterator i);
+ void splice(const_iterator position, this_type&& x, const_iterator first, const_iterator last);
+
+ void splice_after(const_iterator position, this_type& x);
+ void splice_after(const_iterator position, this_type& x, const_iterator i);
+ void splice_after(const_iterator position, this_type& x, const_iterator first, const_iterator last);
+ void splice_after(const_iterator position, this_type&& x);
+ void splice_after(const_iterator position, this_type&& x, const_iterator i);
+ void splice_after(const_iterator position, this_type&& x, const_iterator first, const_iterator last);
+
+ // The following splice_after funcions are deprecated, as they don't allow for recognizing
+ // the allocator, cannot maintain the source mSize, and are not in the C++11 Standard definition
+ // of std::forward_list (which is the equivalent of this class).
+ void splice_after(const_iterator position, const_iterator before_first, const_iterator before_last); // before_first and before_last come from a source container.
+ void splice_after(const_iterator position, const_iterator previous); // previous comes from a source container.
+
+ // Sorting functionality
+ // This is independent of the global sort algorithms, as lists are
+ // linked nodes and can be sorted more efficiently by moving nodes
+ // around in ways that global sort algorithms aren't privy to.
+ void sort();
+
+ template <class Compare>
+ void sort(Compare compare);
+
+ // Not yet implemented:
+ // void merge(this_type& x);
+ // void merge(this_type&& x);
+ // template <class Compare>
+ // void merge(this_type& x, Compare compare);
+ // template <class Compare>
+ // void merge(this_type&& x, Compare compare);
+ // If these get implemented then make sure to override them in fixed_slist.
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ node_type* DoCreateNode();
+
+ template<typename... Args>
+ node_type* DoCreateNode(Args&&... args);
+
+ template <typename Integer>
+ void DoAssign(Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ void DoAssign(InputIterator first, InputIterator last, false_type);
+
+ void DoAssignValues(size_type n, const value_type& value);
+
+ template <typename InputIterator>
+ node_type* DoInsertAfter(SListNodeBase* pNode, InputIterator first, InputIterator last);
+
+ template <typename Integer>
+ node_type* DoInsertAfter(SListNodeBase* pNode, Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ node_type* DoInsertAfter(SListNodeBase* pNode, InputIterator first, InputIterator last, false_type);
+
+ node_type* DoInsertValueAfter(SListNodeBase* pNode);
+ node_type* DoInsertValuesAfter(SListNodeBase* pNode, size_type n, const value_type& value);
+
+ template<typename... Args>
+ node_type* DoInsertValueAfter(SListNodeBase* pNode, Args&&... args);
+
+ void DoSwap(this_type& x);
+
+ }; // class slist
+
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // SListNodeBase functions
+ ///////////////////////////////////////////////////////////////////////
+
+ inline SListNodeBase* SListNodeInsertAfter(SListNodeBase* pPrevNode, SListNodeBase* pNode)
+ {
+ pNode->mpNext = pPrevNode->mpNext;
+ pPrevNode->mpNext = pNode;
+ return pNode;
+ }
+
+ inline SListNodeBase* SListNodeGetPrevious(SListNodeBase* pNodeBase, const SListNodeBase* pNode)
+ {
+ while(pNodeBase && (pNodeBase->mpNext != pNode))
+ pNodeBase = pNodeBase->mpNext;
+ return pNodeBase;
+ }
+
+ inline const SListNodeBase* SListNodeGetPrevious(const SListNodeBase* pNodeBase, const SListNodeBase* pNode)
+ {
+ while(pNodeBase && (pNodeBase->mpNext != pNode))
+ pNodeBase = pNodeBase->mpNext;
+ return pNodeBase;
+ }
+
+ inline void SListNodeSpliceAfter(SListNodeBase* pNode, SListNodeBase* pNodeBeforeFirst, SListNodeBase* pNodeBeforeLast)
+ {
+ if((pNode != pNodeBeforeFirst) && (pNode != pNodeBeforeLast))
+ {
+ SListNodeBase* const pFirst = pNodeBeforeFirst->mpNext;
+ SListNodeBase* const pPosition = pNode->mpNext;
+
+ pNodeBeforeFirst->mpNext = pNodeBeforeLast->mpNext;
+ pNode->mpNext = pFirst;
+ pNodeBeforeLast->mpNext = pPosition;
+ }
+ }
+
+ inline void SListNodeSpliceAfter(SListNodeBase* pNode, SListNodeBase* pNodeBase)
+ {
+ SListNodeBase* const pNodeBeforeLast = SListNodeGetPrevious(pNodeBase, NULL);
+
+ if(pNodeBeforeLast != pNodeBase)
+ {
+ SListNodeBase* const pPosition = pNode->mpNext;
+ pNode->mpNext = pNodeBase->mpNext;
+ pNodeBase->mpNext = NULL;
+ pNodeBeforeLast->mpNext = pPosition;
+ }
+ }
+
+ inline SListNodeBase* SListNodeReverse(SListNodeBase* pNode)
+ {
+ SListNodeBase* pNodeFirst = pNode;
+ pNode = pNode->mpNext;
+ pNodeFirst->mpNext = NULL;
+
+ while(pNode)
+ {
+ SListNodeBase* const pTemp = pNode->mpNext;
+ pNode->mpNext = pNodeFirst;
+ pNodeFirst = pNode;
+ pNode = pTemp;
+ }
+ return pNodeFirst;
+ }
+
+ inline uint32_t SListNodeGetSize(SListNodeBase* pNode)
+ {
+ uint32_t n = 0;
+ while(pNode)
+ {
+ ++n;
+ pNode = pNode->mpNext;
+ }
+ return n;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // SListIterator functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline SListIterator<T, Pointer, Reference>::SListIterator()
+ : mpNode(NULL)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline SListIterator<T, Pointer, Reference>::SListIterator(const SListNodeBase* pNode)
+ : mpNode(static_cast<node_type*>((SListNode<T>*)const_cast<SListNodeBase*>(pNode))) // All this casting is in the name of making runtime debugging much easier on the user.
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline SListIterator<T, Pointer, Reference>::SListIterator(const iterator& x)
+ : mpNode(const_cast<node_type*>(x.mpNode))
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename SListIterator<T, Pointer, Reference>::reference
+ SListIterator<T, Pointer, Reference>::operator*() const
+ {
+ return mpNode->mValue;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename SListIterator<T, Pointer, Reference>::pointer
+ SListIterator<T, Pointer, Reference>::operator->() const
+ {
+ return &mpNode->mValue;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename SListIterator<T, Pointer, Reference>::this_type&
+ SListIterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename SListIterator<T, Pointer, Reference>::this_type
+ SListIterator<T, Pointer, Reference>::operator++(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return temp;
+ }
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template paremeters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const SListIterator<T, PointerA, ReferenceA>& a,
+ const SListIterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const SListIterator<T, PointerA, ReferenceA>& a,
+ const SListIterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const SListIterator<T, Pointer, Reference>& a,
+ const SListIterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // SListBase functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline SListBase<T, Allocator>::SListBase()
+ : mNodeAllocator(base_node_type(), allocator_type(EASTL_SLIST_DEFAULT_NAME))
+ #if EASTL_SLIST_SIZE_CACHE
+ , mSize(0)
+ #endif
+ {
+ internalNode().mpNext = NULL;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline SListBase<T, Allocator>::SListBase(const allocator_type& allocator)
+ : mNodeAllocator(base_node_type(), allocator)
+ #if EASTL_SLIST_SIZE_CACHE
+ , mSize(0)
+ #endif
+ {
+ internalNode().mpNext = NULL;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline SListBase<T, Allocator>::~SListBase()
+ {
+ DoEraseAfter((SListNodeBase*)&internalNode(), NULL);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline const typename SListBase<T, Allocator>::allocator_type&
+ SListBase<T, Allocator>::get_allocator() const EA_NOEXCEPT
+ {
+ return internalAllocator();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename SListBase<T, Allocator>::allocator_type&
+ SListBase<T, Allocator>::get_allocator() EA_NOEXCEPT
+ {
+ return internalAllocator();
+ }
+
+
+ template <typename T, typename Allocator>
+ void
+ SListBase<T, Allocator>::set_allocator(const allocator_type& allocator)
+ {
+ EASTL_ASSERT((internalAllocator() == allocator) || (static_cast<node_type*>(internalNode().mpNext) == NULL)); // We can only assign a different allocator if we are empty of elements.
+ internalAllocator() = allocator;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline SListNode<T>* SListBase<T, Allocator>::DoAllocateNode()
+ {
+ return (node_type*)allocate_memory(internalAllocator(), sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void SListBase<T, Allocator>::DoFreeNode(node_type* pNode)
+ {
+ EASTLFree(internalAllocator(), pNode, sizeof(node_type));
+ }
+
+
+ template <typename T, typename Allocator>
+ SListNodeBase* SListBase<T, Allocator>::DoEraseAfter(SListNodeBase* pNode)
+ {
+ node_type* const pNodeNext = static_cast<node_type*>((base_node_type*)pNode->mpNext);
+ SListNodeBase* const pNodeNextNext = (SListNodeBase*)pNodeNext->mpNext;
+
+ pNode->mpNext = pNodeNextNext;
+ pNodeNext->~node_type();
+ DoFreeNode(pNodeNext);
+ #if EASTL_SLIST_SIZE_CACHE
+ --mSize;
+ #endif
+ return pNodeNextNext;
+ }
+
+
+ template <typename T, typename Allocator>
+ SListNodeBase* SListBase<T, Allocator>::DoEraseAfter(SListNodeBase* pNode, SListNodeBase* pNodeLast)
+ {
+ node_type* pNodeCurrent = static_cast<node_type*>((base_node_type*)pNode->mpNext);
+
+ while(pNodeCurrent != (base_node_type*)pNodeLast)
+ {
+ node_type* const pNodeTemp = pNodeCurrent;
+ pNodeCurrent = static_cast<node_type*>((base_node_type*)pNodeCurrent->mpNext);
+ pNodeTemp->~node_type();
+ DoFreeNode(pNodeTemp);
+ #if EASTL_SLIST_SIZE_CACHE
+ --mSize;
+ #endif
+ }
+ pNode->mpNext = pNodeLast;
+ return pNodeLast;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // slist functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline slist<T, Allocator>::slist()
+ : base_type()
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator>
+ inline slist<T, Allocator>::slist(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator>
+ inline slist<T, Allocator>::slist(size_type n, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInsertValuesAfter((SListNodeBase*)&internalNode(), n, value_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline slist<T, Allocator>::slist(size_type n, const value_type& value, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInsertValuesAfter((SListNodeBase*)&internalNode(), n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline slist<T, Allocator>::slist(const slist& x)
+ : base_type(x.internalAllocator())
+ {
+ DoInsertAfter((SListNodeBase*)&internalNode(), const_iterator((SListNodeBase*)x.internalNode().mpNext), const_iterator(NULL), false_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ slist<T, Allocator>::slist(this_type&& x)
+ : base_type(x.internalAllocator())
+ {
+ swap(x);
+ }
+
+ template <typename T, typename Allocator>
+ slist<T, Allocator>::slist(this_type&& x, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+
+
+ template <typename T, typename Allocator>
+ inline slist<T, Allocator>::slist(std::initializer_list<value_type> ilist, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInsertAfter((SListNodeBase*)&internalNode(), ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline slist<T, Allocator>::slist(InputIterator first, InputIterator last)
+ : base_type(EASTL_SLIST_DEFAULT_ALLOCATOR)
+ {
+ DoInsertAfter((SListNodeBase*)&internalNode(), first, last);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::begin() EA_NOEXCEPT
+ {
+ return iterator((SListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::const_iterator
+ slist<T, Allocator>::begin() const EA_NOEXCEPT
+ {
+ return const_iterator((SListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::const_iterator
+ slist<T, Allocator>::cbegin() const EA_NOEXCEPT
+ {
+ return const_iterator((SListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::end() EA_NOEXCEPT
+ {
+ return iterator(NULL);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::const_iterator
+ slist<T, Allocator>::end() const EA_NOEXCEPT
+ {
+ return const_iterator(NULL);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::const_iterator
+ slist<T, Allocator>::cend() const EA_NOEXCEPT
+ {
+ return const_iterator(NULL);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::before_begin() EA_NOEXCEPT
+ {
+ return iterator((SListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::const_iterator
+ slist<T, Allocator>::before_begin() const EA_NOEXCEPT
+ {
+ return const_iterator((SListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::const_iterator
+ slist<T, Allocator>::cbefore_begin() const EA_NOEXCEPT
+ {
+ return const_iterator((SListNodeBase*)&internalNode());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::previous(const_iterator position)
+ {
+ return iterator(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::const_iterator
+ slist<T, Allocator>::previous(const_iterator position) const
+ {
+ return const_iterator(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::reference
+ slist<T, Allocator>::front()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(internalNode().mpNext == NULL))
+ EASTL_FAIL_MSG("slist::front -- empty container");
+ #endif
+
+ EA_ANALYSIS_ASSUME(internalNode().mpNext != NULL);
+
+ return ((node_type*)internalNode().mpNext)->mValue;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::const_reference
+ slist<T, Allocator>::front() const
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(internalNode().mpNext == NULL))
+ EASTL_FAIL_MSG("slist::front -- empty container");
+ #endif
+
+ EA_ANALYSIS_ASSUME(internalNode().mpNext != NULL);
+
+ return static_cast<node_type*>(internalNode().mpNext)->mValue;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <class... Args>
+ void slist<T, Allocator>::emplace_front(Args&&... args)
+ {
+ DoInsertValueAfter((SListNodeBase*)&internalNode(), eastl::forward<Args>(args)...);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::push_front(const value_type& value)
+ {
+ SListNodeInsertAfter((SListNodeBase*)&internalNode(), (SListNodeBase*)DoCreateNode(value));
+ #if EASTL_SLIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::reference
+ slist<T, Allocator>::push_front()
+ {
+ SListNodeInsertAfter((SListNodeBase*)&internalNode(), (SListNodeBase*)DoCreateNode());
+ #if EASTL_SLIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ return ((node_type*)internalNode().mpNext)->mValue; // Same as return front();
+ }
+
+ template <typename T, typename Allocator>
+ void slist<T, Allocator>::push_front(value_type&& value)
+ {
+ emplace_after(before_begin(), eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ void slist<T, Allocator>::pop_front()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(internalNode().mpNext == NULL))
+ EASTL_FAIL_MSG("slist::front -- empty container");
+ #endif
+
+ EA_ANALYSIS_ASSUME(internalNode().mpNext != NULL);
+
+ node_type* const pNode = static_cast<node_type*>(internalNode().mpNext);
+ internalNode().mpNext = pNode->mpNext;
+ pNode->~node_type();
+ DoFreeNode(pNode);
+ #if EASTL_SLIST_SIZE_CACHE
+ --mSize;
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ typename slist<T, Allocator>::this_type& slist<T, Allocator>::operator=(const this_type& x)
+ {
+ if(&x != this)
+ {
+ // If (EASTL_ALLOCATOR_COPY_ENABLED == 1) and the current contents are allocated by an
+ // allocator that's unequal to x's allocator, we need to reallocate our elements with
+ // our current allocator and reallocate it with x's allocator. If the allocators are
+ // equal then we can use a more optimal algorithm that doesn't reallocate our elements
+ // but instead can copy them in place.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ bool bSlowerPathwayRequired = (internalAllocator() != x.internalAllocator());
+ #else
+ bool bSlowerPathwayRequired = false;
+ #endif
+
+ if(bSlowerPathwayRequired)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ internalAllocator() = x.internalAllocator();
+ #endif
+ }
+
+ DoAssign(x.begin(), x.end(), eastl::false_type());
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename slist<T, Allocator>::this_type& slist<T, Allocator>::operator=(this_type&& x)
+ {
+ if(this != &x)
+ {
+ clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor.
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename slist<T, Allocator>::this_type& slist<T, Allocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ DoAssign(ilist.begin(), ilist.end(), false_type());
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::assign(std::initializer_list<value_type> ilist)
+ {
+ DoAssign(ilist.begin(), ilist.end(), false_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator> // It turns out that the C++ std::list specifies a two argument
+ inline void slist<T, Allocator>::assign(InputIterator first, InputIterator last) // version of assign that takes (int size, int value). These are not
+ { // iterators, so we need to do a template compiler trick to do the right thing.
+ DoAssign(first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::assign(size_type n, const value_type& value)
+ {
+ // To do: get rid of DoAssignValues and put its implementation directly here.
+ DoAssignValues(n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::swap(this_type& x)
+ {
+ if(internalAllocator() == x.internalAllocator()) // If allocators are equivalent...
+ DoSwap(x);
+ else // else swap the contents.
+ {
+ const this_type temp(*this); // Can't call eastl::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool slist<T, Allocator>::empty() const EA_NOEXCEPT
+ {
+ return internalNode().mpNext == NULL;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::size_type
+ slist<T, Allocator>::size() const EA_NOEXCEPT
+ {
+ return SListNodeGetSize((SListNodeBase*)internalNode().mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::clear() EA_NOEXCEPT
+ {
+ DoEraseAfter((SListNodeBase*)&internalNode(), NULL);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::reset_lose_memory() EA_NOEXCEPT
+ {
+ // The reset function is a special extension function which unilaterally
+ // resets the container to an empty state without freeing the memory of
+ // the contained objects. This is useful for very quickly tearing down a
+ // container built into scratch memory.
+ internalNode().mpNext = NULL;
+ #if EASTL_SLIST_SIZE_CACHE
+ mSize = 0;
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ void slist<T, Allocator>::resize(size_type n, const value_type& value)
+ {
+ SListNodeBase* pNode = (SListNodeBase*)&internalNode();
+
+ for(; pNode->mpNext && (n > 0); --n)
+ pNode = pNode->mpNext;
+
+ if(pNode->mpNext)
+ DoEraseAfter(pNode, NULL);
+ else
+ DoInsertValuesAfter(pNode, n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::resize(size_type n)
+ {
+ resize(n, value_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::insert(const_iterator position)
+ {
+ return iterator((SListNodeBase*)DoInsertValueAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode), value_type()));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::insert(const_iterator position, const value_type& value)
+ {
+ return iterator((SListNodeBase*)DoInsertValueAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode), value));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::insert(const_iterator position, size_type n, const value_type& value)
+ {
+ // To do: get rid of DoAssignValues and put its implementation directly here.
+ DoInsertValuesAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode), n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void slist<T, Allocator>::insert(const_iterator position, InputIterator first, InputIterator last)
+ {
+ DoInsertAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode), first, last);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::insert_after(const_iterator position)
+ {
+ return insert_after(position, value_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::insert_after(const_iterator position, const value_type& value)
+ {
+ return iterator((SListNodeBase*)DoInsertValueAfter((SListNodeBase*)position.mpNode, value));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::insert_after(const_iterator position, size_type n, const value_type& value)
+ {
+ return iterator((SListNodeBase*)DoInsertValuesAfter((SListNodeBase*)position.mpNode, n, value));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::insert_after(const_iterator position, std::initializer_list<value_type> ilist)
+ {
+ return iterator((SListNodeBase*)DoInsertAfter((SListNodeBase*)position.mpNode, ilist.begin(), ilist.end(), false_type()));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::insert_after(const_iterator position, InputIterator first, InputIterator last)
+ {
+ return iterator((SListNodeBase*)DoInsertAfter((SListNodeBase*)position.mpNode, first, last));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::insert_after(const_iterator position, value_type&& value)
+ {
+ return emplace_after(position, eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <class... Args>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::emplace_after(const_iterator position, Args&&... args)
+ {
+ return iterator((SListNodeBase*)DoInsertValueAfter(position.mpNode, eastl::forward<Args>(args)...));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::erase(const_iterator position)
+ {
+ return DoEraseAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::erase(const_iterator first, const_iterator last)
+ {
+ return DoEraseAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)first.mpNode), (SListNodeBase*)last.mpNode);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::erase_after(const_iterator position)
+ {
+ return iterator(DoEraseAfter((SListNodeBase*)position.mpNode));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::iterator
+ slist<T, Allocator>::erase_after(const_iterator before_first, const_iterator last)
+ {
+ return iterator(DoEraseAfter((SListNodeBase*)before_first.mpNode, (SListNodeBase*)last.mpNode));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename slist<T, Allocator>::size_type slist<T, Allocator>::remove(const value_type& value)
+ {
+ base_node_type* pNode = &internalNode();
+ size_type numErased = 0;
+
+ while(pNode && pNode->mpNext)
+ {
+ if (static_cast<node_type*>(pNode->mpNext)->mValue == value)
+ {
+ DoEraseAfter((SListNodeBase*)pNode); // This will take care of modifying pNode->mpNext.
+ ++numErased;
+ }
+ else
+ pNode = pNode->mpNext;
+ }
+ return numErased;
+ }
+
+ template <typename T, typename Allocator>
+ template <typename Predicate>
+ inline typename slist<T, Allocator>::size_type slist<T, Allocator>::remove_if(Predicate predicate)
+ {
+ base_node_type* pNode = &internalNode();
+ size_type numErased = 0;
+
+ while(pNode && pNode->mpNext)
+ {
+ if (predicate(static_cast<node_type*>(pNode->mpNext)->mValue))
+ {
+ DoEraseAfter((SListNodeBase*)pNode); // This will take care of modifying pNode->mpNext.
+ ++numErased;
+ }
+ else
+ pNode = pNode->mpNext;
+ }
+ return numErased;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice(const_iterator position, this_type& x)
+ {
+ // Splicing operations cannot succeed if the two containers use unequal allocators.
+ // This issue is not addressed in the C++ 1998 standard but is discussed in the
+ // LWG defect reports, such as #431. There is no simple solution to this problem.
+ // One option is to throw an exception. Another option which probably captures the
+ // user intent most of the time is to copy the range from the source to the dest and
+ // remove it from the source. Until then it's simply disallowed to splice with unequal allocators.
+ // EASTL_ASSERT(internalAllocator() == x.internalAllocator()); // Disabled because our member sort function uses splice but with allocators that may be unequal. There isn't a simple workaround aside from disabling this assert.
+
+ if(x.internalNode().mpNext) // If there is anything to splice...
+ {
+ if(internalAllocator() == x.internalAllocator())
+ {
+ SListNodeSpliceAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode),
+ (SListNodeBase*)&x.internalNode(),
+ SListNodeGetPrevious((SListNodeBase*)&x.internalNode(), NULL));
+
+ #if EASTL_SLIST_SIZE_CACHE
+ mSize += x.mSize;
+ x.mSize = 0;
+ #endif
+ }
+ else
+ {
+ insert(position, x.begin(), x.end());
+ x.clear();
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice(const_iterator position, this_type& x, const_iterator i)
+ {
+ if(internalAllocator() == x.internalAllocator())
+ {
+ SListNodeSpliceAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode),
+ SListNodeGetPrevious((SListNodeBase*)&x.internalNode(), (SListNodeBase*)i.mpNode),
+ (SListNodeBase*)i.mpNode);
+
+ #if EASTL_SLIST_SIZE_CACHE
+ ++mSize;
+ --x.mSize;
+ #endif
+ }
+ else
+ {
+ insert(position, *i);
+ x.erase(i);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice(const_iterator position, this_type& x, const_iterator first, const_iterator last)
+ {
+ if(first != last) // If there is anything to splice...
+ {
+ if(internalAllocator() == x.internalAllocator())
+ {
+ #if EASTL_SLIST_SIZE_CACHE
+ const size_type n = (size_type)eastl::distance(first, last);
+ mSize += n;
+ x.mSize -= n;
+ #endif
+
+ SListNodeSpliceAfter(SListNodeGetPrevious((SListNodeBase*)&internalNode(), (SListNodeBase*)position.mpNode),
+ SListNodeGetPrevious((SListNodeBase*)&x.internalNode(), (SListNodeBase*)first.mpNode),
+ SListNodeGetPrevious((SListNodeBase*)first.mpNode, (SListNodeBase*)last.mpNode));
+ }
+ else
+ {
+ insert(position, first, last);
+ x.erase(first, last);
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void slist<T, Allocator>::splice(const_iterator position, this_type&& x)
+ {
+ return splice(position, x); // This will splice(const_iterator, this_type&)
+ }
+
+ template <typename T, typename Allocator>
+ void slist<T, Allocator>::splice(const_iterator position, this_type&& x, const_iterator i)
+ {
+ return splice(position, x, i); // This will splice_after(const_iterator, this_type&, const_iterator)
+ }
+
+ template <typename T, typename Allocator>
+ void slist<T, Allocator>::splice(const_iterator position, this_type&& x, const_iterator first, const_iterator last)
+ {
+ return splice(position, x, first, last); // This will splice(const_iterator, this_type&, const_iterator, const_iterator)
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice_after(const_iterator position, this_type& x)
+ {
+ if(!x.empty()) // If there is anything to splice...
+ {
+ if(internalAllocator() == x.internalAllocator())
+ {
+ SListNodeSpliceAfter((SListNodeBase*)position.mpNode, (SListNodeBase*)&x.internalNode());
+
+ #if EASTL_SLIST_SIZE_CACHE
+ mSize += x.mSize;
+ x.mSize = 0;
+ #endif
+ }
+ else
+ {
+ insert_after(position, x.begin(), x.end());
+ x.clear();
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice_after(const_iterator position, this_type& x, const_iterator i)
+ {
+ if(internalAllocator() == x.internalAllocator())
+ {
+ SListNodeSpliceAfter((SListNodeBase*)position.mpNode, (SListNodeBase*)i.mpNode);
+
+ #if EASTL_SLIST_SIZE_CACHE
+ mSize++;
+ x.mSize--;
+ #endif
+ }
+ else
+ {
+ const_iterator iNext(i);
+ insert_after(position, i, ++iNext);
+ x.erase(i);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice_after(const_iterator position, this_type& x, const_iterator first, const_iterator last)
+ {
+ if(first != last) // If there is anything to splice...
+ {
+ if(internalAllocator() == x.internalAllocator())
+ {
+ #if EASTL_SLIST_SIZE_CACHE
+ const size_type n = (size_type)eastl::distance(first, last);
+ mSize += n;
+ x.mSize -= n;
+ #endif
+
+ SListNodeSpliceAfter((SListNodeBase*)position.mpNode, (SListNodeBase*)first.mpNode, (SListNodeBase*)last.mpNode);
+ }
+ else
+ {
+ insert_after(position, first, last);
+ x.erase(first, last);
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice_after(const_iterator position, this_type&& x)
+ {
+ return splice_after(position, x); // This will call splice_after(const_iterator, this_type&)
+ }
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice_after(const_iterator position, this_type&& x, const_iterator i)
+ {
+ return splice_after(position, x, i); // This will call splice_after(const_iterator, this_type&, const_iterator)
+ }
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice_after(const_iterator position, this_type&& x, const_iterator first, const_iterator last)
+ {
+ return splice_after(position, x, first, last); // This will call splice_after(const_iterator, this_type&, const_iterator, const_iterator)
+ }
+
+
+ // This function is deprecated.
+ // We have no way of knowing what the container or allocator for before_first/before_last is.
+ // Thus this function requires that the iterators come from equivalent allocators.
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice_after(const_iterator position, const_iterator before_first, const_iterator before_last)
+ {
+ if(before_first != before_last) // If there is anything to splice...
+ {
+ #if EASTL_SLIST_SIZE_CACHE
+ // We have a problem here because the inserted range may come from *this or
+ // it may come from some other list. We have no choice but to implement an O(n)
+ // brute-force search in our list for 'previous'.
+
+ iterator i((SListNodeBase*)&internalNode());
+ iterator iEnd(NULL);
+
+ for( ; i != iEnd; ++i)
+ {
+ if(i == before_first)
+ break;
+ }
+
+ if(i == iEnd) // If the input came from an external range...
+ mSize += (size_type)eastl::distance(before_first, before_last); // Note that we have no way of knowing how to decrementing the size from the external container, assuming it came from one.
+ else
+ { EASTL_FAIL_MSG("slist::splice_after: Impossible to decrement source mSize. Use the other splice_after function instead."); }
+ #endif
+
+ // Insert the range of [before_first + 1, before_last + 1) after position.
+ SListNodeSpliceAfter((SListNodeBase*)position.mpNode, (SListNodeBase*)before_first.mpNode, (SListNodeBase*)before_last.mpNode);
+ }
+ }
+
+
+ // This function is deprecated.
+ // We have no way of knowing what the container or allocator for previous is.
+ // Thus this function requires that the iterators come from equivalent allocators.
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::splice_after(const_iterator position, const_iterator previous)
+ {
+ #if EASTL_SLIST_SIZE_CACHE
+ // We have a problem here because the inserted range may come from *this or
+ // it may come from some other list. We have no choice but to implement an O(n)
+ // brute-force search in our list for 'previous'.
+
+ iterator i((SListNodeBase*)&internalNode());
+ iterator iEnd(NULL);
+
+ for( ; i != iEnd; ++i)
+ {
+ if(i == previous)
+ break;
+ }
+
+ if(i == iEnd) // If the input came from an external range...
+ ++mSize; // Note that we have no way of knowing how to decrementing the size from the external container, assuming it came from one.
+ else
+ { EASTL_FAIL_MSG("slist::splice_after: Impossible to decrement source mSize. Use the other splice_after function instead."); }
+ #endif
+
+ // Insert the element at previous + 1 after position.
+ SListNodeSpliceAfter((SListNodeBase*)position.mpNode, (SListNodeBase*)previous.mpNode, (SListNodeBase*)previous.mpNode->mpNext);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::sort()
+ {
+ // To do: look at using a merge sort, which may well be faster.
+ eastl::comb_sort(begin(), end());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <class Compare>
+ inline void slist<T, Allocator>::sort(Compare compare)
+ {
+ // To do: look at using a merge sort, which may well be faster.
+ eastl::comb_sort(begin(), end(), compare);
+ }
+
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::reverse() EA_NOEXCEPT
+ {
+ if(internalNode().mpNext)
+ internalNode().mpNext = static_cast<node_type*>((base_node_type*)SListNodeReverse((SListNodeBase*)internalNode().mpNext));
+ }
+
+
+ template <typename T, typename Allocator>
+ template<typename... Args>
+ inline typename slist<T, Allocator>::node_type*
+ slist<T, Allocator>::DoCreateNode(Args&&... args)
+ {
+ node_type* const pNode = DoAllocateNode(); // pNode is of type node_type, but it's uninitialized memory.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ ::new((void*)&pNode->mValue) value_type(eastl::forward<Args>(args)...);
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #else
+ ::new((void*)&pNode->mValue) value_type(eastl::forward<Args>(args)...);
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::node_type*
+ slist<T, Allocator>::DoCreateNode()
+ {
+ node_type* const pNode = DoAllocateNode();
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ ::new((void*)&pNode->mValue) value_type();
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #else
+ ::new((void*)&pNode->mValue) value_type();
+ #endif
+ return pNode;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Integer>
+ void slist<T, Allocator>::DoAssign(Integer n, Integer value, true_type)
+ {
+ DoAssignValues(static_cast<size_type>(n), static_cast<value_type>(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ void slist<T, Allocator>::DoAssign(InputIterator first, InputIterator last, false_type)
+ {
+ base_node_type* pNodePrev = &internalNode();
+ node_type* pNode = static_cast<node_type*>(internalNode().mpNext);
+
+ for(; pNode && (first != last); ++first)
+ {
+ pNode->mValue = *first;
+ pNodePrev = pNode;
+ pNode = static_cast<node_type*>(pNode->mpNext);
+ }
+
+ if(first == last)
+ DoEraseAfter((SListNodeBase*)pNodePrev, NULL);
+ else
+ DoInsertAfter((SListNodeBase*)pNodePrev, first, last);
+ }
+
+
+ template <typename T, typename Allocator>
+ void slist<T, Allocator>::DoAssignValues(size_type n, const value_type& value)
+ {
+ base_node_type* pNodePrev = &internalNode();
+ node_type* pNode = static_cast<node_type*>(internalNode().mpNext);
+
+ for(; pNode && (n > 0); --n)
+ {
+ pNode->mValue = value;
+ pNodePrev = pNode;
+ pNode = static_cast<node_type*>(pNode->mpNext);
+ }
+
+ if(n)
+ DoInsertValuesAfter((SListNodeBase*)pNodePrev, n, value);
+ else
+ DoEraseAfter((SListNodeBase*)pNodePrev, NULL);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline typename slist<T, Allocator>::node_type*
+ slist<T, Allocator>::DoInsertAfter(SListNodeBase* pNode, InputIterator first, InputIterator last)
+ {
+ return DoInsertAfter(pNode, first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Integer>
+ inline typename slist<T, Allocator>::node_type*
+ slist<T, Allocator>::DoInsertAfter(SListNodeBase* pNode, Integer n, Integer value, true_type)
+ {
+ return DoInsertValuesAfter(pNode, n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline typename slist<T, Allocator>::node_type*
+ slist<T, Allocator>::DoInsertAfter(SListNodeBase* pNode, InputIterator first, InputIterator last, false_type)
+ {
+ for(; first != last; ++first)
+ {
+ pNode = SListNodeInsertAfter((SListNodeBase*)pNode, (SListNodeBase*)DoCreateNode(*first));
+ #if EASTL_SLIST_SIZE_CACHE
+ ++mSize;
+ #endif
+ }
+
+ return static_cast<node_type*>((base_node_type*)pNode);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::node_type*
+ slist<T, Allocator>::DoInsertValueAfter(SListNodeBase* pNode)
+ {
+ #if EASTL_SLIST_SIZE_CACHE
+ pNode = SListNodeInsertAfter((SListNodeBase*)pNode, (SListNodeBase*)DoCreateNode());
+ ++mSize;
+ return static_cast<node_type*>((base_node_type*)pNode);
+ #else
+ return static_cast<node_type*>((base_node_type*)SListNodeInsertAfter((SListNodeBase*)pNode, (SListNodeBase*)DoCreateNode()));
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ template<typename... Args>
+ inline typename slist<T, Allocator>::node_type*
+ slist<T, Allocator>::DoInsertValueAfter(SListNodeBase* pNode, Args&&... args)
+ {
+ SListNodeBase* pNodeNew = (SListNodeBase*)DoCreateNode(eastl::forward<Args>(args)...);
+ pNode = SListNodeInsertAfter(pNode, pNodeNew);
+ #if EASTL_LIST_SIZE_CACHE
+ ++mSize; // Increment the size after the node creation because we need to assume an exception can occur in the creation.
+ #endif
+ return static_cast<node_type*>((base_node_type*)pNode);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename slist<T, Allocator>::node_type*
+ slist<T, Allocator>::DoInsertValuesAfter(SListNodeBase* pNode, size_type n, const value_type& value)
+ {
+ for(size_type i = 0; i < n; ++i)
+ {
+ pNode = SListNodeInsertAfter((SListNodeBase*)pNode, (SListNodeBase*)DoCreateNode(value));
+ #if EASTL_SLIST_SIZE_CACHE
+ ++mSize; // We don't do a single mSize += n at the end because an exception may result in only a partial range insertion.
+ #endif
+ }
+ return static_cast<node_type*>((base_node_type*)pNode);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void slist<T, Allocator>::DoSwap(this_type& x)
+ {
+ eastl::swap(internalNode().mpNext, x.internalNode().mpNext);
+ eastl::swap(internalAllocator(), x.internalAllocator()); // We do this even if EASTL_ALLOCATOR_COPY_ENABLED is 0.
+ #if EASTL_LIST_SIZE_CACHE
+ eastl::swap(mSize, x.mSize);
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool slist<T, Allocator>::validate() const
+ {
+ #if EASTL_SLIST_SIZE_CACHE
+ size_type n = 0;
+
+ for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i)
+ ++n;
+
+ if(n != mSize)
+ return false;
+ #endif
+
+ // To do: More validation.
+ return true;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int slist<T, Allocator>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ bool operator==(const slist<T, Allocator>& a, const slist<T, Allocator>& b)
+ {
+ typename slist<T, Allocator>::const_iterator ia = a.begin();
+ typename slist<T, Allocator>::const_iterator ib = b.begin();
+ typename slist<T, Allocator>::const_iterator enda = a.end();
+
+ #if EASTL_SLIST_SIZE_CACHE
+ if(a.size() == b.size())
+ {
+ while((ia != enda) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda);
+ }
+ return false;
+ #else
+ typename slist<T, Allocator>::const_iterator endb = b.end();
+
+ while((ia != enda) && (ib != endb) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda) && (ib == endb);
+ #endif
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename Allocator>
+ inline synth_three_way_result<T> operator<=>(const slist<T, Allocator>& a, const slist<T, Allocator>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+#else
+ template <typename T, typename Allocator>
+ inline bool operator<(const slist<T, Allocator>& a, const slist<T, Allocator>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const slist<T, Allocator>& a, const slist<T, Allocator>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const slist<T, Allocator>& a, const slist<T, Allocator>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const slist<T, Allocator>& a, const slist<T, Allocator>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const slist<T, Allocator>& a, const slist<T, Allocator>& b)
+ {
+ return !(a < b);
+ }
+#endif
+
+ template <typename T, typename Allocator>
+ inline void swap(slist<T, Allocator>& a, slist<T, Allocator>& b)
+ {
+ a.swap(b);
+ }
+
+
+ /// erase / erase_if
+ ///
+ /// https://en.cppreference.com/w/cpp/container/forward_list/erase2
+ template <class T, class Allocator, class U>
+ typename slist<T, Allocator>::size_type erase(slist<T, Allocator>& c, const U& value)
+ {
+ // Erases all elements that compare equal to value from the container.
+ return c.remove(value);
+ }
+
+ template <class T, class Allocator, class Predicate>
+ typename slist<T, Allocator>::size_type erase_if(slist<T, Allocator>& c, Predicate predicate)
+ {
+ // Erases all elements that satisfy the predicate pred from the container.
+ return c.remove_if(predicate);
+ }
+
+
+ /// insert_iterator
+ ///
+ /// We borrow a trick from SGI STL here and define an insert_iterator
+ /// specialization for slist. This allows slist insertions to be O(1)
+ /// instead of O(n/2), due to caching of the previous node.
+ ///
+ template <typename T, typename Allocator>
+ class insert_iterator< slist<T, Allocator> >
+ {
+ public:
+ typedef slist<T, Allocator> Container;
+ typedef typename Container::const_reference const_reference;
+ typedef typename Container::iterator iterator_type;
+ typedef EASTL_ITC_NS::output_iterator_tag iterator_category;
+ typedef void value_type;
+ typedef void difference_type;
+ typedef void pointer;
+ typedef void reference;
+
+ protected:
+ Container& container;
+ iterator_type it;
+
+ public:
+ insert_iterator(Container& x, iterator_type i)
+ : container(x)
+ {
+ if(i == x.begin())
+ it = x.before_begin();
+ else
+ it = x.previous(i);
+ }
+
+ insert_iterator<Container>& operator=(const_reference value)
+ { it = container.insert_after(it, value); return *this; }
+
+ insert_iterator<Container>& operator*()
+ { return *this; }
+
+ insert_iterator<Container>& operator++()
+ { return *this; } // This is by design.
+
+ insert_iterator<Container>& operator++(int)
+ { return *this; } // This is by design.
+
+ }; // insert_iterator<slist>
+
+
+} // namespace eastl
+
+EA_RESTORE_SN_WARNING()
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/sort.h b/EASTL/include/EASTL/sort.h
new file mode 100644
index 0000000..fb1c6e5
--- /dev/null
+++ b/EASTL/include/EASTL/sort.h
@@ -0,0 +1,2022 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+//////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements sorting algorithms. Some of these are equivalent to
+// std C++ sorting algorithms, while others don't have equivalents in the
+// C++ standard. We implement the following sorting algorithms:
+// is_sorted --
+// sort -- Unstable. The implementation of this is mapped to quick_sort by default.
+// quick_sort -- Unstable. This is actually an intro-sort (quick sort with switch to insertion sort).
+// tim_sort -- Stable.
+// tim_sort_buffer -- Stable.
+// partial_sort -- Unstable.
+// insertion_sort -- Stable.
+// shell_sort -- Unstable.
+// heap_sort -- Unstable.
+// stable_sort -- Stable. The implementation of this is simply mapped to merge_sort.
+// merge --
+// merge_sort -- Stable.
+// merge_sort_buffer -- Stable.
+// nth_element -- Unstable.
+// radix_sort -- Stable. Important and useful sort for integral data, and faster than all others for this.
+// comb_sort -- Unstable. Possibly the best combination of small code size but fast sort.
+// bubble_sort -- Stable. Useful in practice for sorting tiny sets of data (<= 10 elements).
+// selection_sort* -- Unstable.
+// shaker_sort* -- Stable.
+// bucket_sort* -- Stable.
+//
+// * Found in sort_extra.h.
+//
+// Additional sorting and related algorithms we may want to implement:
+// partial_sort_copy This would be like the std STL version.
+// paritition This would be like the std STL version. This is not categorized as a sort routine by the language standard.
+// stable_partition This would be like the std STL version.
+// counting_sort Maybe we don't want to implement this.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SORT_H
+#define EASTL_SORT_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/iterator.h>
+#include <EASTL/memory.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/functional.h>
+#include <EASTL/heap.h>
+#include <EASTL/allocator.h>
+#include <EASTL/memory.h>
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+// EASTL_PLATFORM_PREFERRED_ALIGNMENT
+//
+// Allows for slightly faster buffers in some cases.
+//
+#if !defined(EASTL_PLATFORM_PREFERRED_ALIGNMENT)
+ #if defined(EA_PROCESSOR_ARM)
+ #define EASTL_PLATFORM_PREFERRED_ALIGNMENT 8
+ #else
+ #define EASTL_PLATFORM_PREFERRED_ALIGNMENT 16
+ #endif
+#endif
+
+
+namespace eastl
+{
+
+ /// is_sorted
+ ///
+ /// Returns true if the range [first, last) is sorted.
+ /// An empty range is considered to be sorted.
+ /// To test if a range is reverse-sorted, use 'greater' as the comparison
+ /// instead of 'less'.
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// bool bIsSorted = is_sorted(intArray.begin(), intArray.end());
+ /// bool bIsReverseSorted = is_sorted(intArray.begin(), intArray.end(), greater<int>());
+ ///
+ template <typename ForwardIterator, typename StrictWeakOrdering>
+ bool is_sorted(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+ {
+ if(first != last)
+ {
+ ForwardIterator current = first;
+
+ for(++current; current != last; first = current, ++current)
+ {
+ if(compare(*current, *first))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first, *current)); // Validate that the compare function is sane.
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ template <typename ForwardIterator>
+ inline bool is_sorted(ForwardIterator first, ForwardIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<ForwardIterator>::value_type> Less;
+
+ return eastl::is_sorted<ForwardIterator, Less>(first, last, Less());
+ }
+
+
+
+ /// is_sorted_until
+ ///
+ /// Returns an iterator to the first element in the range [first,last) which does not follow an ascending order.
+ /// The range between first and the iterator returned is sorted.
+ /// If the entire range is sorted, the function returns last.
+ /// The elements are compared using operator< for the first version, and comp for the second.
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// vector<int>::iterator unsorted_element = is_sorted_until(eastl::end(intArray), eastl::end(intArray));
+ /// vector<int>::iterator unsorted_element_with_user_compare = is_sorted_until(eastl::end(intArray), eastl::end(intArray), eastl::less<int>());
+ ///
+ template<typename ForwardIterator>
+ ForwardIterator is_sorted_until(ForwardIterator first, ForwardIterator last)
+ {
+ if(first != last)
+ {
+ ForwardIterator next = first;
+
+ while(++next != last)
+ {
+ if(*next < *first)
+ return next;
+
+ first = next;
+ }
+ }
+
+ return last;
+ }
+
+ template<typename ForwardIterator, typename Compare>
+ ForwardIterator is_sorted_until(ForwardIterator first, ForwardIterator last, Compare compare)
+ {
+ if(first != last)
+ {
+ ForwardIterator next = first;
+
+ while(++next != last)
+ {
+ if(compare(*next, *first))
+ return next;
+
+ first = next;
+ }
+ }
+
+ return last;
+ }
+
+
+
+ /// merge
+ ///
+ /// This function merges two sorted input sorted ranges into a result sorted range.
+ /// This merge is stable in that no element from the first range will be changed
+ /// in order relative to other elements from the first range.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+ OutputIterator merge(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare compare)
+ {
+ while((first1 != last1) && (first2 != last2))
+ {
+ if(compare(*first2, *first1))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+ *result = *first2;
+ ++first2;
+ }
+ else
+ {
+ *result = *first1;
+ ++first1;
+ }
+ ++result;
+ }
+
+ // Check which list is empty and explicitly copy remaining items from the other list.
+ // For performance reasons, only a single copy operation is invoked to avoid the potential overhead
+ // introduced by chaining two copy operations together. Even if a copy is of zero size there can
+ // be overhead from calling memmove with a zero size copy.
+ if (first1 == last1)
+ {
+ return eastl::copy(first2, last2, result);
+ }
+ else
+ {
+ return eastl::copy(first1, last1, result);
+ }
+ }
+
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+ inline OutputIterator merge(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<InputIterator1>::value_type> Less;
+
+ return eastl::merge<InputIterator1, InputIterator2, OutputIterator, Less>
+ (first1, last1, first2, last2, result, Less());
+ }
+
+
+ //////////////////////////////////////////////////////////////////////////////
+ /// insertion_sort
+ ///
+ /// insertion_sort is an O(n^2) stable sorting algorithm that starts at the
+ /// (k + 1) element and assumes the first (k) elements are sorted.
+ /// Then copy_backwards from (k + 1) to the begining any elements where the
+ /// (k + 1) element is less than [0, k] elements. The position of k when
+ /// (k + 1) element is not less than k is the sorted position of the (k + 1) element.
+ ///
+ /// Example With Intermediate Steps:
+ /// (k + 1) == 2 : [3, 2, 1] -> [3, 3, 1] -> [2, 3, 1]
+ /// (k + 1) == 1 : [2, 3, 1] -> [2, 3, 3] -> [2, 2, 3] -> [1, 2, 3]
+ /// : [1, 2, 3]
+ template <typename BidirectionalIterator, typename StrictWeakOrdering>
+ void insertion_sort(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator>::value_type value_type;
+
+ if (first != last)
+ {
+ BidirectionalIterator i = first;
+
+ for (++i; i != last; ++i)
+ {
+ value_type insertValue(eastl::move(*i));
+ BidirectionalIterator insertPosition = i;
+
+ for (BidirectionalIterator movePosition = i; movePosition != first && compare(insertValue, *(--movePosition)); --insertPosition)
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*movePosition, insertValue));
+ *insertPosition = eastl::move(*movePosition);
+ }
+
+ *insertPosition = eastl::move(insertValue);
+ }
+ }
+ } // insertion_sort
+
+
+ template <typename BidirectionalIterator>
+ void insertion_sort(BidirectionalIterator first, BidirectionalIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<BidirectionalIterator>::value_type> Less;
+
+ insertion_sort<BidirectionalIterator>(first, last, Less());
+
+ } // insertion_sort
+
+
+ /// shell_sort
+ ///
+ /// Implements the ShellSort algorithm. This algorithm is a serious algorithm for larger
+ /// data sets, as reported by Sedgewick in his discussions on QuickSort. Note that shell_sort
+ /// requires a random access iterator, which usually means an array (eg. vector, deque).
+ /// ShellSort has good performance with presorted sequences.
+ /// The term "shell" derives from the name of the inventor, David Shell.
+ ///
+ /// To consider: Allow the user to specify the "h-sequence" array.
+ ///
+ template <typename RandomAccessIterator, typename StrictWeakOrdering>
+ void shell_sort(RandomAccessIterator first, RandomAccessIterator last, StrictWeakOrdering compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+
+ // We use the Knuth 'h' sequence below, as it is easy to calculate at runtime.
+ // However, possibly we are better off using a different sequence based on a table.
+ // One such sequence which averages slightly better than Knuth is:
+ // 1, 5, 19, 41, 109, 209, 505, 929, 2161, 3905, 8929, 16001, 36289,
+ // 64769, 146305, 260609, 587521, 1045505, 2354689, 4188161, 9427969, 16764929
+
+ if(first != last)
+ {
+ RandomAccessIterator iCurrent, iBack, iSorted, iInsertFirst;
+ difference_type nSize = last - first;
+ difference_type nSpace = 1; // nSpace is the 'h' value of the ShellSort algorithm.
+
+ while(nSpace < nSize)
+ nSpace = (nSpace * 3) + 1; // This is the Knuth 'h' sequence: 1, 4, 13, 40, 121, 364, 1093, 3280, 9841, 29524, 88573, 265720, 797161, 2391484, 7174453, 21523360, 64570081, 193710244,
+
+ for(nSpace = (nSpace - 1) / 3; nSpace >= 1; nSpace = (nSpace - 1) / 3) // Integer division is less than ideal.
+ {
+ for(difference_type i = 0; i < nSpace; i++)
+ {
+ iInsertFirst = first + i;
+
+ for(iSorted = iInsertFirst + nSpace; iSorted < last; iSorted += nSpace)
+ {
+ iBack = iCurrent = iSorted;
+
+ for(; (iCurrent != iInsertFirst) && compare(*iCurrent, *(iBack -= nSpace)); iCurrent = iBack)
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iBack, *iCurrent)); // Validate that the compare function is sane.
+ eastl::iter_swap(iCurrent, iBack);
+ }
+ }
+ }
+ }
+ }
+ } // shell_sort
+
+ template <typename RandomAccessIterator>
+ inline void shell_sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<RandomAccessIterator>::value_type> Less;
+
+ eastl::shell_sort<RandomAccessIterator, Less>(first, last, Less());
+ }
+
+
+
+ /// heap_sort
+ ///
+ /// Implements the HeapSort algorithm.
+ /// Note that heap_sort requires a random access iterator, which usually means
+ /// an array (eg. vector, deque).
+ ///
+ template <typename RandomAccessIterator, typename StrictWeakOrdering>
+ void heap_sort(RandomAccessIterator first, RandomAccessIterator last, StrictWeakOrdering compare)
+ {
+ // We simply call our heap algorithms to do the work for us.
+ eastl::make_heap<RandomAccessIterator, StrictWeakOrdering>(first, last, compare);
+ eastl::sort_heap<RandomAccessIterator, StrictWeakOrdering>(first, last, compare);
+ }
+
+ template <typename RandomAccessIterator>
+ inline void heap_sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<RandomAccessIterator>::value_type> Less;
+
+ eastl::heap_sort<RandomAccessIterator, Less>(first, last, Less());
+ }
+
+
+
+ namespace Internal
+ {
+ // Sorts a range whose initial (start - first) entries are already sorted.
+ // This function is a useful helper to the tim_sort function.
+ // This is the same as insertion_sort except that it has a start parameter which indicates
+ // where the start of the unsorted data is.
+ template <typename BidirectionalIterator, typename StrictWeakOrdering>
+ void insertion_sort_already_started(BidirectionalIterator first, BidirectionalIterator last, BidirectionalIterator start, StrictWeakOrdering compare)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator>::value_type value_type;
+
+ if (first != last) // if the range is non-empty...
+ {
+ BidirectionalIterator iCurrent, iNext, iSorted = start - 1;
+
+ for (++iSorted; iSorted != last; ++iSorted)
+ {
+ const value_type temp(*iSorted);
+
+ iNext = iCurrent = iSorted;
+
+ for (--iCurrent; (iNext != first) && compare(temp, *iCurrent); --iNext, --iCurrent)
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iCurrent, temp)); // Validate that the compare function is sane.
+ *iNext = *iCurrent;
+ }
+
+ *iNext = temp;
+ }
+ }
+ }
+ }
+
+
+
+ /// merge_sort_buffer
+ ///
+ /// Implements the MergeSort algorithm with a user-supplied buffer.
+ /// The input buffer must be able to hold a number of items equal to 'last - first'.
+ /// Note that merge_sort_buffer requires a random access iterator, which usually means
+ /// an array (eg. vector, deque).
+ ///
+ /// The algorithm used for merge sort is not the standard merge sort. It has been modified
+ /// to improve performance for data that is already partially sorted. In fact, if data
+ /// is completely sorted, then performance is O(n), but even data with partially sorted
+ /// regions can benefit from the modifications.
+ ///
+ /// 'InsertionSortLimit' specifies a size limit for which the algorithm will use insertion sort.
+ /// Due to the overhead of merge sort, it is often faster to use insertion sort once the size of a region
+ /// is fairly small. However, insertion sort is not as efficient (in terms of assignments orcomparisons)
+ /// so choosing a value that is too large will reduce performance. Generally a value of 16 to 32 is reasonable,
+ /// but the best choose will depend on the data being sorted.
+ template <typename RandomAccessIterator, typename T, typename StrictWeakOrdering, typename difference_type, int InsertionSortLimit>
+ class MergeSorter
+ {
+ public:
+ static void sort(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer, StrictWeakOrdering compare)
+ {
+ if (sort_impl(first, last, pBuffer, difference_type(0), compare) == RL_Buffer)
+ {
+ const difference_type nCount = last - first;
+ eastl::copy<T*, RandomAccessIterator>(pBuffer, pBuffer + nCount, first);
+ }
+ EASTL_DEV_ASSERT((eastl::is_sorted<RandomAccessIterator, StrictWeakOrdering>(first, last, compare)));
+ }
+
+ private:
+ static_assert(InsertionSortLimit > 1, "Sequences of length 1 are already sorted. Use a larger value for InsertionSortLimit");
+
+ enum ResultLocation
+ {
+ RL_SourceRange, // i.e. result is in the range defined by [first, last)
+ RL_Buffer, // i.e. result is in pBuffer
+ };
+
+ // sort_impl
+ //
+ // This sort routine sorts the data in [first, last) and places the result in pBuffer or in the original range of the input. The actual
+ // location of the data is indicated by the enum returned.
+ //
+ // lastSortedEnd is used to specify a that data in the range [first, first + lastSortedEnd] is already sorted. This information is used
+ // to avoid unnecessary merge sorting of already sorted data. lastSortedEnd is a hint, and can be an under estimate of the sorted elements
+ // (i.e. it is legal to pass 0).
+ static ResultLocation sort_impl(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer, difference_type lastSortedEnd, StrictWeakOrdering compare)
+ {
+ const difference_type nCount = last - first;
+
+ if (lastSortedEnd < 1)
+ {
+ lastSortedEnd = eastl::is_sorted_until<RandomAccessIterator, StrictWeakOrdering>(first, last, compare) - first;
+ }
+
+ // Sort the region unless lastSortedEnd indicates it is already sorted.
+ if (lastSortedEnd < nCount)
+ {
+ // If the size is less than or equal to InsertionSortLimit use insertion sort instead of recursing further.
+ if (nCount <= InsertionSortLimit)
+ {
+ eastl::Internal::insertion_sort_already_started<RandomAccessIterator, StrictWeakOrdering>(first, last, first + lastSortedEnd, compare);
+ return RL_SourceRange;
+ }
+ else
+ {
+ const difference_type nMid = nCount / 2;
+
+ ResultLocation firstHalfLocation = RL_SourceRange;
+ // Don't sort the first half if it is already sorted.
+ if (lastSortedEnd < nMid)
+ {
+ firstHalfLocation = sort_impl(first, first + nMid, pBuffer, lastSortedEnd, compare);
+ }
+
+ ResultLocation secondHalfLocation = sort_impl(first + nMid, last, pBuffer + nMid, lastSortedEnd - nMid, compare);
+
+ return merge_halves(first, last, nMid, pBuffer, firstHalfLocation, secondHalfLocation, compare);
+ }
+ }
+ else
+ {
+ EASTL_DEV_ASSERT((eastl::is_sorted<RandomAccessIterator, StrictWeakOrdering>(first, last, compare)));
+ return RL_SourceRange;
+ }
+ }
+
+ // merge_halves
+ //
+ // Merge two sorted regions of elements.
+ // The inputs to this method effectively define two large buffers. The variables 'firstHalfLocation' and 'secondHalfLocation' define where the data to be
+ // merged is located within the two buffers. It is entirely possible that the two areas to be merged could be entirely located in either of the larger buffers.
+ // Upon returning the merged results will be in one of the two buffers (indicated by the return result).
+ static ResultLocation merge_halves(RandomAccessIterator first, RandomAccessIterator last, difference_type nMid, T* pBuffer, ResultLocation firstHalfLocation, ResultLocation secondHalfLocation, StrictWeakOrdering compare)
+ {
+ const difference_type nCount = last - first;
+ if (firstHalfLocation == RL_SourceRange)
+ {
+ if (secondHalfLocation == RL_SourceRange)
+ {
+ eastl::merge<RandomAccessIterator, RandomAccessIterator, T*, StrictWeakOrdering>(first, first + nMid, first + nMid, last, pBuffer, compare);
+ EASTL_DEV_ASSERT((eastl::is_sorted<T*, StrictWeakOrdering>(pBuffer, pBuffer + nCount, compare)));
+ return RL_Buffer;
+ }
+ else
+ {
+ eastl::copy(first, first + nMid, pBuffer);
+ eastl::merge<T*, T*, RandomAccessIterator, StrictWeakOrdering>(pBuffer, pBuffer + nMid, pBuffer + nMid, pBuffer + nCount, first, compare);
+ EASTL_DEV_ASSERT((eastl::is_sorted<RandomAccessIterator, StrictWeakOrdering>(first, last, compare)));
+ return RL_SourceRange;
+ }
+ }
+ else
+ {
+ if (secondHalfLocation == RL_SourceRange)
+ {
+ eastl::copy(first + nMid, last, pBuffer + nMid);
+ eastl::merge<T*, T*, RandomAccessIterator, StrictWeakOrdering>(pBuffer, pBuffer + nMid, pBuffer + nMid, pBuffer + nCount, first, compare);
+ EASTL_DEV_ASSERT((eastl::is_sorted<RandomAccessIterator, StrictWeakOrdering>(first, last, compare)));
+ return RL_SourceRange;
+ }
+ else
+ {
+ eastl::merge<T*, T*, RandomAccessIterator, StrictWeakOrdering>(pBuffer, pBuffer + nMid, pBuffer + nMid, pBuffer + nCount, first, compare);
+ EASTL_DEV_ASSERT((eastl::is_sorted<RandomAccessIterator, StrictWeakOrdering>(first, last, compare)));
+ return RL_SourceRange;
+ }
+ }
+ }
+
+ };
+
+
+ template <typename RandomAccessIterator, typename T, typename StrictWeakOrdering>
+ void merge_sort_buffer(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer, StrictWeakOrdering compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ MergeSorter<RandomAccessIterator, T, StrictWeakOrdering, difference_type, 16>::sort(first, last, pBuffer, compare);
+ }
+
+ template <typename RandomAccessIterator, typename T>
+ inline void merge_sort_buffer(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<RandomAccessIterator>::value_type> Less;
+
+ eastl::merge_sort_buffer<RandomAccessIterator, T, Less>(first, last, pBuffer, Less());
+ }
+
+
+
+ /// merge_sort
+ ///
+ /// Implements the MergeSort algorithm.
+ /// This algorithm allocates memory via the user-supplied allocator. Use merge_sort_buffer
+ /// function if you want a version which doesn't allocate memory.
+ /// Note that merge_sort requires a random access iterator, which usually means
+ /// an array (eg. vector, deque).
+ ///
+ template <typename RandomAccessIterator, typename Allocator, typename StrictWeakOrdering>
+ void merge_sort(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator, StrictWeakOrdering compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const difference_type nCount = last - first;
+
+ if(nCount > 1)
+ {
+ // We need to allocate an array of nCount value_type objects as a temporary buffer.
+ value_type* const pBuffer = (value_type*)allocate_memory(allocator, nCount * sizeof(value_type), EASTL_ALIGN_OF(value_type), 0);
+ eastl::uninitialized_fill(pBuffer, pBuffer + nCount, value_type());
+
+ eastl::merge_sort_buffer<RandomAccessIterator, value_type, StrictWeakOrdering>
+ (first, last, pBuffer, compare);
+
+ eastl::destruct(pBuffer, pBuffer + nCount);
+ EASTLFree(allocator, pBuffer, nCount * sizeof(value_type));
+ }
+ }
+
+ template <typename RandomAccessIterator, typename Allocator>
+ inline void merge_sort(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<RandomAccessIterator>::value_type> Less;
+
+ eastl::merge_sort<RandomAccessIterator, Allocator, Less>(first, last, allocator, Less());
+ }
+
+
+
+ /// partition
+ ///
+ /// Implements the partition algorithm.
+ /// Rearranges the elements in the range [first, last), in such a way that all the elements
+ /// for which pred returns true precede all those for which it returns false. The iterator
+ /// returned points to the first element of the second group.
+ /// The relative ordering within each group is not necessarily the same as before the call.
+ /// See function stable_partition for a function with a similar behavior and stability in
+ /// the ordering.
+ ///
+ /// To do: Implement a version that uses a faster BidirectionalIterator algorithm for the
+ /// case that the iterator range is a bidirectional iterator instead of just an
+ /// input iterator (one direction).
+ ///
+ template<typename InputIterator, typename Predicate>
+ InputIterator partition(InputIterator begin, InputIterator end, Predicate predicate)
+ {
+ if(begin != end)
+ {
+ while(predicate(*begin))
+ {
+ if(++begin == end)
+ return begin;
+ }
+
+ InputIterator middle = begin;
+
+ while(++middle != end)
+ {
+ if(predicate(*middle))
+ {
+ eastl::swap(*begin, *middle);
+ ++begin;
+ }
+ }
+ }
+
+ return begin;
+ }
+
+ /// stable_partition
+ ///
+ /// Performs the same function as @p partition() with the additional
+ /// guarantee that the relative ordering of elements in each group is
+ /// preserved.
+ template <typename ForwardIterator, typename Predicate>
+ ForwardIterator stable_partition(ForwardIterator first, ForwardIterator last, Predicate pred)
+ {
+ first = eastl::find_if_not(first, last, pred);
+
+ if (first == last)
+ return first;
+
+ typedef typename iterator_traits<ForwardIterator>::value_type value_type;
+
+ const auto requested_size = eastl::distance(first, last);
+
+ auto allocator = *get_default_allocator(0);
+ value_type* const buffer =
+ (value_type*)allocate_memory(allocator, requested_size * sizeof(value_type), EASTL_ALIGN_OF(value_type), 0);
+ eastl::uninitialized_fill(buffer, buffer + requested_size, value_type());
+
+ ForwardIterator result1 = first;
+ value_type* result2 = buffer;
+
+ *result2 = eastl::move(*first);
+ ++result2;
+ ++first;
+ for (; first != last; ++first)
+ {
+ if (pred(*first))
+ {
+ *result1 = eastl::move(*first);
+ ++result1;
+ }
+ else
+ {
+ *result2 = eastl::move(*first);
+ ++result2;
+ }
+ }
+
+ eastl::copy(buffer, result2, result1);
+
+ eastl::destruct(buffer, buffer + requested_size);
+ EASTLFree(allocator, buffer, requested_size * sizeof(value_type));
+
+ return result1;
+ }
+
+ /////////////////////////////////////////////////////////////////////
+ // quick_sort
+ //
+ // We do the "introspection sort" variant of quick sort which is now
+ // well-known and understood. You can read about this algorithm in
+ // many articles on quick sort, but briefly what it does is a median-
+ // of-three quick sort whereby the recursion depth is limited to a
+ // some value (after which it gives up on quick sort and switches to
+ // a heap sort) and whereby after a certain amount of sorting the
+ // algorithm stops doing quick-sort and finishes the sorting via
+ // a simple insertion sort.
+ /////////////////////////////////////////////////////////////////////
+
+ #if (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ static const int kQuickSortLimit = 28; // For sorts of random arrays over 100 items, 28 - 32 have been found to be good numbers on x86.
+ #else
+ static const int kQuickSortLimit = 16; // It seems that on other processors lower limits are more beneficial, as they result in fewer compares.
+ #endif
+
+ namespace Internal
+ {
+ template <typename Size>
+ inline Size Log2(Size n)
+ {
+ int i;
+ for(i = 0; n; ++i)
+ n >>= 1;
+ return i - 1;
+ }
+
+ // To do: Investigate the speed of this bit-trick version of Log2.
+ // It may work better on some platforms but not others.
+ //
+ // union FloatUnion {
+ // float f;
+ // uint32_t i;
+ // };
+ //
+ // inline uint32_t Log2(uint32_t x)
+ // {
+ // const FloatInt32Union u = { x };
+ // return (u.i >> 23) - 127;
+ // }
+ }
+
+ template <typename RandomAccessIterator, typename T>
+ inline RandomAccessIterator get_partition_impl(RandomAccessIterator first, RandomAccessIterator last, T&& pivotValue)
+ {
+ using PureT = decay_t<T>;
+
+ for(; ; ++first)
+ {
+ while(eastl::less<PureT>()(*first, pivotValue))
+ {
+ EASTL_VALIDATE_COMPARE(!eastl::less<PureT>()(pivotValue, *first)); // Validate that the compare function is sane.
+ ++first;
+ }
+ --last;
+
+ while(eastl::less<PureT>()(pivotValue, *last))
+ {
+ EASTL_VALIDATE_COMPARE(!eastl::less<PureT>()(*last, pivotValue)); // Validate that the compare function is sane.
+ --last;
+ }
+
+ if(first >= last) // Random access iterators allow operator >=
+ return first;
+
+ eastl::iter_swap(first, last);
+ }
+ }
+
+ /// get_partition
+ ///
+ /// This function takes const T& instead of T because T may have special alignment
+ /// requirements and some compilers (e.g. VC++) are don't respect alignment requirements
+ /// for function arguments.
+ ///
+ template <typename RandomAccessIterator, typename T>
+ inline RandomAccessIterator get_partition(RandomAccessIterator first, RandomAccessIterator last, const T& pivotValue)
+ {
+ const T pivotCopy(pivotValue); // Need to make a temporary because the sequence below is mutating.
+ return get_partition_impl<RandomAccessIterator, const T&>(first, last, pivotCopy);
+ }
+
+ template <typename RandomAccessIterator, typename T>
+ inline RandomAccessIterator get_partition(RandomAccessIterator first, RandomAccessIterator last, T&& pivotValue)
+ {
+ // Note: unlike the copy-constructible variant of get_partition... we can't create a temporary const move-constructible object
+ return get_partition_impl<RandomAccessIterator, T&&>(first, last, eastl::move(pivotValue));
+ }
+
+ template <typename RandomAccessIterator, typename T, typename Compare>
+ inline RandomAccessIterator get_partition_impl(RandomAccessIterator first, RandomAccessIterator last, T&& pivotValue, Compare compare)
+ {
+ for(; ; ++first)
+ {
+ while(compare(*first, pivotValue))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(pivotValue, *first)); // Validate that the compare function is sane.
+ ++first;
+ }
+ --last;
+
+ while(compare(pivotValue, *last))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*last, pivotValue)); // Validate that the compare function is sane.
+ --last;
+ }
+
+ if(first >= last) // Random access iterators allow operator >=
+ return first;
+
+ eastl::iter_swap(first, last);
+ }
+ }
+
+ template <typename RandomAccessIterator, typename T, typename Compare>
+ inline RandomAccessIterator get_partition(RandomAccessIterator first, RandomAccessIterator last, const T& pivotValue, Compare compare)
+ {
+ const T pivotCopy(pivotValue); // Need to make a temporary because the sequence below is mutating.
+ return get_partition_impl<RandomAccessIterator, const T&, Compare>(first, last, pivotCopy, compare);
+ }
+
+ template <typename RandomAccessIterator, typename T, typename Compare>
+ inline RandomAccessIterator get_partition(RandomAccessIterator first, RandomAccessIterator last, T&& pivotValue, Compare compare)
+ {
+ // Note: unlike the copy-constructible variant of get_partition... we can't create a temporary const move-constructible object
+ return get_partition_impl<RandomAccessIterator, T&&, Compare>(first, last, eastl::forward<T>(pivotValue), compare);
+ }
+
+
+ namespace Internal
+ {
+ // This function is used by quick_sort and is not intended to be used by itself.
+ // This is because the implementation below makes an assumption about the input
+ // data that quick_sort satisfies but arbitrary data may not.
+ // There is a standalone insertion_sort function.
+ template <typename RandomAccessIterator>
+ inline void insertion_sort_simple(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ for(RandomAccessIterator current = first; current != last; ++current)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ RandomAccessIterator end(current), prev(current);
+ value_type value(eastl::forward<value_type>(*current));
+
+ for(--prev; eastl::less<value_type>()(value, *prev); --end, --prev) // We skip checking for (prev >= first) because quick_sort (our caller) makes this unnecessary.
+ {
+ EASTL_VALIDATE_COMPARE(!eastl::less<value_type>()(*prev, value)); // Validate that the compare function is sane.
+ *end = eastl::forward<value_type>(*prev);
+ }
+
+ *end = eastl::forward<value_type>(value);
+ }
+ }
+
+
+ // This function is used by quick_sort and is not intended to be used by itself.
+ // This is because the implementation below makes an assumption about the input
+ // data that quick_sort satisfies but arbitrary data may not.
+ // There is a standalone insertion_sort function.
+ template <typename RandomAccessIterator, typename Compare>
+ inline void insertion_sort_simple(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ for(RandomAccessIterator current = first; current != last; ++current)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ RandomAccessIterator end(current), prev(current);
+ value_type value(eastl::forward<value_type>(*current));
+
+ for(--prev; compare(value, *prev); --end, --prev) // We skip checking for (prev >= first) because quick_sort (our caller) makes this unnecessary.
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*prev, value)); // Validate that the compare function is sane.
+ *end = eastl::forward<value_type>(*prev);
+ }
+
+ *end = eastl::forward<value_type>(value);
+ }
+ }
+ } // namespace Internal
+
+
+ template <typename RandomAccessIterator>
+ inline void partial_sort(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ eastl::make_heap<RandomAccessIterator>(first, middle);
+
+ for(RandomAccessIterator i = middle; i < last; ++i)
+ {
+ if(eastl::less<value_type>()(*i, *first))
+ {
+ EASTL_VALIDATE_COMPARE(!eastl::less<value_type>()(*first, *i)); // Validate that the compare function is sane.
+ value_type temp(eastl::forward<value_type>(*i));
+ *i = eastl::forward<value_type>(*first);
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+ (first, difference_type(0), difference_type(middle - first), difference_type(0), eastl::forward<value_type>(temp));
+ }
+ }
+
+ eastl::sort_heap<RandomAccessIterator>(first, middle);
+ }
+
+
+ template <typename RandomAccessIterator, typename Compare>
+ inline void partial_sort(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ eastl::make_heap<RandomAccessIterator, Compare>(first, middle, compare);
+
+ for(RandomAccessIterator i = middle; i < last; ++i)
+ {
+ if(compare(*i, *first))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first, *i)); // Validate that the compare function is sane.
+ value_type temp(eastl::forward<value_type>(*i));
+ *i = eastl::forward<value_type>(*first);
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, difference_type(0), difference_type(middle - first), difference_type(0), eastl::forward<value_type>(temp), compare);
+ }
+ }
+
+ eastl::sort_heap<RandomAccessIterator, Compare>(first, middle, compare);
+ }
+
+
+ template<typename RandomAccessIterator>
+ inline void nth_element(RandomAccessIterator first, RandomAccessIterator nth, RandomAccessIterator last)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ while((last - first) > 5)
+ {
+ const value_type midValue(eastl::median<value_type>(*first, *(first + (last - first) / 2), *(last - 1)));
+ const RandomAccessIterator midPos(eastl::get_partition<RandomAccessIterator, value_type>(first, last, midValue));
+
+ if(midPos <= nth)
+ first = midPos;
+ else
+ last = midPos;
+ }
+
+ eastl::insertion_sort<RandomAccessIterator>(first, last);
+ }
+
+
+ template<typename RandomAccessIterator, typename Compare>
+ inline void nth_element(RandomAccessIterator first, RandomAccessIterator nth, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ while((last - first) > 5)
+ {
+ const value_type midValue(eastl::median<value_type, Compare>(*first, *(first + (last - first) / 2), *(last - 1), compare));
+ const RandomAccessIterator midPos(eastl::get_partition<RandomAccessIterator, value_type, Compare>(first, last, midValue, compare));
+
+ if(midPos <= nth)
+ first = midPos;
+ else
+ last = midPos;
+ }
+
+ eastl::insertion_sort<RandomAccessIterator, Compare>(first, last, compare);
+ }
+
+
+ namespace Internal
+ {
+ EA_DISABLE_VC_WARNING(4702) // unreachable code
+ template <typename RandomAccessIterator, typename Size, typename PivotValueType>
+ inline void quick_sort_impl_helper(RandomAccessIterator first, RandomAccessIterator last, Size kRecursionCount)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ while(((last - first) > kQuickSortLimit) && (kRecursionCount > 0))
+ {
+ const RandomAccessIterator position(eastl::get_partition<RandomAccessIterator, value_type>(first, last,
+ eastl::forward<PivotValueType>(eastl::median<value_type>(eastl::forward<value_type>(*first), eastl::forward<value_type>(*(first + (last - first) / 2)), eastl::forward<value_type>(*(last - 1))))));
+
+ eastl::Internal::quick_sort_impl_helper<RandomAccessIterator, Size, PivotValueType>(position, last, --kRecursionCount);
+ last = position;
+ }
+
+ if(kRecursionCount == 0)
+ eastl::partial_sort<RandomAccessIterator>(first, last, last);
+ }
+
+ template <typename RandomAccessIterator, typename Size, typename Compare, typename PivotValueType>
+ inline void quick_sort_impl_helper(RandomAccessIterator first, RandomAccessIterator last, Size kRecursionCount, Compare compare)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ while(((last - first) > kQuickSortLimit) && (kRecursionCount > 0))
+ {
+ const RandomAccessIterator position(eastl::get_partition<RandomAccessIterator, value_type, Compare>(first, last,
+ eastl::forward<PivotValueType>(eastl::median<value_type, Compare>(eastl::forward<value_type>(*first), eastl::forward<value_type>(*(first + (last - first) / 2)), eastl::forward<value_type>(*(last - 1)), compare)), compare));
+
+ eastl::Internal::quick_sort_impl_helper<RandomAccessIterator, Size, Compare, PivotValueType>(position, last, --kRecursionCount, compare);
+ last = position;
+ }
+
+ if(kRecursionCount == 0)
+ eastl::partial_sort<RandomAccessIterator, Compare>(first, last, last, compare);
+ }
+ EA_RESTORE_VC_WARNING()
+
+ template <typename RandomAccessIterator, typename Size>
+ inline void quick_sort_impl(RandomAccessIterator first, RandomAccessIterator last, Size kRecursionCount,
+ typename eastl::enable_if<eastl::is_copy_constructible<typename iterator_traits<RandomAccessIterator>::value_type>::value>::type* = 0)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ // copy constructors require const value_type
+ quick_sort_impl_helper<RandomAccessIterator, Size, const value_type>(first, last, kRecursionCount);
+ }
+
+ template <typename RandomAccessIterator, typename Size>
+ inline void quick_sort_impl(RandomAccessIterator first, RandomAccessIterator last, Size kRecursionCount,
+ typename eastl::enable_if<eastl::is_move_constructible<typename iterator_traits<RandomAccessIterator>::value_type>::value
+ && !eastl::is_copy_constructible<typename iterator_traits<RandomAccessIterator>::value_type>::value>::type* = 0)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ // move constructors require non-const value_type
+ quick_sort_impl_helper<RandomAccessIterator, Size, value_type>(first, last, kRecursionCount);
+ }
+
+ template <typename RandomAccessIterator, typename Size, typename Compare>
+ inline void quick_sort_impl(RandomAccessIterator first, RandomAccessIterator last, Size kRecursionCount, Compare compare,
+ typename eastl::enable_if<eastl::is_copy_constructible<typename iterator_traits<RandomAccessIterator>::value_type>::value>::type* = 0)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ // copy constructors require const value_type
+ quick_sort_impl_helper<RandomAccessIterator, Size, Compare, const value_type>(first, last, kRecursionCount, compare);
+ }
+
+ template <typename RandomAccessIterator, typename Size, typename Compare>
+ inline void quick_sort_impl(RandomAccessIterator first, RandomAccessIterator last, Size kRecursionCount, Compare compare,
+ typename eastl::enable_if<eastl::is_move_constructible<typename iterator_traits<RandomAccessIterator>::value_type>::value
+ && !eastl::is_copy_constructible<typename iterator_traits<RandomAccessIterator>::value_type>::value>::type* = 0)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ // move constructors require non-const value_type
+ quick_sort_impl_helper<RandomAccessIterator, Size, Compare, value_type>(first, last, kRecursionCount, compare);
+ }
+ }
+
+
+ /// quick_sort
+ ///
+ /// This is an unstable sort.
+ /// quick_sort sorts the elements in [first, last) into ascending order,
+ /// meaning that if i and j are any two valid iterators in [first, last)
+ /// such that i precedes j, then *j is not less than *i. quick_sort is not
+ /// guaranteed to be stable. That is, suppose that *i and *j are equivalent:
+ /// neither one is less than the other. It is not guaranteed that the
+ /// relative order of these two elements will be preserved by sort.
+ ///
+ /// We implement the "introspective" variation of quick-sort. This is
+ /// considered to be the best general-purpose variant, as it avoids
+ /// worst-case behaviour and optimizes the final sorting stage by
+ /// switching to an insertion sort.
+ ///
+ template <typename RandomAccessIterator>
+ void quick_sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+
+ if(first != last)
+ {
+ eastl::Internal::quick_sort_impl<RandomAccessIterator, difference_type>(first, last, 2 * Internal::Log2(last - first));
+
+ if((last - first) > (difference_type)kQuickSortLimit)
+ {
+ eastl::insertion_sort<RandomAccessIterator>(first, first + kQuickSortLimit);
+ eastl::Internal::insertion_sort_simple<RandomAccessIterator>(first + kQuickSortLimit, last);
+ }
+ else
+ eastl::insertion_sort<RandomAccessIterator>(first, last);
+ }
+ }
+
+
+ template <typename RandomAccessIterator, typename Compare>
+ void quick_sort(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+
+ if(first != last)
+ {
+ eastl::Internal::quick_sort_impl<RandomAccessIterator, difference_type, Compare>(first, last, 2 * Internal::Log2(last - first), compare);
+
+ if((last - first) > (difference_type)kQuickSortLimit)
+ {
+ eastl::insertion_sort<RandomAccessIterator, Compare>(first, first + kQuickSortLimit, compare);
+ eastl::Internal::insertion_sort_simple<RandomAccessIterator, Compare>(first + kQuickSortLimit, last, compare);
+ }
+ else
+ eastl::insertion_sort<RandomAccessIterator, Compare>(first, last, compare);
+ }
+ }
+
+
+
+
+ namespace Internal
+ {
+ // Portions of the tim_sort code were originally written by Christopher Swenson.
+ // https://github.com/swenson/sort
+ // All code in this repository, unless otherwise specified, is hereby licensed under the
+ // MIT Public License: Copyright (c) 2010 Christopher Swenson
+
+ const intptr_t kTimSortStackSize = 64; // Question: What's the upper-limit size requirement for this?
+
+ struct tim_sort_run
+ {
+ intptr_t start;
+ intptr_t length;
+ };
+
+
+ // EASTL_COUNT_LEADING_ZEROES
+ //
+ // Count leading zeroes in an integer.
+ //
+ #ifndef EASTL_COUNT_LEADING_ZEROES
+ #if defined(__GNUC__)
+ #if (EA_PLATFORM_PTR_SIZE == 8)
+ #define EASTL_COUNT_LEADING_ZEROES __builtin_clzll
+ #else
+ #define EASTL_COUNT_LEADING_ZEROES __builtin_clz
+ #endif
+ #endif
+
+ #ifndef EASTL_COUNT_LEADING_ZEROES
+ static inline int eastl_count_leading_zeroes(uint64_t x)
+ {
+ if(x)
+ {
+ int n = 0;
+ if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; }
+ if(x & 0xFFFF0000) { n += 16; x >>= 16; }
+ if(x & 0xFFFFFF00) { n += 8; x >>= 8; }
+ if(x & 0xFFFFFFF0) { n += 4; x >>= 4; }
+ if(x & 0xFFFFFFFC) { n += 2; x >>= 2; }
+ if(x & 0xFFFFFFFE) { n += 1; }
+ return 63 - n;
+ }
+ return 64;
+ }
+
+ static inline int eastl_count_leading_zeroes(uint32_t x)
+ {
+ if(x)
+ {
+ int n = 0;
+ if(x <= 0x0000FFFF) { n += 16; x <<= 16; }
+ if(x <= 0x00FFFFFF) { n += 8; x <<= 8; }
+ if(x <= 0x0FFFFFFF) { n += 4; x <<= 4; }
+ if(x <= 0x3FFFFFFF) { n += 2; x <<= 2; }
+ if(x <= 0x7FFFFFFF) { n += 1; }
+ return n;
+ }
+ return 32;
+ }
+
+ #define EASTL_COUNT_LEADING_ZEROES eastl_count_leading_zeroes
+ #endif
+ #endif
+
+
+ // reverse_elements
+ //
+ // Reverses the range [first + start, first + start + size)
+ // To consider: Use void eastl::reverse(BidirectionalIterator first, BidirectionalIterator last);
+ //
+ template <typename RandomAccessIterator>
+ void reverse_elements(RandomAccessIterator first, intptr_t start, intptr_t end)
+ {
+ while(start < end)
+ {
+ eastl::swap(*(first + start), *(first + end));
+ ++start;
+ --end;
+ }
+ }
+
+
+ // tim_sort_count_run
+ //
+ // Finds the length of a run which is already sorted (either up or down).
+ // If the run is in reverse order, this function puts it in regular order.
+ //
+ template <typename RandomAccessIterator, typename StrictWeakOrdering>
+ intptr_t tim_sort_count_run(const RandomAccessIterator first, const intptr_t start, const intptr_t size, StrictWeakOrdering compare)
+ {
+ if((size - start) > 1) // If there is anything in the set...
+ {
+ intptr_t curr = (start + 2);
+
+ if(!compare(*(first + start + 1), *(first + start))) // If (first[start + 1] >= first[start]) (If the run is increasing) ...
+ {
+ for(;; ++curr)
+ {
+ if(curr >= (size - 1)) // If we are at the end of the data... this run is done.
+ break;
+
+ if(compare(*(first + curr), *(first + curr - 1))) // If this item is not in order... this run is done.
+ break;
+ }
+ }
+ else // Else it is decreasing.
+ {
+ for(;; ++curr)
+ {
+ if(curr >= (size - 1)) // If we are at the end of the data... this run is done.
+ break;
+
+ if(!compare(*(first + curr), *(first + curr - 1))) // If this item is not in order... this run is done.
+ break; // Note that we intentionally compare against <= 0 and not just < 0. This is because
+ } // The reverse_elements call below could reverse two equal elements and break our stability requirement.
+
+ reverse_elements(first, start, curr - 1);
+ }
+
+ return (curr - start);
+ }
+
+ // Else we have just one item in the set.
+ return 1;
+ }
+
+
+ // Input Return
+ // --------------
+ // 64 32
+ // 65 33
+ // 66 33
+ // 67 34
+ // 68 34
+ // ...
+ // 125 63
+ // 126 63
+ // 127 64
+ // 128 32
+ // 129 33
+ // 130 33
+ // 131 33
+ // 132 33
+ // 133 34
+ // 134 34
+ // 135 34
+ // 136 34
+ // 137 35
+ // ...
+ //
+ // This function will return a value that is always in the range of [32, 64].
+ //
+ static inline intptr_t timsort_compute_minrun(intptr_t size)
+ {
+ const int32_t top_bit = (int32_t)((sizeof(intptr_t) * 8) - EASTL_COUNT_LEADING_ZEROES((uintptr_t)size));
+ const int32_t shift = (top_bit > 6) ? (top_bit - 6) : 0;
+ const intptr_t mask = (intptr_t(1) << shift) - 1;
+ intptr_t minrun = (intptr_t)(size >> shift);
+
+ if(mask & size)
+ ++minrun;
+
+ return minrun;
+ }
+
+
+ template <typename RandomAccessIterator, typename T, typename StrictWeakOrdering>
+ void tim_sort_merge(RandomAccessIterator first, const tim_sort_run* run_stack, const intptr_t stack_curr,
+ T* pBuffer, StrictWeakOrdering compare)
+ {
+ const intptr_t A = run_stack[stack_curr - 2].length;
+ const intptr_t B = run_stack[stack_curr - 1].length;
+ const intptr_t curr = run_stack[stack_curr - 2].start;
+
+ EASTL_DEV_ASSERT((A < 10000000) && (B < 10000000) && (curr < 10000000)); // Sanity check.
+
+ if(A < B) // If the first run is shorter than the second run... merge left.
+ {
+ // Copy to another location so we have room in the main array to put the sorted items.
+ eastl::copy(first + curr, first + curr + A, pBuffer);
+
+ #if EASTL_DEV_DEBUG
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ for(intptr_t i = 0; i < A; i++)
+ *(first + curr + i) = value_type();
+ #endif
+
+ intptr_t i = 0;
+ intptr_t j = curr + A;
+
+ for(intptr_t k = curr; k < curr + A + B; k++)
+ {
+ if((i < A) && (j < (curr + A + B)))
+ {
+ if(!compare(*(first + j), *(pBuffer + i))) // If (first[j] >= pBuffer[i])...
+ *(first + k) = *(pBuffer + i++);
+ else
+ *(first + k) = *(first + j++);
+ }
+ else if(i < A)
+ *(first + k) = *(pBuffer + i++);
+ else
+ *(first + k) = *(first + j++);
+ }
+ }
+ else // Else the second run is equal or shorter... merge right.
+ {
+ eastl::copy(first + curr + A, first + curr + A + B, pBuffer);
+
+ intptr_t i = B - 1;
+ intptr_t j = curr + A - 1;
+
+ for(intptr_t k = curr + A + B - 1; k >= curr; k--)
+ {
+ if((i >= 0) && (j >= curr))
+ {
+ if(compare(*(pBuffer + i), *(first + j))) // If (pBuffer[i] < first[j]) ...
+ *(first + k) = *(first + j--);
+ else
+ *(first + k) = *(pBuffer + i--);
+ }
+ else if(i >= 0)
+ *(first + k) = *(pBuffer + i--);
+ else
+ *(first + k) = *(first + j--);
+ }
+ }
+ }
+
+
+ // See the timsort.txt file for an explanation of this function.
+ //
+ // ------------------------------------------------------------------------
+ // What turned out to be a good compromise maintains two invariants on the
+ // stack entries, where A, B and C are the lengths of the three righmost
+ // not-yet merged slices:
+ // 1. A > B+C
+ // 2. B > C
+ // ------------------------------------------------------------------------
+ //
+ static inline bool timsort_check_invariant(tim_sort_run* run_stack, const intptr_t stack_curr)
+ {
+ // To do: Optimize this for the most common type of values.
+ if(stack_curr > 2)
+ {
+ const intptr_t A = run_stack[stack_curr - 3].length;
+ const intptr_t B = run_stack[stack_curr - 2].length;
+ const intptr_t C = run_stack[stack_curr - 1].length;
+
+ EASTL_DEV_ASSERT((A < 10000000) && (B < 10000000) && (C < 10000000)); // Sanity check.
+
+ if((A <= (B + C)) || (B <= C))
+ return true; // Merge the right-most runs.
+ }
+ else if(stack_curr == 2)
+ {
+ const intptr_t A = run_stack[stack_curr - 2].length;
+ const intptr_t B = run_stack[stack_curr - 1].length;
+
+ EASTL_DEV_ASSERT((A < 10000000) && (B < 10000000)); // Sanity check.
+
+ if(A <= B)
+ return true; // Merge the right-most runs.
+ }
+
+ return false; // Don't merge the right-most runs.
+ }
+
+
+ template <typename RandomAccessIterator, typename T, typename StrictWeakOrdering>
+ intptr_t tim_sort_collapse(RandomAccessIterator first, tim_sort_run* run_stack, intptr_t stack_curr,
+ T* pBuffer, const intptr_t size, StrictWeakOrdering compare)
+ {
+ // If the run_stack only has one thing on it, we are done with the collapse.
+ while(stack_curr > 1)
+ {
+ // If this is the last merge, just do it.
+ if((stack_curr == 2) && ((run_stack[0].length + run_stack[1].length) == size))
+ {
+ tim_sort_merge<RandomAccessIterator, T, StrictWeakOrdering>(first, run_stack, stack_curr, pBuffer, compare);
+ run_stack[0].length += run_stack[1].length;
+ stack_curr--;
+
+ #if EASTL_DEV_DEBUG
+ memset(&run_stack[stack_curr], 0, sizeof(run_stack[stack_curr]));
+ #endif
+
+ break;
+ }
+ // Check if the invariant is off for a run_stack of 2 elements.
+ else if((stack_curr == 2) && (run_stack[0].length <= run_stack[1].length))
+ {
+ tim_sort_merge<RandomAccessIterator, T, StrictWeakOrdering>(first, run_stack, stack_curr, pBuffer, compare);
+ run_stack[0].length += run_stack[1].length;
+ stack_curr--;
+
+ #if EASTL_DEV_DEBUG
+ memset(&run_stack[stack_curr], 0, sizeof(run_stack[stack_curr]));
+ #endif
+
+ break;
+ }
+ else if (stack_curr == 2)
+ break;
+
+ const intptr_t A = run_stack[stack_curr - 3].length;
+ const intptr_t B = run_stack[stack_curr - 2].length;
+ const intptr_t C = run_stack[stack_curr - 1].length;
+
+ if(A <= (B + C)) // Check first invariant.
+ {
+ if(A < C)
+ {
+ tim_sort_merge<RandomAccessIterator, T, StrictWeakOrdering>(first, run_stack, stack_curr - 1, pBuffer, compare);
+
+ stack_curr--;
+ run_stack[stack_curr - 2].length += run_stack[stack_curr - 1].length; // Merge A and B.
+ run_stack[stack_curr - 1] = run_stack[stack_curr];
+
+ #if EASTL_DEV_DEBUG
+ EASTL_DEV_ASSERT((run_stack[stack_curr - 2].start + run_stack[stack_curr - 2].length) <= size);
+ EASTL_DEV_ASSERT((run_stack[stack_curr - 1].start + run_stack[stack_curr - 1].length) <= size);
+ memset(&run_stack[stack_curr], 0, sizeof(run_stack[stack_curr]));
+ #endif
+ }
+ else
+ {
+ tim_sort_merge<RandomAccessIterator, T, StrictWeakOrdering>(first, run_stack, stack_curr, pBuffer, compare); // Merge B and C.
+
+ stack_curr--;
+ run_stack[stack_curr - 1].length += run_stack[stack_curr].length;
+
+ #if EASTL_DEV_DEBUG
+ EASTL_DEV_ASSERT((run_stack[stack_curr - 1].start + run_stack[stack_curr - 1].length) <= size);
+ memset(&run_stack[stack_curr], 0, sizeof(run_stack[stack_curr]));
+ #endif
+ }
+ }
+ else if(B <= C) // Check second invariant
+ {
+ tim_sort_merge<RandomAccessIterator, T, StrictWeakOrdering>(first, run_stack, stack_curr, pBuffer, compare);
+
+ stack_curr--;
+ run_stack[stack_curr - 1].length += run_stack[stack_curr].length; // Merge B and C.
+
+ #if EASTL_DEV_DEBUG
+ EASTL_DEV_ASSERT((run_stack[stack_curr - 1].start + run_stack[stack_curr - 1].length) <= size);
+ memset(&run_stack[stack_curr], 0, sizeof(run_stack[stack_curr]));
+ #endif
+ }
+ else
+ break;
+ }
+
+ return stack_curr;
+ }
+
+
+ // tim_sort_add_run
+ //
+ // Return true if the sort is done.
+ //
+ template <typename RandomAccessIterator, typename T, typename StrictWeakOrdering>
+ bool tim_sort_add_run(tim_sort_run* run_stack, RandomAccessIterator first, T* pBuffer, const intptr_t size, const intptr_t minrun,
+ intptr_t& len, intptr_t& run, intptr_t& curr, intptr_t& stack_curr, StrictWeakOrdering compare)
+ {
+ len = tim_sort_count_run<RandomAccessIterator, StrictWeakOrdering>(first, curr, size, compare); // This will count the length of the run and reverse the run if it is backwards.
+ run = minrun;
+
+ if(run < minrun) // Always make runs be of minrun length (we'll sort the additional data as needed below)
+ run = minrun;
+
+ if(run > (size - curr)) // But if there isn't minrun data remaining, just sort what's remaining.
+ run = (size - curr);
+
+ if(run > len) // If there is any additional data we want to sort to bring up the run length to minrun.
+ {
+ insertion_sort_already_started<RandomAccessIterator, StrictWeakOrdering>(first + curr, first + curr + run, first + curr + len, compare);
+ len = run;
+ }
+
+ // At this point, run will be equal to minrun or will go to the end of our data.
+ // Add this run to our stack of runs.
+ EASTL_DEV_ASSERT(stack_curr < kTimSortStackSize);
+ EASTL_DEV_ASSERT((curr >= 0) && (curr < size) && ((curr + len) <= size));
+
+ run_stack[stack_curr].start = curr;
+ run_stack[stack_curr].length = len;
+ stack_curr++;
+
+ // Move to the beginning of the next run in the data.
+ curr += len;
+
+ if(curr == size) // If we have hit the end of the data...
+ {
+ while(stack_curr > 1) // If there is any more than one run... (else all the data is sorted)
+ {
+ tim_sort_merge<RandomAccessIterator, T, StrictWeakOrdering>(first, run_stack, stack_curr, pBuffer, compare);
+
+ run_stack[stack_curr - 2].length += run_stack[stack_curr - 1].length;
+ stack_curr--;
+
+ #if EASTL_DEV_DEBUG
+ EASTL_DEV_ASSERT((run_stack[stack_curr - 1].start + run_stack[stack_curr - 1].length) <= size);
+ memset(&run_stack[stack_curr], 0, sizeof(run_stack[stack_curr]));
+ #endif
+ }
+
+ return true; // We are done with sorting.
+ }
+
+ return false;
+ }
+
+ } // namespace Internal
+
+
+ // tim_sort_buffer
+ //
+ /// This is a stable sort.
+ // Implements the tim-sort sorting algorithm with a user-provided scratch buffer.
+ // http://en.wikipedia.org/wiki/Timsort
+ // This sort is the fastest sort when sort stability (maintaining order of equal values) is required and
+ // data sets are non-trivial (size >= 15). It's also the fastest sort (e.g. faster than quick_sort) for
+ // the case that at at least half your data is already sorted. Otherwise, eastl::quick_sort is about 10%
+ // faster than tim_sort_buffer but is not a stable sort. There are some reports that tim_sort outperforms
+ // quick_sort but most of these aren't taking into account that optimal quick_sort implementations use
+ // a hybrid approach called "introsort" (http://en.wikipedia.org/wiki/Introsort) which improves quick_sort
+ // considerably in practice.
+ //
+ // Strengths:
+ // - Fastest stable sort for most sizes of data.
+ // - Fastest sort for containers of data already mostly sorted.
+ // - Simpler to understand than quick_sort.
+ //
+ // Weaknesses:
+ // - User must provide a scratch buffer, otherwise the buffer is dynamically allocated during runtime.
+ // - Not as fast as quick_sort for the general case of randomized data.
+ // - Requires a RandomAccessIterator; thus must be on an array container type and not a list container type.
+ // - Uses a lot of code to implement; thus it's not great when there is little room for more code.
+ //
+ // The pBuffer parameter must hold at least ((last-first)/2) elements (i.e. half the elements of the container).
+ // This minimum size is a worst-case size requirement, but handles all possible cases. pBuffer is just a scratch
+ // buffer and is not needed after the return of this function, and doesn't need to be seeded with any particular
+ // values upon entering this function.
+ //
+ // Example usage:
+ // int intArray[64];
+ // int buffer[32];
+ // ...
+ // tim_sort_buffer(intArray, intArray + 64, buffer);
+ //
+ template <typename RandomAccessIterator, typename T, typename StrictWeakOrdering>
+ void tim_sort_buffer(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer, StrictWeakOrdering compare)
+ {
+ using namespace Internal;
+
+ // To consider: Convert the implementation to use first/last instead of first/size.
+ const intptr_t size = (intptr_t)(last - first);
+
+ if(size < 64)
+ insertion_sort_already_started(first, first + size, first + 1, compare);
+ else
+ {
+ tim_sort_run run_stack[kTimSortStackSize];
+ intptr_t stack_curr = 0;
+ intptr_t len, run;
+ intptr_t curr = 0;
+ const intptr_t minrun = timsort_compute_minrun(size);
+
+ #if EASTL_DEV_DEBUG
+ memset(run_stack, 0, sizeof(run_stack));
+ #endif
+
+ if(tim_sort_add_run<RandomAccessIterator, T, StrictWeakOrdering>(run_stack, first, pBuffer, size, minrun, len, run, curr, stack_curr, compare))
+ return;
+ if(tim_sort_add_run<RandomAccessIterator, T, StrictWeakOrdering>(run_stack, first, pBuffer, size, minrun, len, run, curr, stack_curr, compare))
+ return;
+ if(tim_sort_add_run<RandomAccessIterator, T, StrictWeakOrdering>(run_stack, first, pBuffer, size, minrun, len, run, curr, stack_curr, compare))
+ return;
+
+ for(;;)
+ {
+ if(timsort_check_invariant(run_stack, stack_curr))
+ stack_curr = tim_sort_collapse<RandomAccessIterator, T, StrictWeakOrdering>(first, run_stack, stack_curr, pBuffer, size, compare);
+ else
+ {
+ if(tim_sort_add_run<RandomAccessIterator, T, StrictWeakOrdering>(run_stack, first, pBuffer, size, minrun, len, run, curr, stack_curr, compare))
+ break;
+ }
+ }
+ }
+ }
+
+
+ template <typename RandomAccessIterator, typename T>
+ inline void tim_sort_buffer(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer)
+ {
+ typedef eastl::less<T> Less;
+
+ eastl::tim_sort_buffer<RandomAccessIterator, T, Less>(first, last, pBuffer, Less());
+ }
+
+
+
+
+ /// radix_sort
+ ///
+ /// Implements a classic LSD (least significant digit) radix sort.
+ /// See http://en.wikipedia.org/wiki/Radix_sort.
+ /// This sort requires that the sorted data be of a type that has a member
+ /// radix_type typedef and an mKey member of that type. The type must be
+ /// an integral type. This limits what can be sorted, but radix_sort is
+ /// very fast -- typically faster than any other sort.
+ /// For example:
+ /// struct Sortable {
+ /// typedef int radix_type;
+ /// radix_type mKey;
+ /// // User data goes here, or the user can inherit from Sortable.
+ /// };
+ /// or, more generally:
+ /// template <typname Integer>
+ /// struct Sortable {
+ /// typedef Integer radix_type;
+ /// Integer mKey;
+ /// };
+ ///
+ /// Example usage:
+ /// struct Element {
+ /// typedef uint16_t radix_type;
+ /// uint16_t mKey;
+ /// uint16_t mUserData;
+ /// };
+ ///
+ /// Element elementArray[100];
+ /// Element buffer[100];
+ ///
+ /// radix_sort<Element*, extract_radix_key<Element> >(elementArray, elementArray + 100, buffer);
+ ///
+ /// To consider: A static linked-list implementation may be faster than the version here.
+
+ namespace Internal
+ {
+ /// extract_radix_key
+ ///
+ /// Default radix sort integer value reader. It expects the sorted elements
+ /// to have an integer member of type radix_type and of name "mKey".
+ ///
+ template <typename Node>
+ struct extract_radix_key
+ {
+ typedef typename Node::radix_type radix_type;
+
+ const radix_type operator()(const Node& x) const
+ { return x.mKey; }
+ };
+
+ // The radix_sort implementation uses two optimizations that are not part of a typical radix sort implementation.
+ // 1. Computing a histogram (i.e. finding the number of elements per bucket) for the next pass is done in parallel with the loop that "scatters"
+ // elements in the current pass. The advantage is that it avoids the memory traffic / cache pressure of reading keys in a separate operation.
+ // Note: It would also be possible to compute all histograms in a single pass. However, that would increase the amount of stack space used and
+ // also increase cache pressure slightly. However, it could still be faster under some situations.
+ // 2. If all elements are mapped to a single bucket, then there is no need to perform a scatter operation. Instead the elements are left in place
+ // and only copied if they need to be copied to the final output buffer.
+ template <typename RandomAccessIterator, typename ExtractKey, int DigitBits, typename IntegerType>
+ void radix_sort_impl(RandomAccessIterator first,
+ RandomAccessIterator last,
+ RandomAccessIterator buffer,
+ ExtractKey extractKey,
+ IntegerType)
+ {
+ RandomAccessIterator srcFirst = first;
+ EA_CONSTEXPR_OR_CONST size_t numBuckets = 1 << DigitBits;
+ EA_CONSTEXPR_OR_CONST IntegerType bucketMask = numBuckets - 1;
+
+ // The alignment of this variable isn't required; it merely allows the code below to be faster on some platforms.
+ uint32_t EA_PREFIX_ALIGN(EASTL_PLATFORM_PREFERRED_ALIGNMENT) bucketSize[numBuckets];
+ uint32_t EA_PREFIX_ALIGN(EASTL_PLATFORM_PREFERRED_ALIGNMENT) bucketPosition[numBuckets];
+
+ RandomAccessIterator temp;
+ uint32_t i;
+
+ bool doSeparateHistogramCalculation = true;
+ uint32_t j;
+ for (j = 0; j < (8 * sizeof(IntegerType)); j += DigitBits)
+ {
+ if (doSeparateHistogramCalculation)
+ {
+ memset(bucketSize, 0, sizeof(bucketSize));
+ // Calculate histogram for the first scatter operation
+ for (temp = srcFirst; temp != last; ++temp)
+ ++bucketSize[(extractKey(*temp) >> j) & bucketMask];
+ }
+
+ // If a single bucket contains all of the elements, then don't bother redistributing all elements to the
+ // same bucket.
+ if (bucketSize[((extractKey(*srcFirst) >> j) & bucketMask)] == uint32_t(last - srcFirst))
+ {
+ // Set flag to ensure histogram is computed for next digit position.
+ doSeparateHistogramCalculation = true;
+ }
+ else
+ {
+ // The histogram is either not needed or it will be calculated in parallel with the scatter operation below for better cache efficiency.
+ doSeparateHistogramCalculation = false;
+
+ // If this is the last digit position, then don't calculate a histogram
+ if (j == (8 * sizeof(IntegerType) - DigitBits))
+ {
+ bucketPosition[0] = 0;
+ for (i = 0; i < numBuckets - 1; i++)
+ {
+ bucketPosition[i + 1] = bucketPosition[i] + bucketSize[i];
+ }
+
+ for (temp = srcFirst; temp != last; ++temp)
+ {
+ IntegerType key = extractKey(*temp);
+ const size_t digit = (key >> j) & bucketMask;
+ buffer[bucketPosition[digit]++] = *temp;
+ }
+ }
+ // Compute the histogram while performing the scatter operation
+ else
+ {
+ bucketPosition[0] = 0;
+ for (i = 0; i < numBuckets - 1; i++)
+ {
+ bucketPosition[i + 1] = bucketPosition[i] + bucketSize[i];
+ bucketSize[i] = 0; // Clear the bucket for the next pass
+ }
+ bucketSize[numBuckets - 1] = 0;
+
+ uint32_t jNext = j + DigitBits;
+ for (temp = srcFirst; temp != last; ++temp)
+ {
+ IntegerType key = extractKey(*temp);
+ const size_t digit = (key >> j) & bucketMask;
+ buffer[bucketPosition[digit]++] = *temp;
+
+ // Update histogram for the next scatter operation
+ ++bucketSize[(extractKey(*temp) >> jNext) & bucketMask];
+ }
+ }
+
+ last = buffer + (last - srcFirst);
+ temp = srcFirst;
+ srcFirst = buffer;
+ buffer = temp;
+ }
+ }
+
+ if (srcFirst != first)
+ {
+ // Copy values back into the expected buffer
+ for (temp = srcFirst; temp != last; ++temp)
+ *buffer++ = *temp;
+ }
+ }
+ } // namespace Internal
+
+ template <typename RandomAccessIterator, typename ExtractKey, int DigitBits = 8>
+ void radix_sort(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator buffer)
+ {
+ static_assert(DigitBits > 0, "DigitBits must be > 0");
+ static_assert(DigitBits <= (sizeof(typename ExtractKey::radix_type) * 8), "DigitBits must be <= the size of the key (in bits)");
+ eastl::Internal::radix_sort_impl<RandomAccessIterator, ExtractKey, DigitBits>(first, last, buffer, ExtractKey(), typename ExtractKey::radix_type());
+ }
+
+
+
+ /// comb_sort
+ ///
+ /// This is an unstable sort.
+ /// Implements the CombSort algorithm; in particular, implements the CombSort11 variation
+ /// of the CombSort algorithm, based on the reference to '11' in the implementation.
+ ///
+ /// To consider: Use a comb sort table instead of the '((nSpace * 10) + 3) / 13' expression.
+ /// Ideal tables can be found on the Internet by looking up "comb sort table".
+ ///
+ template <typename ForwardIterator, typename StrictWeakOrdering>
+ void comb_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type difference_type;
+
+ ForwardIterator iCurrent, iNext;
+ difference_type length = eastl::distance(first, last);
+ difference_type nSpace = length;
+
+ for(bool bSwapped = false; (nSpace > 1) || bSwapped; )
+ {
+ nSpace = ((nSpace * 10) + 3) / 13; // Integer division is less than ideal.
+
+ if((nSpace == 9) || (nSpace == 10))
+ nSpace = 11;
+
+ iCurrent = iNext = first;
+ eastl::advance(iNext, nSpace);
+
+ for(bSwapped = false; iNext != last; iCurrent++, iNext++)
+ {
+ if(compare(*iNext, *iCurrent))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+ eastl::iter_swap(iCurrent, iNext);
+ bSwapped = true;
+ }
+ }
+ }
+ } // comb_sort
+
+ template <typename ForwardIterator>
+ inline void comb_sort(ForwardIterator first, ForwardIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<ForwardIterator>::value_type> Less;
+
+ eastl::comb_sort<ForwardIterator, Less>(first, last, Less());
+ }
+
+
+
+
+ /// bubble_sort
+ ///
+ /// This is a stable sort.
+ /// Implements the BubbleSort algorithm. This algorithm is only useful for
+ /// small range sizes, such as 10 or less items. You may be better off using
+ /// insertion_sort for cases where bubble_sort works.
+ ///
+ namespace Internal
+ {
+ template <typename ForwardIterator, typename StrictWeakOrdering>
+ void bubble_sort_impl(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ ForwardIterator iCurrent, iNext;
+
+ while(first != last)
+ {
+ iNext = iCurrent = first;
+
+ for(++iNext; iNext != last; iCurrent = iNext, ++iNext)
+ {
+ if(compare(*iNext, *iCurrent))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+ eastl::iter_swap(iCurrent, iNext);
+ }
+ }
+ last = iCurrent;
+ }
+ }
+
+ template <typename BidirectionalIterator, typename StrictWeakOrdering>
+ void bubble_sort_impl(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+ if(first != last)
+ {
+ BidirectionalIterator iCurrent, iNext, iLastModified;
+
+ last--;
+
+ while(first != last)
+ {
+ iLastModified = iNext = iCurrent = first;
+
+ for(++iNext; iCurrent != last; iCurrent = iNext, ++iNext)
+ {
+ if(compare(*iNext, *iCurrent))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+ iLastModified = iCurrent;
+ eastl::iter_swap(iCurrent, iNext);
+ }
+ }
+
+ last = iLastModified;
+ }
+ }
+ }
+ } // namespace Internal
+
+ template <typename ForwardIterator, typename StrictWeakOrdering>
+ inline void bubble_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+
+ eastl::Internal::bubble_sort_impl<ForwardIterator, StrictWeakOrdering>(first, last, compare, IC());
+ }
+
+ template <typename ForwardIterator>
+ inline void bubble_sort(ForwardIterator first, ForwardIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<ForwardIterator>::value_type> Less;
+ typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+
+ eastl::Internal::bubble_sort_impl<ForwardIterator, Less>(first, last, Less(), IC());
+ }
+
+
+
+ /// sort
+ ///
+ /// We use quick_sort by default. See quick_sort for details.
+ ///
+ /// EASTL_DEFAULT_SORT_FUNCTION
+ /// If a default sort function is specified then call it, otherwise use EASTL's default quick_sort.
+ /// EASTL_DEFAULT_SORT_FUNCTION must be namespace-qualified and include any necessary template
+ /// parameters (e.g. eastl::comb_sort instead of just comb_sort), and it must be visible to this code.
+ /// The EASTL_DEFAULT_SORT_FUNCTION must be provided in two versions:
+ /// template <typename RandomAccessIterator>
+ /// void EASTL_DEFAULT_SORT_FUNCTION(RandomAccessIterator first, RandomAccessIterator last);
+ ///
+ /// template <typename RandomAccessIterator, typename Compare>
+ /// void EASTL_DEFAULT_SORT_FUNCTION(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ ///
+ template <typename RandomAccessIterator>
+ inline void sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ #if defined(EASTL_DEFAULT_SORT_FUNCTION)
+ EASTL_DEFAULT_SORT_FUNCTION(first, last);
+ #else
+ eastl::quick_sort<RandomAccessIterator>(first, last);
+ #endif
+ }
+
+ template <typename RandomAccessIterator, typename Compare>
+ inline void sort(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ #if defined(EASTL_DEFAULT_SORT_FUNCTION)
+ EASTL_DEFAULT_SORT_FUNCTION(first, last, compare);
+ #else
+ eastl::quick_sort<RandomAccessIterator, Compare>(first, last, compare);
+ #endif
+ }
+
+
+
+ /// stable_sort
+ ///
+ /// We use merge_sort by default. See merge_sort for details.
+ /// Beware that the used merge_sort -- and thus stable_sort -- allocates
+ /// memory during execution. Try using merge_sort_buffer if you want
+ /// to avoid memory allocation.
+ ///
+ /// EASTL_DEFAULT_STABLE_SORT_FUNCTION
+ /// If a default sort function is specified then call it, otherwise use EASTL's default merge_sort.
+ /// EASTL_DEFAULT_STABLE_SORT_FUNCTION must be namespace-qualified and include any necessary template
+ /// parameters (e.g. eastl::tim_sort instead of just tim_sort), and it must be visible to this code.
+ /// The EASTL_DEFAULT_STABLE_SORT_FUNCTION must be provided in three versions, though the third
+ /// allocation implementation may choose to ignore the allocator parameter:
+ /// template <typename RandomAccessIterator, typename StrictWeakOrdering>
+ /// void EASTL_DEFAULT_STABLE_SORT_FUNCTION(RandomAccessIterator first, RandomAccessIterator last, StrictWeakOrdering compare);
+ ///
+ /// template <typename RandomAccessIterator>
+ /// void EASTL_DEFAULT_STABLE_SORT_FUNCTION(RandomAccessIterator first, RandomAccessIterator last);
+ ///
+ /// template <typename RandomAccessIterator, typename Allocator, typename StrictWeakOrdering>
+ /// void EASTL_DEFAULT_STABLE_SORT_FUNCTION(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator, StrictWeakOrdering compare);
+ ///
+ template <typename RandomAccessIterator, typename StrictWeakOrdering>
+ void stable_sort(RandomAccessIterator first, RandomAccessIterator last, StrictWeakOrdering compare)
+ {
+ #if defined(EASTL_DEFAULT_STABLE_SORT_FUNCTION)
+ EASTL_DEFAULT_STABLE_SORT_FUNCTION(first, last, *get_default_allocator(0), compare);
+ #else
+ eastl::merge_sort<RandomAccessIterator, EASTLAllocatorType, StrictWeakOrdering>
+ (first, last, *get_default_allocator(0), compare);
+ #endif
+ }
+
+ template <typename RandomAccessIterator>
+ void stable_sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ #if defined(EASTL_DEFAULT_STABLE_SORT_FUNCTION)
+ EASTL_DEFAULT_STABLE_SORT_FUNCTION(first, last, *get_default_allocator(0));
+ #else
+ eastl::merge_sort<RandomAccessIterator, EASTLAllocatorType>
+ (first, last, *get_default_allocator(0));
+ #endif
+ }
+
+ template <typename RandomAccessIterator, typename Allocator, typename StrictWeakOrdering>
+ void stable_sort(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator, StrictWeakOrdering compare)
+ {
+ #if defined(EASTL_DEFAULT_STABLE_SORT_FUNCTION)
+ EASTL_DEFAULT_STABLE_SORT_FUNCTION(first, last, allocator, compare);
+ #else
+ eastl::merge_sort<RandomAccessIterator, Allocator, StrictWeakOrdering>(first, last, allocator, compare);
+ #endif
+ }
+
+ // This is not defined because it would cause compiler errors due to conflicts with a version above.
+ //template <typename RandomAccessIterator, typename Allocator>
+ //void stable_sort(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator)
+ //{
+ // #if defined(EASTL_DEFAULT_STABLE_SORT_FUNCTION)
+ // EASTL_DEFAULT_STABLE_SORT_FUNCTION<RandomAccessIterator, Allocator>(first, last, allocator);
+ // #else
+ // eastl::merge_sort<RandomAccessIterator, Allocator>(first, last, allocator);
+ // #endif
+ //}
+
+
+
+
+ /*
+ // Something to consider adding: An eastl sort which uses qsort underneath.
+ // The primary purpose of this is to have an eastl interface for sorting which
+ // results in very little code generation, since all instances map to the
+ // C qsort function.
+
+ template <typename T>
+ int small_footprint_sort_func(const void* a, const void* b)
+ {
+ if(*(const T*)a < *(const T*)b)
+ return -1;
+ if(*(const T*)a > *(const T*)b)
+ return +1;
+ return 0;
+ }
+
+ template <typename ContiguousIterator>
+ void small_footprint_sort(ContiguousIterator first, ContiguousIterator last)
+ {
+ typedef typename eastl::iterator_traits<ContiguousIterator>::value_type value_type;
+
+ qsort(first, (size_t)eastl::distance(first, last), sizeof(value_type), small_footprint_sort_func<value_type>);
+ }
+ */
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
diff --git a/EASTL/include/EASTL/span.h b/EASTL/include/EASTL/span.h
new file mode 100644
index 0000000..9c47f5b
--- /dev/null
+++ b/EASTL/include/EASTL/span.h
@@ -0,0 +1,441 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the eastl::span which is part of the C++ standard
+// STL library specification.
+//
+// eastl::span is a non-owning container that refers to a contiguous block of
+// memory. It bundles up the classic pattern of a pointer and a size into a
+// single type. A span can either have a static extent, in which case the
+// number of elements in the sequence is known and encoded in the type, or a
+// dynamic extent.
+//
+// http://en.cppreference.com/w/cpp/container/span
+// http://eel.is/c++draft/views#span.syn
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_SPAN_H
+#define EASTL_SPAN_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/array.h>
+
+namespace eastl
+{
+ static EA_CONSTEXPR size_t dynamic_extent = size_t(-1);
+
+ namespace Internal
+ {
+ // HasSizeAndData
+ //
+ // custom type trait to determine if eastl::data(Container) and eastl::size(Container) are well-formed.
+ //
+ template <typename, typename = void>
+ struct HasSizeAndData : eastl::false_type {};
+
+ template <typename T>
+ struct HasSizeAndData<T, void_t<decltype(eastl::size(eastl::declval<T>())), decltype(eastl::data(eastl::declval<T>()))>> : eastl::true_type {};
+
+ // SubspanExtent
+ //
+ // Integral constant that calculates the resulting extent of a templated subspan operation.
+ //
+ // If Count is not dynamic_extent then SubspanExtent::value is Count,
+ // otherwise, if Extent is not dynamic_extent, SubspanExtent::value is (Extent - Offset),
+ // otherwise, SubspanExtent::value is dynamic_extent.
+ //
+ template<size_t Extent, size_t Offset, size_t Count>
+ struct SubspanExtent : eastl::integral_constant<size_t, (Count != dynamic_extent ? Count : (Extent != dynamic_extent ? (Extent - Offset) : dynamic_extent))> {};
+ }
+
+ template <typename T, size_t Extent = eastl::dynamic_extent>
+ class span
+ {
+ public:
+ typedef T element_type;
+ typedef remove_cv_t<T> value_type;
+ typedef eastl_size_t index_type;
+ typedef ptrdiff_t difference_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* iterator;
+ typedef const T* const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ static EA_CONSTEXPR size_t extent = Extent;
+
+ // constructors / destructor
+ EA_CONSTEXPR span() EA_NOEXCEPT;
+ EA_CONSTEXPR span(const span& other) EA_NOEXCEPT = default;
+ EA_CONSTEXPR span(pointer ptr, index_type count);
+ EA_CONSTEXPR span(pointer pBegin, pointer pEnd);
+ ~span() EA_NOEXCEPT = default;
+
+ // copy-assignment operator
+ EA_CPP14_CONSTEXPR span& operator=(const span& other) EA_NOEXCEPT = default;
+
+ // conversion constructors for c-array and eastl::array
+ template <size_t N, typename = enable_if_t<(Extent == eastl::dynamic_extent || N == Extent)>>
+ EA_CONSTEXPR span(element_type (&arr)[N]) EA_NOEXCEPT;
+
+ template <size_t N, typename = enable_if_t<(Extent == eastl::dynamic_extent || N == Extent)>>
+ EA_CONSTEXPR span(eastl::array<value_type, N>& arr) EA_NOEXCEPT;
+
+ template <size_t N, typename = enable_if_t<(Extent == eastl::dynamic_extent || N == Extent)>>
+ EA_CONSTEXPR span(const eastl::array<value_type, N>& arr) EA_NOEXCEPT;
+
+ // SfinaeForGenericContainers
+ //
+ template <typename Container>
+ using SfinaeForGenericContainers =
+ enable_if_t<!is_same_v<Container, span> && !is_same_v<Container, array<value_type>> &&
+ !is_array_v<Container> &&
+ Internal::HasSizeAndData<Container>::value &&
+ is_convertible_v<remove_pointer_t<decltype(eastl::data(eastl::declval<Container&>()))> (*)[], element_type (*)[]>>;
+
+ // generic container conversion constructors
+ template <typename Container, typename = SfinaeForGenericContainers<Container>>
+ EA_CONSTEXPR span(Container& cont);
+
+ template <typename Container, typename = SfinaeForGenericContainers<const Container>>
+ EA_CONSTEXPR span(const Container& cont);
+
+ template <typename U, size_t N, typename = enable_if_t<(Extent == eastl::dynamic_extent || N == Extent) && (is_convertible_v<U(*)[], element_type(*)[]>)>>
+ EA_CONSTEXPR span(const span<U, N>& s) EA_NOEXCEPT;
+
+ // subviews
+ template<size_t Count>
+ EA_CPP14_CONSTEXPR span<element_type, Count> first() const;
+ EA_CPP14_CONSTEXPR span<element_type, dynamic_extent> first(size_t Count) const;
+
+ template<size_t Count>
+ EA_CPP14_CONSTEXPR span<element_type, Count> last() const;
+ EA_CPP14_CONSTEXPR span<element_type, dynamic_extent> last(size_t Count) const;
+
+ template <size_t Offset, size_t Count = dynamic_extent>
+ EA_CONSTEXPR span<element_type, Internal::SubspanExtent<Extent, Offset, Count>::value> subspan() const;
+ EA_CONSTEXPR span<element_type, dynamic_extent> subspan(size_t Offset, size_t Count = dynamic_extent) const;
+
+ // observers
+ EA_CONSTEXPR pointer data() const EA_NOEXCEPT;
+ EA_CONSTEXPR index_type size() const EA_NOEXCEPT;
+ EA_CONSTEXPR index_type size_bytes() const EA_NOEXCEPT;
+ EA_CONSTEXPR bool empty() const EA_NOEXCEPT;
+
+ // subscript operators, element access
+ EA_CONSTEXPR reference front() const;
+ EA_CONSTEXPR reference back() const;
+ EA_CONSTEXPR reference operator[](index_type idx) const;
+ EA_CONSTEXPR reference operator()(index_type idx) const;
+
+ // iterator support
+ EA_CONSTEXPR iterator begin() const EA_NOEXCEPT;
+ EA_CONSTEXPR iterator end() const EA_NOEXCEPT;
+ EA_CONSTEXPR const_iterator cbegin() const EA_NOEXCEPT;
+ EA_CONSTEXPR const_iterator cend() const EA_NOEXCEPT;
+ EA_CONSTEXPR reverse_iterator rbegin() const EA_NOEXCEPT;
+ EA_CONSTEXPR reverse_iterator rend() const EA_NOEXCEPT;
+ EA_CONSTEXPR const_reverse_iterator crbegin() const EA_NOEXCEPT;
+ EA_CONSTEXPR const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ private:
+ pointer mpData = nullptr;
+ index_type mnSize = 0;
+
+ private:
+ EA_CONSTEXPR bool bounds_check(size_t) const; // utility used in asserts
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // template deduction guides
+ ///////////////////////////////////////////////////////////////////////////
+ #ifdef __cpp_deduction_guides
+ template<class T, size_t N> span(T (&)[N]) -> span <T, N>;
+ template<class T, size_t N> span(array<T, N>&) -> span <T, N>;
+ template<class T, size_t N> span(const array<T, N>&) -> span <const T, N>;
+ template<class Container> span(Container&) -> span <typename Container::value_type>;
+ template<class Container> span(const Container&) -> span <const typename Container::value_type>;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // comparison operators
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <class T, size_t X, class U, size_t Y>
+ EA_CONSTEXPR bool operator==(span<T, X> l, span<U, Y> r)
+ {
+ return (l.size() == r.size()) && eastl::equal(l.begin(), l.end(), r.begin());
+ }
+
+ template <class T, size_t X, class U, size_t Y>
+ EA_CONSTEXPR bool operator<(span<T, X> l, span<U, Y> r)
+ {
+ return eastl::lexicographical_compare(l.begin(), l.end(), r.begin(), r.end());
+ }
+
+ template <class T, size_t X, class U, size_t Y>
+ EA_CONSTEXPR bool operator!=(span<T, X> l, span<U, Y> r) { return !(l == r); }
+
+ template <class T, size_t X, class U, size_t Y>
+ EA_CONSTEXPR bool operator<=(span<T, X> l, span<U, Y> r) { return !(r < l); }
+
+ template <class T, size_t X, class U, size_t Y>
+ EA_CONSTEXPR bool operator>(span<T, X> l, span<U, Y> r) { return r < l; }
+
+ template <class T, size_t X, class U, size_t Y>
+ EA_CONSTEXPR bool operator>=(span<T, X> l, span<U, Y> r) { return !(l < r); }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // ctor implementations
+ ///////////////////////////////////////////////////////////////////////////
+
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR span<T, Extent>::span() EA_NOEXCEPT
+ {
+ static_assert(Extent == dynamic_extent || Extent == 0, "impossible to default construct a span with a fixed Extent different than 0");
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR span<T, Extent>::span(pointer ptr, index_type size)
+ : mpData(ptr), mnSize(size)
+ {
+ EASTL_ASSERT_MSG(Extent == dynamic_extent || Extent == mnSize, "impossible to create a span with a fixed Extent different than the size of the supplied buffer");
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR span<T, Extent>::span(pointer pBegin, pointer pEnd)
+ : mpData(pBegin), mnSize(static_cast<index_type>(pEnd - pBegin))
+ {
+ EASTL_ASSERT_MSG(Extent == dynamic_extent || Extent == mnSize, "impossible to create a span with a fixed Extent different than the size of the supplied buffer");
+ }
+
+ template <typename T, size_t Extent>
+ template <size_t N, typename>
+ EA_CONSTEXPR span<T, Extent>::span(element_type(&arr)[N]) EA_NOEXCEPT
+ : span(arr, static_cast<index_type>(N))
+ {
+ }
+
+ template <typename T, size_t Extent>
+ template <size_t N, typename>
+ EA_CONSTEXPR span<T, Extent>::span(eastl::array<value_type, N> &arr) EA_NOEXCEPT
+ : span(arr.data(), arr.size())
+ {
+ }
+
+ template <typename T, size_t Extent>
+ template <size_t N, typename>
+ EA_CONSTEXPR span<T, Extent>::span(const eastl::array<value_type, N>& arr) EA_NOEXCEPT
+ : span(arr.data(), arr.size())
+ {
+ }
+
+
+ template <typename T, size_t Extent>
+ template <typename Container, typename>
+ EA_CONSTEXPR span<T, Extent>::span(Container& cont)
+ : span(static_cast<pointer>(eastl::data(cont)), static_cast<index_type>(eastl::size(cont)))
+ {
+ }
+
+ template <typename T, size_t Extent>
+ template <typename Container, typename>
+ EA_CONSTEXPR span<T, Extent>::span(const Container& cont)
+ : span(static_cast<pointer>(eastl::data(cont)), static_cast<index_type>(eastl::size(cont)))
+ {
+ }
+
+ template <typename T, size_t Extent>
+ template <typename U, size_t N, typename>
+ EA_CONSTEXPR span<T, Extent>::span(const span<U, N>& s) EA_NOEXCEPT
+ : span(s.data(), s.size())
+ {
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // member function implementations
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::pointer span<T, Extent>::data() const EA_NOEXCEPT
+ {
+ return mpData;
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::index_type span<T, Extent>::size() const EA_NOEXCEPT
+ {
+ return mnSize;
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::index_type span<T, Extent>::size_bytes() const EA_NOEXCEPT
+ {
+ return size() * sizeof(element_type);
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR bool span<T, Extent>::empty() const EA_NOEXCEPT
+ {
+ return size() == 0;
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::reference span<T, Extent>::front() const
+ {
+ EASTL_ASSERT_MSG(!empty(), "undefined behavior accessing an empty span");
+
+ return mpData[0];
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::reference span<T, Extent>::back() const
+ {
+ EASTL_ASSERT_MSG(!empty(), "undefined behavior accessing an empty span");
+
+ return mpData[mnSize - 1];
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::reference span<T, Extent>::operator[](index_type idx) const
+ {
+ EASTL_ASSERT_MSG(!empty(), "undefined behavior accessing an empty span");
+ EASTL_ASSERT_MSG(bounds_check(idx), "undefined behavior accessing out of bounds");
+
+ return mpData[idx];
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::reference span<T, Extent>::operator()(index_type idx) const
+ {
+ EASTL_ASSERT_MSG(!empty(), "undefined behavior accessing an empty span");
+ EASTL_ASSERT_MSG(bounds_check(idx), "undefined behavior accessing out of bounds");
+
+ return mpData[idx];
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::iterator span<T, Extent>::begin() const EA_NOEXCEPT
+ {
+ return mpData;
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::iterator span<T, Extent>::end() const EA_NOEXCEPT
+ {
+ return mpData + mnSize;
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::const_iterator span<T, Extent>::cbegin() const EA_NOEXCEPT
+ {
+ return mpData;
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::const_iterator span<T, Extent>::cend() const EA_NOEXCEPT
+ {
+ return mpData + mnSize;
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::reverse_iterator span<T, Extent>::rbegin() const EA_NOEXCEPT
+ {
+ return reverse_iterator(mpData + mnSize);
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::reverse_iterator span<T, Extent>::rend() const EA_NOEXCEPT
+ {
+ return reverse_iterator(mpData);
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::const_reverse_iterator span<T, Extent>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mpData + mnSize);
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR typename span<T, Extent>::const_reverse_iterator span<T, Extent>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mpData);
+ }
+
+ template <typename T, size_t Extent>
+ template <size_t Count>
+ EA_CPP14_CONSTEXPR span<typename span<T, Extent>::element_type, Count> span<T, Extent>::first() const
+ {
+ EASTL_ASSERT_MSG(Count <= size(), "undefined behavior accessing out of bounds");
+ return {data(), static_cast<index_type>(Count)};
+ }
+
+ template <typename T, size_t Extent>
+ EA_CPP14_CONSTEXPR span<typename span<T, Extent>::element_type, dynamic_extent>
+ span<T, Extent>::first(size_t sz) const
+ {
+ EASTL_ASSERT_MSG(sz <= size(), "undefined behavior accessing out of bounds");
+ return {data(), static_cast<index_type>(sz)};
+ }
+
+ template <typename T, size_t Extent>
+ template <size_t Count>
+ EA_CPP14_CONSTEXPR span<typename span<T, Extent>::element_type, Count> span<T, Extent>::last() const
+ {
+ EASTL_ASSERT_MSG(Count <= size(), "undefined behavior accessing out of bounds");
+ return {data() + size() - Count, static_cast<index_type>(Count)};
+ }
+
+ template <typename T, size_t Extent>
+ EA_CPP14_CONSTEXPR span<typename span<T, Extent>::element_type, dynamic_extent>
+ span<T, Extent>::last(size_t sz) const
+ {
+ EASTL_ASSERT_MSG(sz <= size(), "undefined behavior accessing out of bounds");
+ return {data() + size() - sz, static_cast<index_type>(sz)};
+ }
+
+ template <typename T, size_t Extent>
+ template <size_t Offset, size_t Count>
+ EA_CONSTEXPR span<typename span<T, Extent>::element_type, Internal::SubspanExtent<Extent, Offset, Count>::value>
+ span<T, Extent>::subspan() const
+ {
+ EASTL_ASSERT_MSG(Offset <= size(), "undefined behaviour accessing out of bounds");
+ EASTL_ASSERT_MSG(Count == dynamic_extent || Count <= (size() - Offset), "undefined behaviour exceeding size of span");
+
+ return {data() + Offset, eastl_size_t(Count == dynamic_extent ? size() - Offset : Count)};
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR span<typename span<T, Extent>::element_type, dynamic_extent>
+ span<T, Extent>::subspan(size_t offset, size_t count) const
+ {
+ EASTL_ASSERT_MSG(offset <= size(), "undefined behaviour accessing out of bounds");
+ EASTL_ASSERT_MSG(count == dynamic_extent || count <= (size() - offset), "undefined behaviour exceeding size of span");
+
+ return {data() + offset, eastl_size_t(count == dynamic_extent ? size() - offset : count)};
+ }
+
+ template <typename T, size_t Extent>
+ EA_CONSTEXPR bool span<T, Extent>::bounds_check(size_t offset) const
+ {
+ return offset < size();
+ }
+}
+
+#endif // EASTL_SPAN_H
diff --git a/EASTL/include/EASTL/stack.h b/EASTL/include/EASTL/stack.h
new file mode 100644
index 0000000..f060b60
--- /dev/null
+++ b/EASTL/include/EASTL/stack.h
@@ -0,0 +1,352 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a stack that is just like the C++ std::stack adapter class.
+// The only significant difference is that the stack here provides a get_container
+// function to provide access to the underlying container.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_STACK_H
+#define EASTL_STACK_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/vector.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_STACK_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_STACK_DEFAULT_NAME
+ #define EASTL_STACK_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " stack" // Unless the user overrides something, this is "EASTL stack".
+ #endif
+
+ /// EASTL_STACK_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_STACK_DEFAULT_ALLOCATOR
+ #define EASTL_STACK_DEFAULT_ALLOCATOR allocator_type(EASTL_STACK_DEFAULT_NAME)
+ #endif
+
+
+
+ /// stack
+ ///
+ /// stack is an adapter class provides a LIFO (last-in, first-out) interface
+ /// via wrapping a sequence that provides at least the following operations:
+ /// push_back
+ /// pop_back
+ /// back
+ ///
+ /// In practice this means vector, deque, string, list, intrusive_list.
+ ///
+ template <typename T, typename Container = eastl::vector<T> >
+ class stack
+ {
+ public:
+ typedef stack<T, Container> this_type;
+ typedef Container container_type;
+ //typedef typename Container::allocator_type allocator_type; // We can't currently declare this because the container may be a type that doesn't have an allocator.
+ typedef typename Container::value_type value_type;
+ typedef typename Container::reference reference;
+ typedef typename Container::const_reference const_reference;
+ typedef typename Container::size_type size_type;
+
+ public: // We declare public so that global comparison operators can be implemented without adding an inline level and without tripping up GCC 2.x friend declaration failures. GCC (through at least v4.0) is poor at inlining and performance wins over correctness.
+ container_type c; // The C++ standard specifies that you declare a protected member variable of type Container called 'c'.
+
+ public:
+ stack();
+
+ // Allocator is templated here because we aren't allowed to infer the allocator_type from the Container, as some containers (e.g. array) don't
+ // have allocators. For containers that don't have allocator types, you could use void or char as the Allocator template type.
+
+ template <class Allocator>
+ explicit stack(const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(allocator)
+ {
+ }
+
+ template <class Allocator>
+ stack(const this_type& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(x.c, allocator)
+ {
+ }
+
+ template <class Allocator>
+ stack(this_type&& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL)
+ : c(eastl::move(x.c), allocator)
+ {
+ }
+
+ explicit stack(const container_type& x);
+ explicit stack(container_type&& x);
+
+ // Additional C++11 support to consider:
+ //
+ // template <class Allocator>
+ // stack(const container_type& x, const Allocator& allocator);
+ //
+ // template <class Allocator>
+ // stack(container_type&& x, const Allocator& allocator);
+
+ stack(std::initializer_list<value_type> ilist); // The first item in the initializer list is pushed first. C++11 doesn't specify that std::stack has initializer list support.
+
+ bool empty() const;
+ size_type size() const;
+
+ reference top();
+ const_reference top() const;
+
+ void push(const value_type& value);
+ void push(value_type&& x);
+
+ template <class... Args> void emplace_back(Args&&... args); // backwards compatibility
+ template <class... Args> decltype(auto) emplace(Args&&... args);
+
+ void pop();
+
+ container_type& get_container();
+ const container_type& get_container() const;
+
+ void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<this_type::container_type>::value);
+
+ bool validate() const;
+
+ }; // class stack
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // stack
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container>
+ inline stack<T, Container>::stack()
+ : c() // To consider: use c(EASTL_STACK_DEFAULT_ALLOCATOR) here, though that would add the requirement that the user supplied container support this.
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Container>
+ inline stack<T, Container>::stack(const Container& x)
+ : c(x)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Container>
+ inline stack<T, Container>::stack(Container&& x)
+ : c(eastl::move(x))
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Container>
+ inline stack<T, Container>::stack(std::initializer_list<value_type> ilist)
+ : c() // We could alternatively use c(ilist) here, but that would require c to have an ilist constructor.
+ {
+ // Better solution but requires an insert function.
+ // c.insert(ilist.begin(), ilist.end());
+
+ // Possibly slower solution but doesn't require an insert function.
+ for(const auto& value : ilist)
+ {
+ c.push_back(value);
+ }
+ }
+
+ template <typename T, typename Container>
+ inline bool stack<T, Container>::empty() const
+ {
+ return c.empty();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename stack<T, Container>::size_type
+ stack<T, Container>::size() const
+ {
+ return c.size();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename stack<T, Container>::reference
+ stack<T, Container>::top()
+ {
+ return c.back();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename stack<T, Container>::const_reference
+ stack<T, Container>::top() const
+ {
+ return c.back();
+ }
+
+
+ template <typename T, typename Container>
+ inline void stack<T, Container>::push(const value_type& value)
+ {
+ c.push_back(const_cast<value_type&>(value)); // const_cast so that intrusive_list can work. We may revisit this.
+ }
+
+
+ template <typename T, typename Container>
+ inline void stack<T, Container>::push(value_type&& x)
+ {
+ c.push_back(eastl::move(x));
+ }
+
+
+ template <typename T, typename Container>
+ template <class... Args>
+ inline void stack<T, Container>::emplace_back(Args&&... args)
+ {
+ emplace(eastl::forward<Args>(args)...);
+ }
+
+
+ template <typename T, typename Container>
+ template <class... Args>
+ inline decltype(auto) stack<T, Container>::emplace(Args&&... args)
+ {
+ return c.emplace_back(eastl::forward<Args>(args)...);
+ }
+
+
+ template <typename T, typename Container>
+ inline void stack<T, Container>::pop()
+ {
+ c.pop_back();
+ }
+
+
+ template <typename T, typename Container>
+ inline typename stack<T, Container>::container_type&
+ stack<T, Container>::get_container()
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container>
+ inline const typename stack<T, Container>::container_type&
+ stack<T, Container>::get_container() const
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container>
+ void stack<T, Container>::swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<this_type::container_type>::value)
+ {
+ using eastl::swap;
+ swap(c, x.c);
+ }
+
+
+ template <typename T, typename Container>
+ bool stack<T, Container>::validate() const
+ {
+ return c.validate();
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container>
+ inline bool operator==(const stack<T, Container>& a, const stack<T, Container>& b)
+ {
+ return (a.c == b.c);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename Container> requires std::three_way_comparable<Container>
+ inline synth_three_way_result<T> operator<=>(const stack<T, Container>& a, const stack<T, Container>& b)
+ {
+ return a.c <=> b.c;
+ }
+#endif
+
+ template <typename T, typename Container>
+ inline bool operator!=(const stack<T, Container>& a, const stack<T, Container>& b)
+ {
+ return !(a.c == b.c);
+ }
+
+
+ template <typename T, typename Container>
+ inline bool operator<(const stack<T, Container>& a, const stack<T, Container>& b)
+ {
+ return (a.c < b.c);
+ }
+
+
+ template <typename T, typename Container>
+ inline bool operator>(const stack<T, Container>& a, const stack<T, Container>& b)
+ {
+ return (b.c < a.c);
+ }
+
+
+ template <typename T, typename Container>
+ inline bool operator<=(const stack<T, Container>& a, const stack<T, Container>& b)
+ {
+ return !(b.c < a.c);
+ }
+
+
+ template <typename T, typename Container>
+ inline bool operator>=(const stack<T, Container>& a, const stack<T, Container>& b)
+ {
+ return !(a.c < b.c);
+ }
+
+ template <typename T, typename Container>
+ inline void swap(stack<T, Container>& a, stack<T, Container>& b) EA_NOEXCEPT_IF((eastl::is_nothrow_swappable<typename stack<T, Container>::container_type>::value))
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/string.h b/EASTL/include/EASTL/string.h
new file mode 100644
index 0000000..3a70b79
--- /dev/null
+++ b/EASTL/include/EASTL/string.h
@@ -0,0 +1,4296 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements a basic_string class, much like the C++ std::basic_string.
+// The primary distinctions between basic_string and std::basic_string are:
+// - basic_string has a few extension functions that allow for increased performance.
+// - basic_string has a few extension functions that make use easier,
+// such as a member sprintf function and member tolower/toupper functions.
+// - basic_string supports debug memory naming natively.
+// - basic_string is easier to read, debug, and visualize.
+// - basic_string internally manually expands basic functions such as begin(),
+// size(), etc. in order to improve debug performance and optimizer success.
+// - basic_string is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - basic_string has less deeply nested function calls and allows the user to
+// enable forced inlining in debug builds in order to reduce bloat.
+// - basic_string doesn't use char traits. As a result, EASTL assumes that
+// strings will hold characters and not exotic things like widgets. At the
+// very least, basic_string assumes that the value_type is a POD.
+// - basic_string::size_type is defined as eastl_size_t instead of size_t in
+// order to save memory and run faster on 64 bit systems.
+// - basic_string data is guaranteed to be contiguous.
+// - basic_string data is guaranteed to be 0-terminated, and the c_str() function
+// is guaranteed to return the same pointer as the data() which is guaranteed
+// to be the same value as &string[0].
+// - basic_string has a set_capacity() function which frees excess capacity.
+// The only way to do this with std::basic_string is via the cryptic non-obvious
+// trick of using: basic_string<char>(x).swap(x);
+// - basic_string has a force_size() function, which unilaterally moves the string
+// end position (mpEnd) to the given location. Useful for when the user writes
+// into the string via some external means such as C strcpy or sprintf.
+// - basic_string substr() deviates from the standard and returns a string with
+// a copy of this->get_allocator()
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Copy on Write (cow)
+//
+// This string implementation does not do copy on write (cow). This is by design,
+// as cow penalizes 95% of string uses for the benefit of only 5% of the uses
+// (these percentages are qualitative, not quantitative). The primary benefit of
+// cow is that it allows for the sharing of string data between two string objects.
+// Thus if you say this:
+// string a("hello");
+// string b(a);
+// the "hello" will be shared between a and b. If you then say this:
+// a = "world";
+// then a will release its reference to "hello" and leave b with the only reference
+// to it. Normally this functionality is accomplished via reference counting and
+// with atomic operations or mutexes.
+//
+// The C++ standard does not say anything about basic_string and cow. However,
+// for a basic_string implementation to be standards-conforming, a number of
+// issues arise which dictate some things about how one would have to implement
+// a cow string. The discussion of these issues will not be rehashed here, as you
+// can read the references below for better detail than can be provided in the
+// space we have here. However, we can say that the C++ standard is sensible and
+// that anything we try to do here to allow for an efficient cow implementation
+// would result in a generally unacceptable string interface.
+//
+// The disadvantages of cow strings are:
+// - A reference count needs to exist with the string, which increases string memory usage.
+// - With thread safety, atomic operations and mutex locks are expensive, especially
+// on weaker memory systems such as console gaming platforms.
+// - All non-const string accessor functions need to do a sharing check then the
+// first such check needs to detach the string. Similarly, all string assignments
+// need to do a sharing check as well. If you access the string before doing an
+// assignment, the assignment doesn't result in a shared string, because the string
+// has already been detached.
+// - String sharing doesn't happen the large majority of the time. In some cases,
+// the total sum of the reference count memory can exceed any memory savings
+// gained by the strings that share representations.
+//
+// The addition of a string_cow class is under consideration for this library.
+// There are conceivably some systems which have string usage patterns which would
+// benefit from cow sharing. Such functionality is best saved for a separate string
+// implementation so that the other string uses aren't penalized.
+//
+// References:
+// This is a good starting HTML reference on the topic:
+// http://www.gotw.ca/publications/optimizations.htm
+// Here is a Usenet discussion on the topic:
+// http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_STRING_H
+#define EASTL_STRING_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/bonus/compressed_pair.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stddef.h> // size_t, ptrdiff_t, etc.
+#include <stdarg.h> // vararg functionality.
+
+#include <stdlib.h> // malloc, free.
+#include <stdio.h> // snprintf, etc.
+#include <ctype.h> // toupper, etc.
+
+EA_DISABLE_GCC_WARNING(-Wtype-limits)
+#include <wchar.h>
+EA_RESTORE_GCC_WARNING()
+
+#include <string.h> // strlen, etc.
+
+#if EASTL_EXCEPTIONS_ENABLED
+ #include <stdexcept> // std::out_of_range, std::length_error.
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4480 - nonstandard extension used: specifying underlying type for enum
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+// 4267 - 'argument' : conversion from 'size_t' to 'const uint32_t', possible loss of data. This is a bogus warning resulting from a bug in VC++.
+// 4702 - unreachable code
+EA_DISABLE_VC_WARNING(4530 4480 4571 4267 4702);
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+#include <EASTL/internal/char_traits.h>
+#include <EASTL/string_view.h>
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STRING_EXPLICIT
+//
+// See EASTL_STRING_OPT_EXPLICIT_CTORS for documentation.
+//
+#if EASTL_STRING_OPT_EXPLICIT_CTORS
+ #define EASTL_STRING_EXPLICIT explicit
+#else
+ #define EASTL_STRING_EXPLICIT
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Vsnprintf
+//
+// The user is expected to supply these functions one way or another. Note that
+// these functions are expected to accept parameters as per the C99 standard.
+// These functions can deal with C99 standard return values or Microsoft non-standard
+// return values but act more efficiently if implemented via the C99 style.
+//
+// In the case of EASTL_EASTDC_VSNPRINTF == 1, the user is expected to either
+// link EAStdC or provide the functions below that act the same. In the case of
+// EASTL_EASTDC_VSNPRINTF == 0, the user is expected to provide the function
+// implementations, and may simply use C vsnprintf if desired, though it's not
+// completely portable between compilers.
+//
+#if EASTL_EASTDC_VSNPRINTF
+ namespace EA
+ {
+ namespace StdC
+ {
+ // Provided by the EAStdC package or by the user.
+ EASTL_EASTDC_API int Vsnprintf(char* EA_RESTRICT pDestination, size_t n, const char* EA_RESTRICT pFormat, va_list arguments);
+ EASTL_EASTDC_API int Vsnprintf(char16_t* EA_RESTRICT pDestination, size_t n, const char16_t* EA_RESTRICT pFormat, va_list arguments);
+ EASTL_EASTDC_API int Vsnprintf(char32_t* EA_RESTRICT pDestination, size_t n, const char32_t* EA_RESTRICT pFormat, va_list arguments);
+ #if EA_CHAR8_UNIQUE
+ EASTL_EASTDC_API int Vsnprintf(char8_t* EA_RESTRICT pDestination, size_t n, const char8_t* EA_RESTRICT pFormat, va_list arguments);
+ #endif
+ #if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE
+ EASTL_EASTDC_API int Vsnprintf(wchar_t* EA_RESTRICT pDestination, size_t n, const wchar_t* EA_RESTRICT pFormat, va_list arguments);
+ #endif
+ }
+ }
+
+ namespace eastl
+ {
+ inline int Vsnprintf(char* EA_RESTRICT pDestination, size_t n, const char* EA_RESTRICT pFormat, va_list arguments)
+ { return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); }
+
+ inline int Vsnprintf(char16_t* EA_RESTRICT pDestination, size_t n, const char16_t* EA_RESTRICT pFormat, va_list arguments)
+ { return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); }
+
+ inline int Vsnprintf(char32_t* EA_RESTRICT pDestination, size_t n, const char32_t* EA_RESTRICT pFormat, va_list arguments)
+ { return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); }
+
+ #if EA_CHAR8_UNIQUE
+ inline int Vsnprintf(char8_t* EA_RESTRICT pDestination, size_t n, const char8_t* EA_RESTRICT pFormat, va_list arguments)
+ { return EA::StdC::Vsnprintf((char*)pDestination, n, (const char*)pFormat, arguments); }
+ #endif
+
+ #if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE
+ inline int Vsnprintf(wchar_t* EA_RESTRICT pDestination, size_t n, const wchar_t* EA_RESTRICT pFormat, va_list arguments)
+ { return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); }
+ #endif
+ }
+#else
+ // User-provided functions.
+ extern int Vsnprintf8 (char* pDestination, size_t n, const char* pFormat, va_list arguments);
+ extern int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments);
+ extern int Vsnprintf32(char32_t* pDestination, size_t n, const char32_t* pFormat, va_list arguments);
+ #if EA_CHAR8_UNIQUE
+ extern int Vsnprintf8 (char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments);
+ #endif
+ #if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE
+ extern int VsnprintfW(wchar_t* pDestination, size_t n, const wchar_t* pFormat, va_list arguments);
+ #endif
+
+ namespace eastl
+ {
+ inline int Vsnprintf(char* pDestination, size_t n, const char* pFormat, va_list arguments)
+ { return Vsnprintf8(pDestination, n, pFormat, arguments); }
+
+ inline int Vsnprintf(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments)
+ { return Vsnprintf16(pDestination, n, pFormat, arguments); }
+
+ inline int Vsnprintf(char32_t* pDestination, size_t n, const char32_t* pFormat, va_list arguments)
+ { return Vsnprintf32(pDestination, n, pFormat, arguments); }
+
+ #if EA_CHAR8_UNIQUE
+ inline int Vsnprintf(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments)
+ { return Vsnprintf8(pDestination, n, pFormat, arguments); }
+ #endif
+
+ #if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE
+ inline int Vsnprintf(wchar_t* pDestination, size_t n, const wchar_t* pFormat, va_list arguments)
+ { return VsnprintfW(pDestination, n, pFormat, arguments); }
+ #endif
+ }
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+namespace eastl
+{
+
+ /// EASTL_BASIC_STRING_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_BASIC_STRING_DEFAULT_NAME
+ #define EASTL_BASIC_STRING_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " basic_string" // Unless the user overrides something, this is "EASTL basic_string".
+ #endif
+
+
+ /// EASTL_BASIC_STRING_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_BASIC_STRING_DEFAULT_ALLOCATOR
+ #define EASTL_BASIC_STRING_DEFAULT_ALLOCATOR allocator_type(EASTL_BASIC_STRING_DEFAULT_NAME)
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// basic_string
+ ///
+ /// Implements a templated string class, somewhat like C++ std::basic_string.
+ ///
+ /// Notes:
+ /// As of this writing, an insert of a string into itself necessarily
+ /// triggers a reallocation, even if there is enough capacity in self
+ /// to handle the increase in size. This is due to the slightly tricky
+ /// nature of the operation of modifying one's self with one's self,
+ /// and thus the source and destination are being modified during the
+ /// operation. It might be useful to rectify this to the extent possible.
+ ///
+ /// Our usage of noexcept specifiers is a little different from the
+ /// requirements specified by std::basic_string in C++11. This is because
+ /// our allocators are instances and not types and thus can be non-equal
+ /// and result in exceptions during assignments that theoretically can't
+ /// occur with std containers.
+ ///
+ template <typename T, typename Allocator = EASTLAllocatorType>
+ class basic_string
+ {
+ public:
+ typedef basic_string<T, Allocator> this_type;
+ typedef basic_string_view<T> view_type;
+ typedef T value_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* iterator; // Maintainer note: We want to leave iterator defined as T* -- at least in release builds -- as this gives some algorithms an advantage that optimizers cannot get around.
+ typedef const T* const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef Allocator allocator_type;
+
+ static const EA_CONSTEXPR size_type npos = (size_type)-1; /// 'npos' means non-valid position or simply non-position.
+
+ public:
+ // CtorDoNotInitialize exists so that we can create a constructor that allocates but doesn't
+ // initialize and also doesn't collide with any other constructor declaration.
+ struct CtorDoNotInitialize{};
+
+ // CtorSprintf exists so that we can create a constructor that accepts printf-style
+ // arguments but also doesn't collide with any other constructor declaration.
+ #ifdef EA_PLATFORM_MINGW
+ // Workaround for MinGW compiler bug: variadic arguments are corrupted if empty object is passed before it
+ struct CtorSprintf{ int dummy; };
+ #else
+ struct CtorSprintf{};
+ #endif
+
+ // CtorConvert exists so that we can have a constructor that implements string encoding
+ // conversion, such as between UCS2 char16_t and UTF8 char8_t.
+ struct CtorConvert{};
+
+ protected:
+ // Masks used to determine if we are in SSO or Heap
+ #ifdef EA_SYSTEM_BIG_ENDIAN
+ // Big Endian use LSB, unless we want to reorder struct layouts on endianness, Bit is set when we are in Heap
+ static EA_CONSTEXPR_OR_CONST size_type kHeapMask = 0x1;
+ static EA_CONSTEXPR_OR_CONST size_type kSSOMask = 0x1;
+ #else
+ // Little Endian use MSB
+ static EA_CONSTEXPR_OR_CONST size_type kHeapMask = ~(size_type(~size_type(0)) >> 1);
+ static EA_CONSTEXPR_OR_CONST size_type kSSOMask = 0x80;
+ #endif
+
+ public:
+ #ifdef EA_SYSTEM_BIG_ENDIAN
+ static EA_CONSTEXPR_OR_CONST size_type kMaxSize = (~kHeapMask) >> 1;
+ #else
+ static EA_CONSTEXPR_OR_CONST size_type kMaxSize = ~kHeapMask;
+ #endif
+
+ protected:
+ // The view of memory when the string data is obtained from the allocator.
+ struct HeapLayout
+ {
+ value_type* mpBegin; // Begin of string.
+ size_type mnSize; // Size of the string. Number of characters currently in the string, not including the trailing '0'
+ size_type mnCapacity; // Capacity of the string. Number of characters string can hold, not including the trailing '0'
+ };
+
+ template <typename CharT, size_t = sizeof(CharT)>
+ struct SSOPadding
+ {
+ char padding[sizeof(CharT) - sizeof(char)];
+ };
+
+ template <typename CharT>
+ struct SSOPadding<CharT, 1>
+ {
+ // template specialization to remove the padding structure to avoid warnings on zero length arrays
+ // also, this allows us to take advantage of the empty-base-class optimization.
+ };
+
+ // The view of memory when the string data is able to store the string data locally (without a heap allocation).
+ struct SSOLayout
+ {
+ static EA_CONSTEXPR_OR_CONST size_type SSO_CAPACITY = (sizeof(HeapLayout) - sizeof(char)) / sizeof(value_type);
+
+ // mnSize must correspond to the last byte of HeapLayout.mnCapacity, so we don't want the compiler to insert
+ // padding after mnSize if sizeof(value_type) != 1; Also ensures both layouts are the same size.
+ struct SSOSize : SSOPadding<value_type>
+ {
+ char mnRemainingSize;
+ };
+
+ value_type mData[SSO_CAPACITY]; // Local buffer for string data.
+ SSOSize mRemainingSizeField;
+ };
+
+ // This view of memory is a utility structure for easy copying of the string data.
+ struct RawLayout
+ {
+ char mBuffer[sizeof(HeapLayout)];
+ };
+
+ static_assert(sizeof(SSOLayout) == sizeof(HeapLayout), "heap and sso layout structures must be the same size");
+ static_assert(sizeof(HeapLayout) == sizeof(RawLayout), "heap and raw layout structures must be the same size");
+
+ // This implements the 'short string optimization' or SSO. SSO reuses the existing storage of string class to
+ // hold string data short enough to fit therefore avoiding a heap allocation. The number of characters stored in
+ // the string SSO buffer is variable and depends on the string character width. This implementation favors a
+ // consistent string size than increasing the size of the string local data to accommodate a consistent number
+ // of characters despite character width.
+ struct Layout
+ {
+ union
+ {
+ HeapLayout heap;
+ SSOLayout sso;
+ RawLayout raw;
+ };
+
+ Layout() { ResetToSSO(); } // start as SSO by default
+ Layout(const Layout& other) { Copy(*this, other); }
+ Layout(Layout&& other) { Move(*this, other); }
+ Layout& operator=(const Layout& other) { Copy(*this, other); return *this; }
+ Layout& operator=(Layout&& other) { Move(*this, other); return *this; }
+
+ // We are using Heap when the bit is set, easier to conceptualize checking IsHeap instead of IsSSO
+ inline bool IsHeap() const EA_NOEXCEPT { return !!(sso.mRemainingSizeField.mnRemainingSize & kSSOMask); }
+ inline bool IsSSO() const EA_NOEXCEPT { return !IsHeap(); }
+ inline value_type* SSOBufferPtr() EA_NOEXCEPT { return sso.mData; }
+ inline const value_type* SSOBufferPtr() const EA_NOEXCEPT { return sso.mData; }
+
+ // Largest value for SSO.mnSize == 23, which has two LSB bits set, but on big-endian (BE)
+ // use least significant bit (LSB) to denote heap so shift.
+ inline size_type GetSSOSize() const EA_NOEXCEPT
+ {
+ #ifdef EA_SYSTEM_BIG_ENDIAN
+ return SSOLayout::SSO_CAPACITY - (sso.mRemainingSizeField.mnRemainingSize >> 2);
+ #else
+ return (SSOLayout::SSO_CAPACITY - sso.mRemainingSizeField.mnRemainingSize);
+ #endif
+ }
+ inline size_type GetHeapSize() const EA_NOEXCEPT { return heap.mnSize; }
+ inline size_type GetSize() const EA_NOEXCEPT { return IsHeap() ? GetHeapSize() : GetSSOSize(); }
+
+ inline void SetSSOSize(size_type size) EA_NOEXCEPT
+ {
+ #ifdef EA_SYSTEM_BIG_ENDIAN
+ sso.mRemainingSizeField.mnRemainingSize = (char)((SSOLayout::SSO_CAPACITY - size) << 2);
+ #else
+ sso.mRemainingSizeField.mnRemainingSize = (char)(SSOLayout::SSO_CAPACITY - size);
+ #endif
+ }
+
+ inline void SetHeapSize(size_type size) EA_NOEXCEPT { heap.mnSize = size; }
+ inline void SetSize(size_type size) EA_NOEXCEPT { IsHeap() ? SetHeapSize(size) : SetSSOSize(size); }
+
+ inline size_type GetRemainingCapacity() const EA_NOEXCEPT { return size_type(CapacityPtr() - EndPtr()); }
+
+ inline value_type* HeapBeginPtr() EA_NOEXCEPT { return heap.mpBegin; };
+ inline const value_type* HeapBeginPtr() const EA_NOEXCEPT { return heap.mpBegin; };
+
+ inline value_type* SSOBeginPtr() EA_NOEXCEPT { return sso.mData; }
+ inline const value_type* SSOBeginPtr() const EA_NOEXCEPT { return sso.mData; }
+
+ inline value_type* BeginPtr() EA_NOEXCEPT { return IsHeap() ? HeapBeginPtr() : SSOBeginPtr(); }
+ inline const value_type* BeginPtr() const EA_NOEXCEPT { return IsHeap() ? HeapBeginPtr() : SSOBeginPtr(); }
+
+ inline value_type* HeapEndPtr() EA_NOEXCEPT { return heap.mpBegin + heap.mnSize; }
+ inline const value_type* HeapEndPtr() const EA_NOEXCEPT { return heap.mpBegin + heap.mnSize; }
+
+ inline value_type* SSOEndPtr() EA_NOEXCEPT { return sso.mData + GetSSOSize(); }
+ inline const value_type* SSOEndPtr() const EA_NOEXCEPT { return sso.mData + GetSSOSize(); }
+
+ // Points to end of character stream, *ptr == '0'
+ inline value_type* EndPtr() EA_NOEXCEPT { return IsHeap() ? HeapEndPtr() : SSOEndPtr(); }
+ inline const value_type* EndPtr() const EA_NOEXCEPT { return IsHeap() ? HeapEndPtr() : SSOEndPtr(); }
+
+ inline value_type* HeapCapacityPtr() EA_NOEXCEPT { return heap.mpBegin + GetHeapCapacity(); }
+ inline const value_type* HeapCapacityPtr() const EA_NOEXCEPT { return heap.mpBegin + GetHeapCapacity(); }
+
+ inline value_type* SSOCapacityPtr() EA_NOEXCEPT { return sso.mData + SSOLayout::SSO_CAPACITY; }
+ inline const value_type* SSOCapacityPtr() const EA_NOEXCEPT { return sso.mData + SSOLayout::SSO_CAPACITY; }
+
+ // Points to end of the buffer at the terminating '0', *ptr == '0' <- only true when size() == capacity()
+ inline value_type* CapacityPtr() EA_NOEXCEPT { return IsHeap() ? HeapCapacityPtr() : SSOCapacityPtr(); }
+ inline const value_type* CapacityPtr() const EA_NOEXCEPT { return IsHeap() ? HeapCapacityPtr() : SSOCapacityPtr(); }
+
+ inline void SetHeapBeginPtr(value_type* pBegin) EA_NOEXCEPT { heap.mpBegin = pBegin; }
+
+ inline void SetHeapCapacity(size_type cap) EA_NOEXCEPT
+ {
+ #ifdef EA_SYSTEM_BIG_ENDIAN
+ heap.mnCapacity = (cap << 1) | kHeapMask;
+ #else
+ heap.mnCapacity = (cap | kHeapMask);
+ #endif
+ }
+
+ inline size_type GetHeapCapacity() const EA_NOEXCEPT
+ {
+ #ifdef EA_SYSTEM_BIG_ENDIAN
+ return (heap.mnCapacity >> 1);
+ #else
+ return (heap.mnCapacity & ~kHeapMask);
+ #endif
+ }
+
+ inline void Copy(Layout& dst, const Layout& src) EA_NOEXCEPT { dst.raw = src.raw; }
+ inline void Move(Layout& dst, Layout& src) EA_NOEXCEPT { eastl::swap(dst.raw, src.raw); }
+ inline void Swap(Layout& a, Layout& b) EA_NOEXCEPT { eastl::swap(a.raw, b.raw); }
+
+ inline void ResetToSSO() EA_NOEXCEPT { *SSOBeginPtr() = 0; SetSSOSize(0); }
+ };
+
+ eastl::compressed_pair<Layout, allocator_type> mPair;
+
+ inline Layout& internalLayout() EA_NOEXCEPT { return mPair.first(); }
+ inline const Layout& internalLayout() const EA_NOEXCEPT { return mPair.first(); }
+ inline allocator_type& internalAllocator() EA_NOEXCEPT { return mPair.second(); }
+ inline const allocator_type& internalAllocator() const EA_NOEXCEPT { return mPair.second(); }
+
+ public:
+ // Constructor, destructor
+ basic_string() EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(EASTL_BASIC_STRING_DEFAULT_ALLOCATOR));
+ explicit basic_string(const allocator_type& allocator) EA_NOEXCEPT;
+ basic_string(const this_type& x, size_type position, size_type n = npos);
+ basic_string(const value_type* p, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ EASTL_STRING_EXPLICIT basic_string(const value_type* p, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ basic_string(size_type n, value_type c, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ basic_string(const this_type& x);
+ basic_string(const this_type& x, const allocator_type& allocator);
+ basic_string(const value_type* pBegin, const value_type* pEnd, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ basic_string(CtorDoNotInitialize, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ basic_string(CtorSprintf, const value_type* pFormat, ...);
+ basic_string(std::initializer_list<value_type> init, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+
+ basic_string(this_type&& x) EA_NOEXCEPT;
+ basic_string(this_type&& x, const allocator_type& allocator);
+
+ explicit basic_string(const view_type& sv, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ basic_string(const view_type& sv, size_type position, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+
+ template <typename OtherCharType>
+ basic_string(CtorConvert, const OtherCharType* p, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+
+ template <typename OtherCharType>
+ basic_string(CtorConvert, const OtherCharType* p, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+
+ template <typename OtherStringType> // Unfortunately we need the CtorConvert here because otherwise this function would collide with the value_type* constructor.
+ basic_string(CtorConvert, const OtherStringType& x);
+
+ ~basic_string();
+
+ // Allocator
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ // Implicit conversion operator
+ operator basic_string_view<T>() const EA_NOEXCEPT;
+
+ // Operator=
+ this_type& operator=(const this_type& x);
+ this_type& operator=(const value_type* p);
+ this_type& operator=(value_type c);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(view_type v);
+ this_type& operator=(this_type&& x); // TODO(c++17): noexcept(allocator_traits<Allocator>::propagate_on_container_move_assignment::value || allocator_traits<Allocator>::is_always_equal::value);
+
+ #if EASTL_OPERATOR_EQUALS_OTHER_ENABLED
+ this_type& operator=(value_type* p) { return operator=((const value_type*)p); } // We need this because otherwise the const value_type* version can collide with the const OtherStringType& version below.
+
+ template <typename OtherCharType>
+ this_type& operator=(const OtherCharType* p);
+
+ template <typename OtherStringType>
+ this_type& operator=(const OtherStringType& x);
+ #endif
+
+ void swap(this_type& x); // TODO(c++17): noexcept(allocator_traits<Allocator>::propagate_on_container_swap::value || allocator_traits<Allocator>::is_always_equal::value);
+
+ // Assignment operations
+ this_type& assign(const this_type& x);
+ this_type& assign(const this_type& x, size_type position, size_type n = npos);
+ this_type& assign(const value_type* p, size_type n);
+ this_type& assign(const value_type* p);
+ this_type& assign(size_type n, value_type c);
+ this_type& assign(const value_type* pBegin, const value_type* pEnd);
+ this_type& assign(this_type&& x); // TODO(c++17): noexcept(allocator_traits<Allocator>::propagate_on_container_move_assignment::value || allocator_traits<Allocator>::is_always_equal::value);
+ this_type& assign(std::initializer_list<value_type>);
+
+ template <typename OtherCharType>
+ this_type& assign_convert(const OtherCharType* p);
+
+ template <typename OtherCharType>
+ this_type& assign_convert(const OtherCharType* p, size_type n);
+
+ template <typename OtherStringType>
+ this_type& assign_convert(const OtherStringType& x);
+
+ // Iterators.
+ iterator begin() EA_NOEXCEPT; // Expanded in source code as: mpBegin
+ const_iterator begin() const EA_NOEXCEPT; // Expanded in source code as: mpBegin
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT; // Expanded in source code as: mpEnd
+ const_iterator end() const EA_NOEXCEPT; // Expanded in source code as: mpEnd
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+
+ // Size-related functionality
+ bool empty() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+ size_type length() const EA_NOEXCEPT;
+ size_type max_size() const EA_NOEXCEPT;
+ size_type capacity() const EA_NOEXCEPT;
+ void resize(size_type n, value_type c);
+ void resize(size_type n);
+ void reserve(size_type = 0);
+ void set_capacity(size_type n = npos); // Revises the capacity to the user-specified value. Resizes the container to match the capacity if the requested capacity n is less than the current size. If n == npos then the capacity is reallocated (if necessary) such that capacity == size.
+ void force_size(size_type n); // Unilaterally moves the string end position (mpEnd) to the given location. Useful for when the user writes into the string via some extenal means such as C strcpy or sprintf. This allows for more efficient use than using resize to achieve this.
+ void shrink_to_fit();
+
+ // Raw access
+ const value_type* data() const EA_NOEXCEPT;
+ value_type* data() EA_NOEXCEPT;
+ const value_type* c_str() const EA_NOEXCEPT;
+
+ // Element access
+ reference operator[](size_type n);
+ const_reference operator[](size_type n) const;
+ reference at(size_type n);
+ const_reference at(size_type n) const;
+ reference front();
+ const_reference front() const;
+ reference back();
+ const_reference back() const;
+
+ // Append operations
+ this_type& operator+=(const this_type& x);
+ this_type& operator+=(const value_type* p);
+ this_type& operator+=(value_type c);
+
+ this_type& append(const this_type& x);
+ this_type& append(const this_type& x, size_type position, size_type n = npos);
+ this_type& append(const value_type* p, size_type n);
+ this_type& append(const value_type* p);
+ this_type& append(size_type n, value_type c);
+ this_type& append(const value_type* pBegin, const value_type* pEnd);
+
+ this_type& append_sprintf_va_list(const value_type* pFormat, va_list arguments);
+ this_type& append_sprintf(const value_type* pFormat, ...);
+
+ template <typename OtherCharType>
+ this_type& append_convert(const OtherCharType* p);
+
+ template <typename OtherCharType>
+ this_type& append_convert(const OtherCharType* p, size_type n);
+
+ template <typename OtherStringType>
+ this_type& append_convert(const OtherStringType& x);
+
+ void push_back(value_type c);
+ void pop_back();
+
+ // Insertion operations
+ this_type& insert(size_type position, const this_type& x);
+ this_type& insert(size_type position, const this_type& x, size_type beg, size_type n);
+ this_type& insert(size_type position, const value_type* p, size_type n);
+ this_type& insert(size_type position, const value_type* p);
+ this_type& insert(size_type position, size_type n, value_type c);
+ iterator insert(const_iterator p, value_type c);
+ iterator insert(const_iterator p, size_type n, value_type c);
+ iterator insert(const_iterator p, const value_type* pBegin, const value_type* pEnd);
+ iterator insert(const_iterator p, std::initializer_list<value_type>);
+
+ // Erase operations
+ this_type& erase(size_type position = 0, size_type n = npos);
+ iterator erase(const_iterator p);
+ iterator erase(const_iterator pBegin, const_iterator pEnd);
+ reverse_iterator erase(reverse_iterator position);
+ reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+ void clear() EA_NOEXCEPT;
+
+ // Detach memory
+ pointer detach() EA_NOEXCEPT;
+
+ // Replacement operations
+ this_type& replace(size_type position, size_type n, const this_type& x);
+ this_type& replace(size_type pos1, size_type n1, const this_type& x, size_type pos2, size_type n2 = npos);
+ this_type& replace(size_type position, size_type n1, const value_type* p, size_type n2);
+ this_type& replace(size_type position, size_type n1, const value_type* p);
+ this_type& replace(size_type position, size_type n1, size_type n2, value_type c);
+ this_type& replace(const_iterator first, const_iterator last, const this_type& x);
+ this_type& replace(const_iterator first, const_iterator last, const value_type* p, size_type n);
+ this_type& replace(const_iterator first, const_iterator last, const value_type* p);
+ this_type& replace(const_iterator first, const_iterator last, size_type n, value_type c);
+ this_type& replace(const_iterator first, const_iterator last, const value_type* pBegin, const value_type* pEnd);
+ size_type copy(value_type* p, size_type n, size_type position = 0) const;
+
+ // Find operations
+ size_type find(const this_type& x, size_type position = 0) const EA_NOEXCEPT;
+ size_type find(const value_type* p, size_type position = 0) const;
+ size_type find(const value_type* p, size_type position, size_type n) const;
+ size_type find(value_type c, size_type position = 0) const EA_NOEXCEPT;
+
+ // Reverse find operations
+ size_type rfind(const this_type& x, size_type position = npos) const EA_NOEXCEPT;
+ size_type rfind(const value_type* p, size_type position = npos) const;
+ size_type rfind(const value_type* p, size_type position, size_type n) const;
+ size_type rfind(value_type c, size_type position = npos) const EA_NOEXCEPT;
+
+ // Find first-of operations
+ size_type find_first_of(const this_type& x, size_type position = 0) const EA_NOEXCEPT;
+ size_type find_first_of(const value_type* p, size_type position = 0) const;
+ size_type find_first_of(const value_type* p, size_type position, size_type n) const;
+ size_type find_first_of(value_type c, size_type position = 0) const EA_NOEXCEPT;
+
+ // Find last-of operations
+ size_type find_last_of(const this_type& x, size_type position = npos) const EA_NOEXCEPT;
+ size_type find_last_of(const value_type* p, size_type position = npos) const;
+ size_type find_last_of(const value_type* p, size_type position, size_type n) const;
+ size_type find_last_of(value_type c, size_type position = npos) const EA_NOEXCEPT;
+
+ // Find first not-of operations
+ size_type find_first_not_of(const this_type& x, size_type position = 0) const EA_NOEXCEPT;
+ size_type find_first_not_of(const value_type* p, size_type position = 0) const;
+ size_type find_first_not_of(const value_type* p, size_type position, size_type n) const;
+ size_type find_first_not_of(value_type c, size_type position = 0) const EA_NOEXCEPT;
+
+ // Find last not-of operations
+ size_type find_last_not_of(const this_type& x, size_type position = npos) const EA_NOEXCEPT;
+ size_type find_last_not_of(const value_type* p, size_type position = npos) const;
+ size_type find_last_not_of(const value_type* p, size_type position, size_type n) const;
+ size_type find_last_not_of(value_type c, size_type position = npos) const EA_NOEXCEPT;
+
+ // Substring functionality
+ this_type substr(size_type position = 0, size_type n = npos) const;
+
+ // Comparison operations
+ int compare(const this_type& x) const EA_NOEXCEPT;
+ int compare(size_type pos1, size_type n1, const this_type& x) const;
+ int compare(size_type pos1, size_type n1, const this_type& x, size_type pos2, size_type n2) const;
+ int compare(const value_type* p) const;
+ int compare(size_type pos1, size_type n1, const value_type* p) const;
+ int compare(size_type pos1, size_type n1, const value_type* p, size_type n2) const;
+ static int compare(const value_type* pBegin1, const value_type* pEnd1, const value_type* pBegin2, const value_type* pEnd2);
+
+ // Case-insensitive comparison functions. Not part of C++ this_type. Only ASCII-level locale functionality is supported. Thus this is not suitable for localization purposes.
+ int comparei(const this_type& x) const EA_NOEXCEPT;
+ int comparei(const value_type* p) const;
+ static int comparei(const value_type* pBegin1, const value_type* pEnd1, const value_type* pBegin2, const value_type* pEnd2);
+
+ // Misc functionality, not part of C++ this_type.
+ void make_lower();
+ void make_upper();
+ void ltrim();
+ void rtrim();
+ void trim();
+ void ltrim(const value_type* p);
+ void rtrim(const value_type* p);
+ void trim(const value_type* p);
+ this_type left(size_type n) const;
+ this_type right(size_type n) const;
+ this_type& sprintf_va_list(const value_type* pFormat, va_list arguments);
+ this_type& sprintf(const value_type* pFormat, ...);
+
+ bool validate() const EA_NOEXCEPT;
+ int validate_iterator(const_iterator i) const EA_NOEXCEPT;
+
+
+ protected:
+ // Helper functions for initialization/insertion operations.
+ value_type* DoAllocate(size_type n);
+ void DoFree(value_type* p, size_type n);
+ size_type GetNewCapacity(size_type currentCapacity);
+ size_type GetNewCapacity(size_type currentCapacity, size_type minimumGrowSize);
+ void AllocateSelf();
+ void AllocateSelf(size_type n);
+ void DeallocateSelf();
+ iterator InsertInternal(const_iterator p, value_type c);
+ void RangeInitialize(const value_type* pBegin, const value_type* pEnd);
+ void RangeInitialize(const value_type* pBegin);
+ void SizeInitialize(size_type n, value_type c);
+
+ bool IsSSO() const EA_NOEXCEPT;
+
+ void ThrowLengthException() const;
+ void ThrowRangeException() const;
+ void ThrowInvalidArgumentException() const;
+
+ #if EASTL_OPERATOR_EQUALS_OTHER_ENABLED
+ template <typename CharType>
+ void DoAssignConvert(CharType c, true_type);
+
+ template <typename StringType>
+ void DoAssignConvert(const StringType& x, false_type);
+ #endif
+
+ // Replacements for STL template functions.
+ static const value_type* CharTypeStringFindEnd(const value_type* pBegin, const value_type* pEnd, value_type c);
+ static const value_type* CharTypeStringRFind(const value_type* pRBegin, const value_type* pREnd, const value_type c);
+ static const value_type* CharTypeStringSearch(const value_type* p1Begin, const value_type* p1End, const value_type* p2Begin, const value_type* p2End);
+ static const value_type* CharTypeStringRSearch(const value_type* p1Begin, const value_type* p1End, const value_type* p2Begin, const value_type* p2End);
+ static const value_type* CharTypeStringFindFirstOf(const value_type* p1Begin, const value_type* p1End, const value_type* p2Begin, const value_type* p2End);
+ static const value_type* CharTypeStringRFindFirstOf(const value_type* p1RBegin, const value_type* p1REnd, const value_type* p2Begin, const value_type* p2End);
+ static const value_type* CharTypeStringFindFirstNotOf(const value_type* p1Begin, const value_type* p1End, const value_type* p2Begin, const value_type* p2End);
+ static const value_type* CharTypeStringRFindFirstNotOf(const value_type* p1RBegin, const value_type* p1REnd, const value_type* p2Begin, const value_type* p2End);
+
+ }; // basic_string
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // basic_string
+ ///////////////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string() EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(EASTL_BASIC_STRING_DEFAULT_ALLOCATOR))
+ : mPair(allocator_type(EASTL_BASIC_STRING_DEFAULT_NAME))
+ {
+ AllocateSelf();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string(const allocator_type& allocator) EA_NOEXCEPT
+ : mPair(allocator)
+ {
+ AllocateSelf();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string(const this_type& x)
+ : mPair(x.get_allocator())
+ {
+ RangeInitialize(x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>::basic_string(const this_type& x, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ RangeInitialize(x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherStringType>
+ inline basic_string<T, Allocator>::basic_string(CtorConvert, const OtherStringType& x)
+ : mPair(x.get_allocator())
+ {
+ AllocateSelf();
+ append_convert(x.c_str(), x.length());
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>::basic_string(const this_type& x, size_type position, size_type n)
+ : mPair(x.get_allocator())
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if (EASTL_UNLIKELY(position > x.internalLayout().GetSize())) // 21.4.2 p4
+ {
+ ThrowRangeException();
+ AllocateSelf();
+ }
+ else
+ RangeInitialize(
+ x.internalLayout().BeginPtr() + position,
+ x.internalLayout().BeginPtr() + position + eastl::min_alt(n, x.internalLayout().GetSize() - position));
+ #else
+ RangeInitialize(
+ x.internalLayout().BeginPtr() + position,
+ x.internalLayout().BeginPtr() + position + eastl::min_alt(n, x.internalLayout().GetSize() - position));
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string(const value_type* p, size_type n, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ RangeInitialize(p, p + n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string(const view_type& sv, const allocator_type& allocator)
+ : basic_string(sv.data(), sv.size(), allocator)
+ {
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string(const view_type& sv, size_type position, size_type n, const allocator_type& allocator)
+ : basic_string(sv.substr(position, n), allocator)
+ {
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherCharType>
+ inline basic_string<T, Allocator>::basic_string(CtorConvert, const OtherCharType* p, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ AllocateSelf(); // In this case we are converting from one string encoding to another, and we
+ append_convert(p); // implement this in the simplest way, by simply default-constructing and calling assign.
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherCharType>
+ inline basic_string<T, Allocator>::basic_string(CtorConvert, const OtherCharType* p, size_type n, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ AllocateSelf(); // In this case we are converting from one string encoding to another, and we
+ append_convert(p, n); // implement this in the simplest way, by simply default-constructing and calling assign.
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string(const value_type* p, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ RangeInitialize(p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string(size_type n, value_type c, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ SizeInitialize(n, c);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::basic_string(const value_type* pBegin, const value_type* pEnd, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ RangeInitialize(pBegin, pEnd);
+ }
+
+
+ // CtorDoNotInitialize exists so that we can create a version that allocates but doesn't
+ // initialize but also doesn't collide with any other constructor declaration.
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>::basic_string(CtorDoNotInitialize /*unused*/, size_type n, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ // Note that we do not call SizeInitialize here.
+ AllocateSelf(n);
+ internalLayout().SetSize(0);
+ *internalLayout().EndPtr() = 0;
+ }
+
+
+ // CtorSprintf exists so that we can create a version that does a variable argument
+ // sprintf but also doesn't collide with any other constructor declaration.
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>::basic_string(CtorSprintf /*unused*/, const value_type* pFormat, ...)
+ : mPair()
+ {
+ const size_type n = (size_type)CharStrlen(pFormat);
+ AllocateSelf(n);
+ internalLayout().SetSize(0);
+
+ va_list arguments;
+ va_start(arguments, pFormat);
+ append_sprintf_va_list(pFormat, arguments);
+ va_end(arguments);
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>::basic_string(std::initializer_list<value_type> init, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ RangeInitialize(init.begin(), init.end());
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>::basic_string(this_type&& x) EA_NOEXCEPT
+ : mPair(x.get_allocator())
+ {
+ internalLayout() = eastl::move(x.internalLayout());
+ x.AllocateSelf();
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>::basic_string(this_type&& x, const allocator_type& allocator)
+ : mPair(allocator)
+ {
+ if(get_allocator() == x.get_allocator()) // If we can borrow from x...
+ {
+ internalLayout() = eastl::move(x.internalLayout());
+ x.AllocateSelf();
+ }
+ else if(x.internalLayout().BeginPtr())
+ {
+ RangeInitialize(x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ // Let x destruct its own items.
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>::~basic_string()
+ {
+ DeallocateSelf();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline const typename basic_string<T, Allocator>::allocator_type&
+ basic_string<T, Allocator>::get_allocator() const EA_NOEXCEPT
+ {
+ return internalAllocator();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::allocator_type&
+ basic_string<T, Allocator>::get_allocator() EA_NOEXCEPT
+ {
+ return internalAllocator();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::set_allocator(const allocator_type& allocator)
+ {
+ get_allocator() = allocator;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::data() const EA_NOEXCEPT
+ {
+ return internalLayout().BeginPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::c_str() const EA_NOEXCEPT
+ {
+ return internalLayout().BeginPtr();
+ }
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::data() EA_NOEXCEPT
+ {
+ return internalLayout().BeginPtr();
+ }
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::begin() EA_NOEXCEPT
+ {
+ return internalLayout().BeginPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::end() EA_NOEXCEPT
+ {
+ return internalLayout().EndPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_iterator
+ basic_string<T, Allocator>::begin() const EA_NOEXCEPT
+ {
+ return internalLayout().BeginPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_iterator
+ basic_string<T, Allocator>::cbegin() const EA_NOEXCEPT
+ {
+ return internalLayout().BeginPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_iterator
+ basic_string<T, Allocator>::end() const EA_NOEXCEPT
+ {
+ return internalLayout().EndPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_iterator
+ basic_string<T, Allocator>::cend() const EA_NOEXCEPT
+ {
+ return internalLayout().EndPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::reverse_iterator
+ basic_string<T, Allocator>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::reverse_iterator
+ basic_string<T, Allocator>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(internalLayout().BeginPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_reverse_iterator
+ basic_string<T, Allocator>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_reverse_iterator
+ basic_string<T, Allocator>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_reverse_iterator
+ basic_string<T, Allocator>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(internalLayout().BeginPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_reverse_iterator
+ basic_string<T, Allocator>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(internalLayout().BeginPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool basic_string<T, Allocator>::empty() const EA_NOEXCEPT
+ {
+ return (internalLayout().GetSize() == 0);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool basic_string<T, Allocator>::IsSSO() const EA_NOEXCEPT
+ {
+ return internalLayout().IsSSO();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::size() const EA_NOEXCEPT
+ {
+ return internalLayout().GetSize();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::length() const EA_NOEXCEPT
+ {
+ return internalLayout().GetSize();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::max_size() const EA_NOEXCEPT
+ {
+ return kMaxSize;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::capacity() const EA_NOEXCEPT
+ {
+ if (internalLayout().IsHeap())
+ {
+ return internalLayout().GetHeapCapacity();
+ }
+ return SSOLayout::SSO_CAPACITY;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_reference
+ basic_string<T, Allocator>::operator[](size_type n) const
+ {
+ #if EASTL_ASSERT_ENABLED // We allow the user to reference the trailing 0 char without asserting. Perhaps we shouldn't.
+ if(EASTL_UNLIKELY(n > internalLayout().GetSize()))
+ EASTL_FAIL_MSG("basic_string::operator[] -- out of range");
+ #endif
+
+ return internalLayout().BeginPtr()[n]; // Sometimes done as *(mpBegin + n)
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::reference
+ basic_string<T, Allocator>::operator[](size_type n)
+ {
+ #if EASTL_ASSERT_ENABLED // We allow the user to reference the trailing 0 char without asserting. Perhaps we shouldn't.
+ if(EASTL_UNLIKELY(n > internalLayout().GetSize()))
+ EASTL_FAIL_MSG("basic_string::operator[] -- out of range");
+ #endif
+
+ return internalLayout().BeginPtr()[n]; // Sometimes done as *(mpBegin + n)
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T,Allocator>::operator basic_string_view<T>() const EA_NOEXCEPT
+ {
+ return basic_string_view<T>(data(), size());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(const this_type& x)
+ {
+ if(&x != this)
+ {
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ bool bSlowerPathwayRequired = (get_allocator() != x.get_allocator());
+ #else
+ bool bSlowerPathwayRequired = false;
+ #endif
+
+ if(bSlowerPathwayRequired)
+ {
+ set_capacity(0); // Must use set_capacity instead of clear because set_capacity frees our memory, unlike clear.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ get_allocator() = x.get_allocator();
+ #endif
+ }
+
+ assign(x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+ return *this;
+ }
+
+
+ #if EASTL_OPERATOR_EQUALS_OTHER_ENABLED
+ template <typename T, typename Allocator>
+ template <typename CharType>
+ inline void basic_string<T, Allocator>::DoAssignConvert(CharType c, true_type)
+ {
+ assign_convert(&c, 1); // Call this version of append because it will result in the encoding-converting append being used.
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename StringType>
+ inline void basic_string<T, Allocator>::DoAssignConvert(const StringType& x, false_type)
+ {
+ //if(&x != this) // Unnecessary because &x cannot possibly equal this.
+ {
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ get_allocator() = x.get_allocator();
+ #endif
+
+ assign_convert(x.c_str(), x.length());
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherStringType>
+ inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(const OtherStringType& x)
+ {
+ clear();
+ DoAssignConvert(x, is_integral<OtherStringType>());
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherCharType>
+ inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(const OtherCharType* p)
+ {
+ return assign_convert(p);
+ }
+ #endif
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(const value_type* p)
+ {
+ return assign(p, p + CharStrlen(p));
+ }
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(value_type c)
+ {
+ return assign((size_type)1, c);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(this_type&& x)
+ {
+ return assign(eastl::move(x));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ return assign(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(view_type v)
+ {
+ return assign(v.data(), static_cast<this_type::size_type>(v.size()));
+ }
+
+
+ template <typename T, typename Allocator>
+ void basic_string<T, Allocator>::resize(size_type n, value_type c)
+ {
+ const size_type s = internalLayout().GetSize();
+
+ if(n < s)
+ erase(internalLayout().BeginPtr() + n, internalLayout().EndPtr());
+ else if(n > s)
+ append(n - s, c);
+ }
+
+
+ template <typename T, typename Allocator>
+ void basic_string<T, Allocator>::resize(size_type n)
+ {
+ // C++ basic_string specifies that resize(n) is equivalent to resize(n, value_type()).
+ // For built-in types, value_type() is the same as zero (value_type(0)).
+ // We can improve the efficiency (especially for long strings) of this
+ // string class by resizing without assigning to anything.
+
+ const size_type s = internalLayout().GetSize();
+
+ if(n < s)
+ erase(internalLayout().BeginPtr() + n, internalLayout().EndPtr());
+ else if(n > s)
+ {
+ append(n - s, value_type());
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void basic_string<T, Allocator>::reserve(size_type n)
+ {
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY(n > max_size()))
+ ThrowLengthException();
+ #endif
+
+ // C++20 says if the passed in capacity is less than the current capacity we do not shrink
+ // If new_cap is less than or equal to the current capacity(), there is no effect.
+ // http://en.cppreference.com/w/cpp/string/basic_string/reserve
+
+ n = eastl::max_alt(n, internalLayout().GetSize()); // Calculate the new capacity, which needs to be >= container size.
+
+ if(n > capacity())
+ set_capacity(n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::shrink_to_fit()
+ {
+ set_capacity(internalLayout().GetSize());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::set_capacity(size_type n)
+ {
+ if(n == npos)
+ // If the user wants to set the capacity to equal the current size...
+ // '-1' because we pretend that we didn't allocate memory for the terminating 0.
+ n = internalLayout().GetSize();
+ else if(n < internalLayout().GetSize())
+ {
+ internalLayout().SetSize(n);
+ *internalLayout().EndPtr() = 0;
+ }
+
+ if((n < capacity() && internalLayout().IsHeap()) || (n > capacity()))
+ {
+ // In here the string is transition from heap->heap, heap->sso or sso->heap
+
+ if(EASTL_LIKELY(n))
+ {
+
+ if(n <= SSOLayout::SSO_CAPACITY)
+ {
+ // heap->sso
+ // A heap based layout wants to reduce its size to within sso capacity
+ // An sso layout wanting to reduce its capacity will not get in here
+ pointer pOldBegin = internalLayout().BeginPtr();
+ const size_type nOldCap = internalLayout().GetHeapCapacity();
+
+ CharStringUninitializedCopy(pOldBegin, pOldBegin + n, internalLayout().SSOBeginPtr());
+ internalLayout().SetSSOSize(n);
+ *internalLayout().SSOEndPtr() = 0;
+
+ DoFree(pOldBegin, nOldCap + 1);
+
+ return;
+ }
+
+ pointer pNewBegin = DoAllocate(n + 1); // We need the + 1 to accomodate the trailing 0.
+ size_type nSavedSize = internalLayout().GetSize(); // save the size in case we transition from sso->heap
+
+ pointer pNewEnd = CharStringUninitializedCopy(internalLayout().BeginPtr(), internalLayout().EndPtr(), pNewBegin);
+ *pNewEnd = 0;
+
+ DeallocateSelf();
+
+ internalLayout().SetHeapBeginPtr(pNewBegin);
+ internalLayout().SetHeapCapacity(n);
+ internalLayout().SetHeapSize(nSavedSize);
+ }
+ else
+ {
+ DeallocateSelf();
+ AllocateSelf();
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::force_size(size_type n)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(n > capacity()))
+ ThrowRangeException();
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n > capacity()))
+ EASTL_FAIL_MSG("basic_string::force_size -- out of range");
+ #endif
+
+ internalLayout().SetSize(n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::clear() EA_NOEXCEPT
+ {
+ internalLayout().SetSize(0);
+ *internalLayout().BeginPtr() = value_type(0);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::pointer
+ basic_string<T, Allocator>::detach() EA_NOEXCEPT
+ {
+ // The detach function is an extension function which simply forgets the
+ // owned pointer. It doesn't free it but rather assumes that the user
+ // does. If the string is utilizing the short-string-optimization when a
+ // detach is requested, a copy of the string into a seperate memory
+ // allocation occurs and the owning pointer is given to the user who is
+ // responsible for freeing the memory.
+
+ pointer pDetached = nullptr;
+
+ if (internalLayout().IsSSO())
+ {
+ const size_type n = internalLayout().GetSize() + 1; // +1' so that we have room for the terminating 0.
+ pDetached = DoAllocate(n);
+ pointer pNewEnd = CharStringUninitializedCopy(internalLayout().BeginPtr(), internalLayout().EndPtr(), pDetached);
+ *pNewEnd = 0;
+ }
+ else
+ {
+ pDetached = internalLayout().BeginPtr();
+ }
+
+ AllocateSelf(); // reset to string to empty
+ return pDetached;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_reference
+ basic_string<T, Allocator>::at(size_type n) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(n >= internalLayout().GetSize()))
+ ThrowRangeException();
+ #elif EASTL_ASSERT_ENABLED // We assert if the user references the trailing 0 char.
+ if(EASTL_UNLIKELY(n >= internalLayout().GetSize()))
+ EASTL_FAIL_MSG("basic_string::at -- out of range");
+ #endif
+
+ return internalLayout().BeginPtr()[n];
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::reference
+ basic_string<T, Allocator>::at(size_type n)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(n >= internalLayout().GetSize()))
+ ThrowRangeException();
+ #elif EASTL_ASSERT_ENABLED // We assert if the user references the trailing 0 char.
+ if(EASTL_UNLIKELY(n >= internalLayout().GetSize()))
+ EASTL_FAIL_MSG("basic_string::at -- out of range");
+ #endif
+
+ return internalLayout().BeginPtr()[n];
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::reference
+ basic_string<T, Allocator>::front()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(internalLayout().GetSize() <= 0)) // We assert if the user references the trailing 0 char.
+ EASTL_FAIL_MSG("basic_string::front -- empty string");
+ #else
+ // We allow the user to reference the trailing 0 char without asserting.
+ #endif
+
+ return *internalLayout().BeginPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_reference
+ basic_string<T, Allocator>::front() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(internalLayout().GetSize() <= 0)) // We assert if the user references the trailing 0 char.
+ EASTL_FAIL_MSG("basic_string::front -- empty string");
+ #else
+ // We allow the user to reference the trailing 0 char without asserting.
+ #endif
+
+ return *internalLayout().BeginPtr();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::reference
+ basic_string<T, Allocator>::back()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(internalLayout().GetSize() <= 0)) // We assert if the user references the trailing 0 char.
+ EASTL_FAIL_MSG("basic_string::back -- empty string");
+ #else
+ // We allow the user to reference the trailing 0 char without asserting.
+ #endif
+
+ return *(internalLayout().EndPtr() - 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::const_reference
+ basic_string<T, Allocator>::back() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(internalLayout().GetSize() <= 0)) // We assert if the user references the trailing 0 char.
+ EASTL_FAIL_MSG("basic_string::back -- empty string");
+ #else
+ // We allow the user to reference the trailing 0 char without asserting.
+ #endif
+
+ return *(internalLayout().EndPtr() - 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::operator+=(const this_type& x)
+ {
+ return append(x);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::operator+=(const value_type* p)
+ {
+ return append(p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::operator+=(value_type c)
+ {
+ push_back(c);
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::append(const this_type& x)
+ {
+ return append(x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::append(const this_type& x, size_type position, size_type n)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position >= x.internalLayout().GetSize())) // position must be < x.mpEnd, but position + n may be > mpEnd.
+ ThrowRangeException();
+ #endif
+
+ return append(x.internalLayout().BeginPtr() + position,
+ x.internalLayout().BeginPtr() + position + eastl::min_alt(n, x.internalLayout().GetSize() - position));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::append(const value_type* p, size_type n)
+ {
+ return append(p, p + n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::append(const value_type* p)
+ {
+ return append(p, p + CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherCharType>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::append_convert(const OtherCharType* pOther)
+ {
+ return append_convert(pOther, (size_type)CharStrlen(pOther));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherStringType>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::append_convert(const OtherStringType& x)
+ {
+ return append_convert(x.c_str(), x.length());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherCharType>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::append_convert(const OtherCharType* pOther, size_type n)
+ {
+ // Question: What do we do in the case that we have an illegally encoded source string?
+ // This can happen with UTF8 strings. Do we throw an exception or do we ignore the input?
+ // One argument is that it's not a string class' job to handle the security aspects of a
+ // program and the higher level application code should be verifying UTF8 string validity,
+ // and thus we should do the friendly thing and ignore the invalid characters as opposed
+ // to making the user of this function handle exceptions that are easily forgotten.
+
+ const size_t kBufferSize = 512;
+ value_type selfBuffer[kBufferSize]; // This assumes that value_type is one of char8_t, char16_t, char32_t, or wchar_t. Or more importantly, a type with a trivial constructor and destructor.
+ value_type* const selfBufferEnd = selfBuffer + kBufferSize;
+ const OtherCharType* pOtherEnd = pOther + n;
+
+ while(pOther != pOtherEnd)
+ {
+ value_type* pSelfBufferCurrent = selfBuffer;
+ DecodePart(pOther, pOtherEnd, pSelfBufferCurrent, selfBufferEnd); // Write pOther to pSelfBuffer, converting encoding as we go. We currently ignore the return value, as we don't yet have a plan for handling encoding errors.
+ append(selfBuffer, pSelfBufferCurrent);
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::append(size_type n, value_type c)
+ {
+ if (n > 0)
+ {
+ const size_type nSize = internalLayout().GetSize();
+ const size_type nCapacity = capacity();
+
+ if((nSize + n) > nCapacity)
+ reserve(GetNewCapacity(nCapacity, (nSize + n) - nCapacity));
+
+ pointer pNewEnd = CharStringUninitializedFillN(internalLayout().EndPtr(), n, c);
+ *pNewEnd = 0;
+ internalLayout().SetSize(nSize + n);
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::append(const value_type* pBegin, const value_type* pEnd)
+ {
+ if(pBegin != pEnd)
+ {
+ const size_type nOldSize = internalLayout().GetSize();
+ const size_type n = (size_type)(pEnd - pBegin);
+ const size_type nCapacity = capacity();
+ const size_type nNewSize = nOldSize + n;
+
+ if(nNewSize > nCapacity)
+ {
+ const size_type nLength = GetNewCapacity(nCapacity, nNewSize - nCapacity);
+
+ pointer pNewBegin = DoAllocate(nLength + 1);
+
+ pointer pNewEnd = CharStringUninitializedCopy(internalLayout().BeginPtr(), internalLayout().EndPtr(), pNewBegin);
+ pNewEnd = CharStringUninitializedCopy(pBegin, pEnd, pNewEnd);
+ *pNewEnd = 0;
+
+ DeallocateSelf();
+ internalLayout().SetHeapBeginPtr(pNewBegin);
+ internalLayout().SetHeapCapacity(nLength);
+ internalLayout().SetHeapSize(nNewSize);
+ }
+ else
+ {
+ pointer pNewEnd = CharStringUninitializedCopy(pBegin, pEnd, internalLayout().EndPtr());
+ *pNewEnd = 0;
+ internalLayout().SetSize(nNewSize);
+ }
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::append_sprintf_va_list(const value_type* pFormat, va_list arguments)
+ {
+ // From unofficial C89 extension documentation:
+ // The vsnprintf returns the number of characters written into the array,
+ // not counting the terminating null character, or a negative value
+ // if count or more characters are requested to be generated.
+ // An error can occur while converting a value for output.
+
+ // From the C99 standard:
+ // The vsnprintf function returns the number of characters that would have
+ // been written had n been sufficiently large, not counting the terminating
+ // null character, or a negative value if an encoding error occurred.
+ // Thus, the null-terminated output has been completely written if and only
+ // if the returned value is nonnegative and less than n.
+
+ // https://www.freebsd.org/cgi/man.cgi?query=vswprintf&sektion=3&manpath=freebsd-release-ports
+ // https://www.freebsd.org/cgi/man.cgi?query=snprintf&manpath=SuSE+Linux/i386+11.3
+ // Well its time to go on an adventure...
+ // C99 vsnprintf states that a buffer size of zero returns the number of characters that would
+ // be written to the buffer irrelevant of whether the buffer is a nullptr
+ // But C99 vswprintf for wchar_t changes the behaviour of the return to instead say that it
+ // "will fail if n or more wide characters were requested to be written", so
+ // calling vswprintf with a buffer size of zero always returns -1
+ // unless... you are MSVC where they deviate from the std and say if the buffer is NULL
+ // and the size is zero it will return the number of characters written or if we are using
+ // EAStdC which also does the sane behaviour.
+
+#if !EASTL_OPENSOURCE || defined(EA_PLATFORM_MICROSOFT)
+ size_type nInitialSize = internalLayout().GetSize();
+ int nReturnValue;
+
+ #if EASTL_VA_COPY_ENABLED
+ va_list argumentsSaved;
+ va_copy(argumentsSaved, arguments);
+ #endif
+
+ nReturnValue = eastl::Vsnprintf(nullptr, 0, pFormat, arguments);
+
+ if (nReturnValue > 0)
+ {
+ resize(nReturnValue + nInitialSize);
+
+ #if EASTL_VA_COPY_ENABLED
+ va_end(arguments);
+ va_copy(arguments, argumentsSaved);
+ #endif
+
+ nReturnValue = eastl::Vsnprintf(internalLayout().BeginPtr() + nInitialSize, static_cast<size_t>(nReturnValue) + 1, pFormat, arguments);
+ }
+
+ if (nReturnValue >= 0)
+ {
+ internalLayout().SetSize(nInitialSize + nReturnValue);
+ }
+
+ #if EASTL_VA_COPY_ENABLED
+ // va_end for arguments will be called by the caller.
+ va_end(argumentsSaved);
+ #endif
+
+#else
+ size_type nInitialSize = internalLayout().GetSize();
+ size_type nInitialRemainingCapacity = internalLayout().GetRemainingCapacity();
+ int nReturnValue;
+
+ #if EASTL_VA_COPY_ENABLED
+ va_list argumentsSaved;
+ va_copy(argumentsSaved, arguments);
+ #endif
+
+ nReturnValue = eastl::Vsnprintf(internalLayout().EndPtr(), (size_t)nInitialRemainingCapacity + 1,
+ pFormat, arguments);
+
+ if(nReturnValue >= (int)(nInitialRemainingCapacity + 1)) // If there wasn't enough capacity...
+ {
+ // In this case we definitely have C99 Vsnprintf behaviour.
+ #if EASTL_VA_COPY_ENABLED
+ va_end(arguments);
+ va_copy(arguments, argumentsSaved);
+ #endif
+ resize(nInitialSize + nReturnValue);
+ nReturnValue = eastl::Vsnprintf(internalLayout().BeginPtr() + nInitialSize, (size_t)(nReturnValue + 1),
+ pFormat, arguments);
+ }
+ else if(nReturnValue < 0) // If vsnprintf is non-C99-standard
+ {
+ // In this case we either have C89 extension behaviour or C99 behaviour.
+ size_type n = eastl::max_alt((size_type)(SSOLayout::SSO_CAPACITY - 1), (size_type)(nInitialSize * 2));
+
+ for(; (nReturnValue < 0) && (n < 1000000); n *= 2)
+ {
+ #if EASTL_VA_COPY_ENABLED
+ va_end(arguments);
+ va_copy(arguments, argumentsSaved);
+ #endif
+ resize(n);
+
+ const size_t nCapacity = (size_t)(n - nInitialSize);
+ nReturnValue = eastl::Vsnprintf(internalLayout().BeginPtr() + nInitialSize, nCapacity + 1, pFormat, arguments);
+
+ if(nReturnValue == (int)(unsigned)nCapacity)
+ {
+ resize(++n);
+ nReturnValue = eastl::Vsnprintf(internalLayout().BeginPtr() + nInitialSize, nCapacity + 2, pFormat, arguments);
+ }
+ }
+ }
+
+ if(nReturnValue >= 0)
+ {
+ internalLayout().SetSize(nInitialSize + nReturnValue);
+ }
+
+ #if EASTL_VA_COPY_ENABLED
+ // va_end for arguments will be called by the caller.
+ va_end(argumentsSaved);
+ #endif
+
+#endif // EASTL_OPENSOURCE
+
+ return *this;
+ }
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::append_sprintf(const value_type* pFormat, ...)
+ {
+ va_list arguments;
+ va_start(arguments, pFormat);
+ append_sprintf_va_list(pFormat, arguments);
+ va_end(arguments);
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::push_back(value_type c)
+ {
+ append((size_type)1, c);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::pop_back()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(internalLayout().GetSize() <= 0))
+ EASTL_FAIL_MSG("basic_string::pop_back -- empty string");
+ #endif
+
+ internalLayout().EndPtr()[-1] = value_type(0);
+ internalLayout().SetSize(internalLayout().GetSize() - 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const this_type& x)
+ {
+ // The C++11 Standard 21.4.6.3 p6 specifies that assign from this_type assigns contents only and not the allocator.
+ return assign(x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const this_type& x, size_type position, size_type n)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > x.internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ // The C++11 Standard 21.4.6.3 p6 specifies that assign from this_type assigns contents only and not the allocator.
+ return assign(
+ x.internalLayout().BeginPtr() + position,
+ x.internalLayout().BeginPtr() + position + eastl::min_alt(n, x.internalLayout().GetSize() - position));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const value_type* p, size_type n)
+ {
+ return assign(p, p + n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const value_type* p)
+ {
+ return assign(p, p + CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::assign(size_type n, value_type c)
+ {
+ if(n <= internalLayout().GetSize())
+ {
+ CharTypeAssignN(internalLayout().BeginPtr(), n, c);
+ erase(internalLayout().BeginPtr() + n, internalLayout().EndPtr());
+ }
+ else
+ {
+ CharTypeAssignN(internalLayout().BeginPtr(), internalLayout().GetSize(), c);
+ append(n - internalLayout().GetSize(), c);
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const value_type* pBegin, const value_type* pEnd)
+ {
+ const size_type n = (size_type)(pEnd - pBegin);
+ if(n <= internalLayout().GetSize())
+ {
+ memmove(internalLayout().BeginPtr(), pBegin, (size_t)n * sizeof(value_type));
+ erase(internalLayout().BeginPtr() + n, internalLayout().EndPtr());
+ }
+ else
+ {
+ memmove(internalLayout().BeginPtr(), pBegin, (size_t)(internalLayout().GetSize()) * sizeof(value_type));
+ append(pBegin + internalLayout().GetSize(), pEnd);
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(std::initializer_list<value_type> ilist)
+ {
+ return assign(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(this_type&& x)
+ {
+ if(get_allocator() == x.get_allocator())
+ {
+ eastl::swap(internalLayout(), x.internalLayout());
+ }
+ else
+ assign(x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherCharType>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::assign_convert(const OtherCharType* p)
+ {
+ clear();
+ append_convert(p);
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherCharType>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::assign_convert(const OtherCharType* p, size_type n)
+ {
+ clear();
+ append_convert(p, n);
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename OtherStringType>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::assign_convert(const OtherStringType& x)
+ {
+ clear();
+ append_convert(x.data(), x.length());
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, const this_type& x)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY(internalLayout().GetSize() > (max_size() - x.internalLayout().GetSize())))
+ ThrowLengthException();
+ #endif
+
+ insert(internalLayout().BeginPtr() + position, x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, const this_type& x, size_type beg, size_type n)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY((position > internalLayout().GetSize()) || (beg > x.internalLayout().GetSize())))
+ ThrowRangeException();
+ #endif
+
+ size_type nLength = eastl::min_alt(n, x.internalLayout().GetSize() - beg);
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY(internalLayout().GetSize() > (max_size() - nLength)))
+ ThrowLengthException();
+ #endif
+
+ insert(internalLayout().BeginPtr() + position, x.internalLayout().BeginPtr() + beg, x.internalLayout().BeginPtr() + beg + nLength);
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, const value_type* p, size_type n)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY(internalLayout().GetSize() > (max_size() - n)))
+ ThrowLengthException();
+ #endif
+
+ insert(internalLayout().BeginPtr() + position, p, p + n);
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, const value_type* p)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ size_type nLength = (size_type)CharStrlen(p);
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY(internalLayout().GetSize() > (max_size() - nLength)))
+ ThrowLengthException();
+ #endif
+
+ insert(internalLayout().BeginPtr() + position, p, p + nLength);
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, size_type n, value_type c)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY(internalLayout().GetSize() > (max_size() - n)))
+ ThrowLengthException();
+ #endif
+
+ insert(internalLayout().BeginPtr() + position, n, c);
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::insert(const_iterator p, value_type c)
+ {
+ if(p == internalLayout().EndPtr())
+ {
+ push_back(c);
+ return internalLayout().EndPtr() - 1;
+ }
+ return InsertInternal(p, c);
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::insert(const_iterator p, size_type n, value_type c)
+ {
+ const difference_type nPosition = (p - internalLayout().BeginPtr()); // Save this because we might reallocate.
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((p < internalLayout().BeginPtr()) || (p > internalLayout().EndPtr())))
+ EASTL_FAIL_MSG("basic_string::insert -- invalid position");
+ #endif
+
+ if(n) // If there is anything to insert...
+ {
+ if(internalLayout().GetRemainingCapacity() >= n) // If we have enough capacity...
+ {
+ const size_type nElementsAfter = (size_type)(internalLayout().EndPtr() - p);
+
+ if(nElementsAfter >= n) // If there's enough space for the new chars between the insert position and the end...
+ {
+ // Ensure we save the size before we do the copy, as we might overwrite the size field with the NULL
+ // terminator in the edge case where we are inserting enough characters to equal our capacity
+ const size_type nSavedSize = internalLayout().GetSize();
+ CharStringUninitializedCopy((internalLayout().EndPtr() - n) + 1, internalLayout().EndPtr() + 1, internalLayout().EndPtr() + 1);
+ internalLayout().SetSize(nSavedSize + n);
+ memmove(const_cast<value_type*>(p) + n, p, (size_t)((nElementsAfter - n) + 1) * sizeof(value_type));
+ CharTypeAssignN(const_cast<value_type*>(p), n, c);
+ }
+ else
+ {
+ pointer pOldEnd = internalLayout().EndPtr();
+ #if EASTL_EXCEPTIONS_ENABLED
+ const size_type nOldSize = internalLayout().GetSize();
+ #endif
+ CharStringUninitializedFillN(internalLayout().EndPtr() + 1, n - nElementsAfter - 1, c);
+ internalLayout().SetSize(internalLayout().GetSize() + (n - nElementsAfter));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // See comment in if block above
+ const size_type nSavedSize = internalLayout().GetSize();
+ CharStringUninitializedCopy(p, pOldEnd + 1, internalLayout().EndPtr());
+ internalLayout().SetSize(nSavedSize + nElementsAfter);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ internalLayout().SetSize(nOldSize);
+ throw;
+ }
+ #endif
+
+ CharTypeAssignN(const_cast<value_type*>(p), nElementsAfter + 1, c);
+ }
+ }
+ else
+ {
+ const size_type nOldSize = internalLayout().GetSize();
+ const size_type nOldCap = capacity();
+ const size_type nLength = GetNewCapacity(nOldCap, (nOldSize + n) - nOldCap);
+
+ iterator pNewBegin = DoAllocate(nLength + 1);
+
+ iterator pNewEnd = CharStringUninitializedCopy(internalLayout().BeginPtr(), p, pNewBegin);
+ pNewEnd = CharStringUninitializedFillN(pNewEnd, n, c);
+ pNewEnd = CharStringUninitializedCopy(p, internalLayout().EndPtr(), pNewEnd);
+ *pNewEnd = 0;
+
+ DeallocateSelf();
+ internalLayout().SetHeapBeginPtr(pNewBegin);
+ internalLayout().SetHeapCapacity(nLength);
+ internalLayout().SetHeapSize(nOldSize + n);
+ }
+ }
+
+ return internalLayout().BeginPtr() + nPosition;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::insert(const_iterator p, const value_type* pBegin, const value_type* pEnd)
+ {
+ const difference_type nPosition = (p - internalLayout().BeginPtr()); // Save this because we might reallocate.
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((p < internalLayout().BeginPtr()) || (p > internalLayout().EndPtr())))
+ EASTL_FAIL_MSG("basic_string::insert -- invalid position");
+ #endif
+
+ const size_type n = (size_type)(pEnd - pBegin);
+
+ if(n)
+ {
+ const bool bCapacityIsSufficient = (internalLayout().GetRemainingCapacity() >= n);
+ const bool bSourceIsFromSelf = ((pEnd >= internalLayout().BeginPtr()) && (pBegin <= internalLayout().EndPtr()));
+
+ if(bSourceIsFromSelf && internalLayout().IsSSO())
+ {
+ // pBegin to pEnd will be <= this->GetSize(), so stackTemp will guaranteed be an SSO String
+ // If we are inserting ourself into ourself and we are SSO, then on the recursive call we can
+ // guarantee 0 or 1 allocation depending if we need to realloc
+ // We don't do this for Heap strings as then this path may do 1 or 2 allocations instead of
+ // only 1 allocation when we fall through to the last else case below
+ const this_type stackTemp(pBegin, pEnd, get_allocator());
+ return insert(p, stackTemp.data(), stackTemp.data() + stackTemp.size());
+ }
+
+ // If bSourceIsFromSelf is true, then we reallocate. This is because we are
+ // inserting ourself into ourself and thus both the source and destination
+ // be modified, making it rather tricky to attempt to do in place. The simplest
+ // resolution is to reallocate. To consider: there may be a way to implement this
+ // whereby we don't need to reallocate or can often avoid reallocating.
+ if(bCapacityIsSufficient && !bSourceIsFromSelf)
+ {
+ const size_type nElementsAfter = (size_type)(internalLayout().EndPtr() - p);
+
+ if(nElementsAfter >= n) // If there are enough characters between insert pos and end
+ {
+ // Ensure we save the size before we do the copy, as we might overwrite the size field with the NULL
+ // terminator in the edge case where we are inserting enough characters to equal our capacity
+ const size_type nSavedSize = internalLayout().GetSize();
+ CharStringUninitializedCopy((internalLayout().EndPtr() - n) + 1, internalLayout().EndPtr() + 1, internalLayout().EndPtr() + 1);
+ internalLayout().SetSize(nSavedSize + n);
+ memmove(const_cast<value_type*>(p) + n, p, (size_t)((nElementsAfter - n) + 1) * sizeof(value_type));
+ memmove(const_cast<value_type*>(p), pBegin, (size_t)(n) * sizeof(value_type));
+ }
+ else
+ {
+ pointer pOldEnd = internalLayout().EndPtr();
+ #if EASTL_EXCEPTIONS_ENABLED
+ const size_type nOldSize = internalLayout().GetSize();
+ #endif
+ const value_type* const pMid = pBegin + (nElementsAfter + 1);
+
+ CharStringUninitializedCopy(pMid, pEnd, internalLayout().EndPtr() + 1);
+ internalLayout().SetSize(internalLayout().GetSize() + (n - nElementsAfter));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // See comment in if block above
+ const size_type nSavedSize = internalLayout().GetSize();
+ CharStringUninitializedCopy(p, pOldEnd + 1, internalLayout().EndPtr());
+ internalLayout().SetSize(nSavedSize + nElementsAfter);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ internalLayout().SetSize(nOldSize);
+ throw;
+ }
+ #endif
+
+ CharStringUninitializedCopy(pBegin, pMid, const_cast<value_type*>(p));
+ }
+ }
+ else // Else we need to reallocate to implement this.
+ {
+ const size_type nOldSize = internalLayout().GetSize();
+ const size_type nOldCap = capacity();
+ size_type nLength;
+
+ if(bCapacityIsSufficient) // If bCapacityIsSufficient is true, then bSourceIsFromSelf must be true.
+ nLength = nOldSize + n;
+ else
+ nLength = GetNewCapacity(nOldCap, (nOldSize + n) - nOldCap);
+
+ pointer pNewBegin = DoAllocate(nLength + 1);
+
+ pointer pNewEnd = CharStringUninitializedCopy(internalLayout().BeginPtr(), p, pNewBegin);
+ pNewEnd = CharStringUninitializedCopy(pBegin, pEnd, pNewEnd);
+ pNewEnd = CharStringUninitializedCopy(p, internalLayout().EndPtr(), pNewEnd);
+ *pNewEnd = 0;
+
+ DeallocateSelf();
+ internalLayout().SetHeapBeginPtr(pNewBegin);
+ internalLayout().SetHeapCapacity(nLength);
+ internalLayout().SetHeapSize(nOldSize + n);
+ }
+ }
+
+ return internalLayout().BeginPtr() + nPosition;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::insert(const_iterator p, std::initializer_list<value_type> ilist)
+ {
+ return insert(p, ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::erase(size_type position, size_type n)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ EASTL_FAIL_MSG("basic_string::erase -- invalid position");
+ #endif
+
+ erase(internalLayout().BeginPtr() + position,
+ internalLayout().BeginPtr() + position + eastl::min_alt(n, internalLayout().GetSize() - position));
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::erase(const_iterator p)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((p < internalLayout().BeginPtr()) || (p >= internalLayout().EndPtr())))
+ EASTL_FAIL_MSG("basic_string::erase -- invalid position");
+ #endif
+
+ memmove(const_cast<value_type*>(p), p + 1, (size_t)(internalLayout().EndPtr() - p) * sizeof(value_type));
+ internalLayout().SetSize(internalLayout().GetSize() - 1);
+ return const_cast<value_type*>(p);
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::erase(const_iterator pBegin, const_iterator pEnd)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((pBegin < internalLayout().BeginPtr()) || (pBegin > internalLayout().EndPtr()) ||
+ (pEnd < internalLayout().BeginPtr()) || (pEnd > internalLayout().EndPtr()) || (pEnd < pBegin)))
+ EASTL_FAIL_MSG("basic_string::erase -- invalid position");
+ #endif
+
+ if(pBegin != pEnd)
+ {
+ memmove(const_cast<value_type*>(pBegin), pEnd, (size_t)((internalLayout().EndPtr() - pEnd) + 1) * sizeof(value_type));
+ const size_type n = (size_type)(pEnd - pBegin);
+ internalLayout().SetSize(internalLayout().GetSize() - n);
+ }
+ return const_cast<value_type*>(pBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::reverse_iterator
+ basic_string<T, Allocator>::erase(reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::reverse_iterator
+ basic_string<T, Allocator>::erase(reverse_iterator first, reverse_iterator last)
+ {
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type position, size_type n, const this_type& x)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ const size_type nLength = eastl::min_alt(n, internalLayout().GetSize() - position);
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY((internalLayout().GetSize() - nLength) >= (max_size() - x.internalLayout().GetSize())))
+ ThrowLengthException();
+ #endif
+
+ return replace(internalLayout().BeginPtr() + position, internalLayout().BeginPtr() + position + nLength, x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type pos1, size_type n1, const this_type& x, size_type pos2, size_type n2)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY((pos1 > internalLayout().GetSize()) || (pos2 > x.internalLayout().GetSize())))
+ ThrowRangeException();
+ #endif
+
+ const size_type nLength1 = eastl::min_alt(n1, internalLayout().GetSize() - pos1);
+ const size_type nLength2 = eastl::min_alt(n2, x.internalLayout().GetSize() - pos2);
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY((internalLayout().GetSize() - nLength1) >= (max_size() - nLength2)))
+ ThrowLengthException();
+ #endif
+
+ return replace(internalLayout().BeginPtr() + pos1, internalLayout().BeginPtr() + pos1 + nLength1, x.internalLayout().BeginPtr() + pos2, x.internalLayout().BeginPtr() + pos2 + nLength2);
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type position, size_type n1, const value_type* p, size_type n2)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ const size_type nLength = eastl::min_alt(n1, internalLayout().GetSize() - position);
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY((n2 > max_size()) || ((internalLayout().GetSize() - nLength) >= (max_size() - n2))))
+ ThrowLengthException();
+ #endif
+
+ return replace(internalLayout().BeginPtr() + position, internalLayout().BeginPtr() + position + nLength, p, p + n2);
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type position, size_type n1, const value_type* p)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ const size_type nLength = eastl::min_alt(n1, internalLayout().GetSize() - position);
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ const size_type n2 = (size_type)CharStrlen(p);
+ if(EASTL_UNLIKELY((n2 > max_size()) || ((internalLayout().GetSize() - nLength) >= (max_size() - n2))))
+ ThrowLengthException();
+ #endif
+
+ return replace(internalLayout().BeginPtr() + position, internalLayout().BeginPtr() + position + nLength, p, p + CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type position, size_type n1, size_type n2, value_type c)
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ const size_type nLength = eastl::min_alt(n1, internalLayout().GetSize() - position);
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY((n2 > max_size()) || (internalLayout().GetSize() - nLength) >= (max_size() - n2)))
+ ThrowLengthException();
+ #endif
+
+ return replace(internalLayout().BeginPtr() + position, internalLayout().BeginPtr() + position + nLength, n2, c);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::replace(const_iterator pBegin, const_iterator pEnd, const this_type& x)
+ {
+ return replace(pBegin, pEnd, x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::replace(const_iterator pBegin, const_iterator pEnd, const value_type* p, size_type n)
+ {
+ return replace(pBegin, pEnd, p, p + n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::replace(const_iterator pBegin, const_iterator pEnd, const value_type* p)
+ {
+ return replace(pBegin, pEnd, p, p + CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::replace(const_iterator pBegin, const_iterator pEnd, size_type n, value_type c)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((pBegin < internalLayout().BeginPtr()) || (pBegin > internalLayout().EndPtr()) ||
+ (pEnd < internalLayout().BeginPtr()) || (pEnd > internalLayout().EndPtr()) || (pEnd < pBegin)))
+ EASTL_FAIL_MSG("basic_string::replace -- invalid position");
+ #endif
+
+ const size_type nLength = static_cast<size_type>(pEnd - pBegin);
+
+ if(nLength >= n)
+ {
+ CharTypeAssignN(const_cast<value_type*>(pBegin), n, c);
+ erase(pBegin + n, pEnd);
+ }
+ else
+ {
+ CharTypeAssignN(const_cast<value_type*>(pBegin), nLength, c);
+ insert(pEnd, n - nLength, c);
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::replace(const_iterator pBegin1, const_iterator pEnd1, const value_type* pBegin2, const value_type* pEnd2)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((pBegin1 < internalLayout().BeginPtr()) || (pBegin1 > internalLayout().EndPtr()) ||
+ (pEnd1 < internalLayout().BeginPtr()) || (pEnd1 > internalLayout().EndPtr()) || (pEnd1 < pBegin1)))
+ EASTL_FAIL_MSG("basic_string::replace -- invalid position");
+ #endif
+
+ const size_type nLength1 = (size_type)(pEnd1 - pBegin1);
+ const size_type nLength2 = (size_type)(pEnd2 - pBegin2);
+
+ if(nLength1 >= nLength2) // If we have a non-expanding operation...
+ {
+ if((pBegin2 > pEnd1) || (pEnd2 <= pBegin1)) // If we have a non-overlapping operation...
+ memcpy(const_cast<value_type*>(pBegin1), pBegin2, (size_t)(pEnd2 - pBegin2) * sizeof(value_type));
+ else
+ memmove(const_cast<value_type*>(pBegin1), pBegin2, (size_t)(pEnd2 - pBegin2) * sizeof(value_type));
+ erase(pBegin1 + nLength2, pEnd1);
+ }
+ else // Else we are expanding.
+ {
+ if((pBegin2 > pEnd1) || (pEnd2 <= pBegin1)) // If we have a non-overlapping operation...
+ {
+ const value_type* const pMid2 = pBegin2 + nLength1;
+
+ if((pEnd2 <= pBegin1) || (pBegin2 > pEnd1))
+ memcpy(const_cast<value_type*>(pBegin1), pBegin2, (size_t)(pMid2 - pBegin2) * sizeof(value_type));
+ else
+ memmove(const_cast<value_type*>(pBegin1), pBegin2, (size_t)(pMid2 - pBegin2) * sizeof(value_type));
+ insert(pEnd1, pMid2, pEnd2);
+ }
+ else // else we have an overlapping operation.
+ {
+ // I can't think of any easy way of doing this without allocating temporary memory.
+ const size_type nOldSize = internalLayout().GetSize();
+ const size_type nOldCap = capacity();
+ const size_type nNewCapacity = GetNewCapacity(nOldCap, (nOldSize + (nLength2 - nLength1)) - nOldCap);
+
+ pointer pNewBegin = DoAllocate(nNewCapacity + 1);
+
+ pointer pNewEnd = CharStringUninitializedCopy(internalLayout().BeginPtr(), pBegin1, pNewBegin);
+ pNewEnd = CharStringUninitializedCopy(pBegin2, pEnd2, pNewEnd);
+ pNewEnd = CharStringUninitializedCopy(pEnd1, internalLayout().EndPtr(), pNewEnd);
+ *pNewEnd = 0;
+
+ DeallocateSelf();
+ internalLayout().SetHeapBeginPtr(pNewBegin);
+ internalLayout().SetHeapCapacity(nNewCapacity);
+ internalLayout().SetHeapSize(nOldSize + (nLength2 - nLength1));
+ }
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::copy(value_type* p, size_type n, size_type position) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ // C++ std says the effects of this function are as if calling char_traits::copy()
+ // thus the 'p' must not overlap *this string, so we can use memcpy
+ const size_type nLength = eastl::min_alt(n, internalLayout().GetSize() - position);
+ CharStringUninitializedCopy(internalLayout().BeginPtr() + position, internalLayout().BeginPtr() + position + nLength, p);
+ return nLength;
+ }
+
+
+ template <typename T, typename Allocator>
+ void basic_string<T, Allocator>::swap(this_type& x)
+ {
+ if(get_allocator() == x.get_allocator() || (internalLayout().IsSSO() && x.internalLayout().IsSSO())) // If allocators are equivalent...
+ {
+ // We leave mAllocator as-is.
+ eastl::swap(internalLayout(), x.internalLayout());
+ }
+ else // else swap the contents.
+ {
+ const this_type temp(*this); // Can't call eastl::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find(const this_type& x, size_type position) const EA_NOEXCEPT
+ {
+ return find(x.internalLayout().BeginPtr(), position, x.internalLayout().GetSize());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find(const value_type* p, size_type position) const
+ {
+ return find(p, position, (size_type)CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find(const value_type* p, size_type position, size_type n) const
+ {
+ // It is not clear what the requirements are for position, but since the C++ standard
+ // appears to be silent it is assumed for now that position can be any value.
+ //#if EASTL_ASSERT_ENABLED
+ // if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+ // EASTL_FAIL_MSG("basic_string::find -- invalid position");
+ //#endif
+
+ if(EASTL_LIKELY(((npos - n) >= position) && (position + n) <= internalLayout().GetSize())) // If the range is valid...
+ {
+ const value_type* const pTemp = eastl::search(internalLayout().BeginPtr() + position, internalLayout().EndPtr(), p, p + n);
+
+ if((pTemp != internalLayout().EndPtr()) || (n == 0))
+ return (size_type)(pTemp - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find(value_type c, size_type position) const EA_NOEXCEPT
+ {
+ // It is not clear what the requirements are for position, but since the C++ standard
+ // appears to be silent it is assumed for now that position can be any value.
+ //#if EASTL_ASSERT_ENABLED
+ // if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+ // EASTL_FAIL_MSG("basic_string::find -- invalid position");
+ //#endif
+
+ if(EASTL_LIKELY(position < internalLayout().GetSize()))// If the position is valid...
+ {
+ const const_iterator pResult = eastl::find(internalLayout().BeginPtr() + position, internalLayout().EndPtr(), c);
+
+ if(pResult != internalLayout().EndPtr())
+ return (size_type)(pResult - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::rfind(const this_type& x, size_type position) const EA_NOEXCEPT
+ {
+ return rfind(x.internalLayout().BeginPtr(), position, x.internalLayout().GetSize());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::rfind(const value_type* p, size_type position) const
+ {
+ return rfind(p, position, (size_type)CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::rfind(const value_type* p, size_type position, size_type n) const
+ {
+ // Disabled because it's not clear what values are valid for position.
+ // It is documented that npos is a valid value, though. We return npos and
+ // don't crash if postion is any invalid value.
+ //#if EASTL_ASSERT_ENABLED
+ // if(EASTL_UNLIKELY((position != npos) && (position > (size_type)(mpEnd - mpBegin))))
+ // EASTL_FAIL_MSG("basic_string::rfind -- invalid position");
+ //#endif
+
+ // Note that a search for a zero length string starting at position = end() returns end() and not npos.
+ // Note by Paul Pedriana: I am not sure how this should behave in the case of n == 0 and position > size.
+ // The standard seems to suggest that rfind doesn't act exactly the same as find in that input position
+ // can be > size and the return value can still be other than npos. Thus, if n == 0 then you can
+ // never return npos, unlike the case with find.
+ const size_type nLength = internalLayout().GetSize();
+
+ if(EASTL_LIKELY(n <= nLength))
+ {
+ if(EASTL_LIKELY(n))
+ {
+ const const_iterator pEnd = internalLayout().BeginPtr() + eastl::min_alt(nLength - n, position) + n;
+ const const_iterator pResult = CharTypeStringRSearch(internalLayout().BeginPtr(), pEnd, p, p + n);
+
+ if(pResult != pEnd)
+ return (size_type)(pResult - internalLayout().BeginPtr());
+ }
+ else
+ return eastl::min_alt(nLength, position);
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::rfind(value_type c, size_type position) const EA_NOEXCEPT
+ {
+ // If n is zero or position is >= size, we return npos.
+ const size_type nLength = internalLayout().GetSize();
+
+ if(EASTL_LIKELY(nLength))
+ {
+ const value_type* const pEnd = internalLayout().BeginPtr() + eastl::min_alt(nLength - 1, position) + 1;
+ const value_type* const pResult = CharTypeStringRFind(pEnd, internalLayout().BeginPtr(), c);
+
+ if(pResult != internalLayout().BeginPtr())
+ return (size_type)((pResult - 1) - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_of(const this_type& x, size_type position) const EA_NOEXCEPT
+ {
+ return find_first_of(x.internalLayout().BeginPtr(), position, x.internalLayout().GetSize());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_of(const value_type* p, size_type position) const
+ {
+ return find_first_of(p, position, (size_type)CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_of(const value_type* p, size_type position, size_type n) const
+ {
+ // If position is >= size, we return npos.
+ if(EASTL_LIKELY((position < internalLayout().GetSize())))
+ {
+ const value_type* const pBegin = internalLayout().BeginPtr() + position;
+ const const_iterator pResult = CharTypeStringFindFirstOf(pBegin, internalLayout().EndPtr(), p, p + n);
+
+ if(pResult != internalLayout().EndPtr())
+ return (size_type)(pResult - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_of(value_type c, size_type position) const EA_NOEXCEPT
+ {
+ return find(c, position);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_of(const this_type& x, size_type position) const EA_NOEXCEPT
+ {
+ return find_last_of(x.internalLayout().BeginPtr(), position, x.internalLayout().GetSize());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_of(const value_type* p, size_type position) const
+ {
+ return find_last_of(p, position, (size_type)CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_of(const value_type* p, size_type position, size_type n) const
+ {
+ // If n is zero or position is >= size, we return npos.
+ const size_type nLength = internalLayout().GetSize();
+
+ if(EASTL_LIKELY(nLength))
+ {
+ const value_type* const pEnd = internalLayout().BeginPtr() + eastl::min_alt(nLength - 1, position) + 1;
+ const value_type* const pResult = CharTypeStringRFindFirstOf(pEnd, internalLayout().BeginPtr(), p, p + n);
+
+ if(pResult != internalLayout().BeginPtr())
+ return (size_type)((pResult - 1) - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_of(value_type c, size_type position) const EA_NOEXCEPT
+ {
+ return rfind(c, position);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_not_of(const this_type& x, size_type position) const EA_NOEXCEPT
+ {
+ return find_first_not_of(x.internalLayout().BeginPtr(), position, x.internalLayout().GetSize());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_not_of(const value_type* p, size_type position) const
+ {
+ return find_first_not_of(p, position, (size_type)CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_not_of(const value_type* p, size_type position, size_type n) const
+ {
+ if(EASTL_LIKELY(position <= internalLayout().GetSize()))
+ {
+ const const_iterator pResult =
+ CharTypeStringFindFirstNotOf(internalLayout().BeginPtr() + position, internalLayout().EndPtr(), p, p + n);
+
+ if(pResult != internalLayout().EndPtr())
+ return (size_type)(pResult - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_not_of(value_type c, size_type position) const EA_NOEXCEPT
+ {
+ if(EASTL_LIKELY(position <= internalLayout().GetSize()))
+ {
+ // Todo: Possibly make a specialized version of CharTypeStringFindFirstNotOf(pBegin, pEnd, c).
+ const const_iterator pResult =
+ CharTypeStringFindFirstNotOf(internalLayout().BeginPtr() + position, internalLayout().EndPtr(), &c, &c + 1);
+
+ if(pResult != internalLayout().EndPtr())
+ return (size_type)(pResult - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_not_of(const this_type& x, size_type position) const EA_NOEXCEPT
+ {
+ return find_last_not_of(x.internalLayout().BeginPtr(), position, x.internalLayout().GetSize());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_not_of(const value_type* p, size_type position) const
+ {
+ return find_last_not_of(p, position, (size_type)CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_not_of(const value_type* p, size_type position, size_type n) const
+ {
+ const size_type nLength = internalLayout().GetSize();
+
+ if(EASTL_LIKELY(nLength))
+ {
+ const value_type* const pEnd = internalLayout().BeginPtr() + eastl::min_alt(nLength - 1, position) + 1;
+ const value_type* const pResult = CharTypeStringRFindFirstNotOf(pEnd, internalLayout().BeginPtr(), p, p + n);
+
+ if(pResult != internalLayout().BeginPtr())
+ return (size_type)((pResult - 1) - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_not_of(value_type c, size_type position) const EA_NOEXCEPT
+ {
+ const size_type nLength = internalLayout().GetSize();
+
+ if(EASTL_LIKELY(nLength))
+ {
+ // Todo: Possibly make a specialized version of CharTypeStringRFindFirstNotOf(pBegin, pEnd, c).
+ const value_type* const pEnd = internalLayout().BeginPtr() + eastl::min_alt(nLength - 1, position) + 1;
+ const value_type* const pResult = CharTypeStringRFindFirstNotOf(pEnd, internalLayout().BeginPtr(), &c, &c + 1);
+
+ if(pResult != internalLayout().BeginPtr())
+ return (size_type)((pResult - 1) - internalLayout().BeginPtr());
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator> basic_string<T, Allocator>::substr(size_type position, size_type n) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ ThrowRangeException();
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(position > internalLayout().GetSize()))
+ EASTL_FAIL_MSG("basic_string::substr -- invalid position");
+ #endif
+
+ // C++ std says the return string allocator must be default constructed, not a copy of this->get_allocator()
+ return basic_string(
+ internalLayout().BeginPtr() + position,
+ internalLayout().BeginPtr() + position +
+ eastl::min_alt(n, internalLayout().GetSize() - position), get_allocator());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(const this_type& x) const EA_NOEXCEPT
+ {
+ return compare(internalLayout().BeginPtr(), internalLayout().EndPtr(), x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(size_type pos1, size_type n1, const this_type& x) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(pos1 > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ return compare(
+ internalLayout().BeginPtr() + pos1,
+ internalLayout().BeginPtr() + pos1 + eastl::min_alt(n1, internalLayout().GetSize() - pos1),
+ x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(size_type pos1, size_type n1, const this_type& x, size_type pos2, size_type n2) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY((pos1 > (size_type)(internalLayout().EndPtr() - internalLayout().BeginPtr())) ||
+ (pos2 > (size_type)(x.internalLayout().EndPtr() - x.internalLayout().BeginPtr()))))
+ ThrowRangeException();
+ #endif
+
+ return compare(internalLayout().BeginPtr() + pos1,
+ internalLayout().BeginPtr() + pos1 + eastl::min_alt(n1, internalLayout().GetSize() - pos1),
+ x.internalLayout().BeginPtr() + pos2,
+ x.internalLayout().BeginPtr() + pos2 + eastl::min_alt(n2, x.internalLayout().GetSize() - pos2));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(const value_type* p) const
+ {
+ return compare(internalLayout().BeginPtr(), internalLayout().EndPtr(), p, p + CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(size_type pos1, size_type n1, const value_type* p) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(pos1 > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ return compare(internalLayout().BeginPtr() + pos1,
+ internalLayout().BeginPtr() + pos1 + eastl::min_alt(n1, internalLayout().GetSize() - pos1),
+ p,
+ p + CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(size_type pos1, size_type n1, const value_type* p, size_type n2) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY(pos1 > internalLayout().GetSize()))
+ ThrowRangeException();
+ #endif
+
+ return compare(internalLayout().BeginPtr() + pos1,
+ internalLayout().BeginPtr() + pos1 + eastl::min_alt(n1, internalLayout().GetSize() - pos1),
+ p,
+ p + n2);
+ }
+
+
+ // make_lower
+ // This is a very simple ASCII-only case conversion function
+ // Anything more complicated should use a more powerful separate library.
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::make_lower()
+ {
+ for(pointer p = internalLayout().BeginPtr(); p < internalLayout().EndPtr(); ++p)
+ *p = (value_type)CharToLower(*p);
+ }
+
+
+ // make_upper
+ // This is a very simple ASCII-only case conversion function
+ // Anything more complicated should use a more powerful separate library.
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::make_upper()
+ {
+ for(pointer p = internalLayout().BeginPtr(); p < internalLayout().EndPtr(); ++p)
+ *p = (value_type)CharToUpper(*p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::ltrim()
+ {
+ const value_type array[] = { ' ', '\t', 0 }; // This is a pretty simplistic view of whitespace.
+ erase(0, find_first_not_of(array));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::rtrim()
+ {
+ const value_type array[] = { ' ', '\t', 0 }; // This is a pretty simplistic view of whitespace.
+ erase(find_last_not_of(array) + 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::trim()
+ {
+ ltrim();
+ rtrim();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::ltrim(const value_type* p)
+ {
+ erase(0, find_first_not_of(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::rtrim(const value_type* p)
+ {
+ erase(find_last_not_of(p) + 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::trim(const value_type* p)
+ {
+ ltrim(p);
+ rtrim(p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator> basic_string<T, Allocator>::left(size_type n) const
+ {
+ const size_type nLength = length();
+ if(n < nLength)
+ return substr(0, n);
+ // C++ std says that substr must return default constructed allocated, but we do not.
+ // Instead it is much more practical to provide the copy of the current allocator
+ return basic_string(*this, get_allocator());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator> basic_string<T, Allocator>::right(size_type n) const
+ {
+ const size_type nLength = length();
+ if(n < nLength)
+ return substr(nLength - n, n);
+ // C++ std says that substr must return default constructed allocated, but we do not.
+ // Instead it is much more practical to provide the copy of the current allocator
+ return basic_string(*this, get_allocator());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator>& basic_string<T, Allocator>::sprintf(const value_type* pFormat, ...)
+ {
+ va_list arguments;
+ va_start(arguments, pFormat);
+ internalLayout().SetSize(0); // Fast truncate to zero length.
+ append_sprintf_va_list(pFormat, arguments);
+ va_end(arguments);
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>& basic_string<T, Allocator>::sprintf_va_list(const value_type* pFormat, va_list arguments)
+ {
+ internalLayout().SetSize(0); // Fast truncate to zero length.
+
+ return append_sprintf_va_list(pFormat, arguments);
+ }
+
+
+ template <typename T, typename Allocator>
+ int basic_string<T, Allocator>::compare(const value_type* pBegin1, const value_type* pEnd1,
+ const value_type* pBegin2, const value_type* pEnd2)
+ {
+ const difference_type n1 = pEnd1 - pBegin1;
+ const difference_type n2 = pEnd2 - pBegin2;
+ const difference_type nMin = eastl::min_alt(n1, n2);
+ const int cmp = Compare(pBegin1, pBegin2, (size_t)nMin);
+
+ return (cmp != 0 ? cmp : (n1 < n2 ? -1 : (n1 > n2 ? 1 : 0)));
+ }
+
+
+ template <typename T, typename Allocator>
+ int basic_string<T, Allocator>::comparei(const value_type* pBegin1, const value_type* pEnd1,
+ const value_type* pBegin2, const value_type* pEnd2)
+ {
+ const difference_type n1 = pEnd1 - pBegin1;
+ const difference_type n2 = pEnd2 - pBegin2;
+ const difference_type nMin = eastl::min_alt(n1, n2);
+ const int cmp = CompareI(pBegin1, pBegin2, (size_t)nMin);
+
+ return (cmp != 0 ? cmp : (n1 < n2 ? -1 : (n1 > n2 ? 1 : 0)));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::comparei(const this_type& x) const EA_NOEXCEPT
+ {
+ return comparei(internalLayout().BeginPtr(), internalLayout().EndPtr(), x.internalLayout().BeginPtr(), x.internalLayout().EndPtr());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::comparei(const value_type* p) const
+ {
+ return comparei(internalLayout().BeginPtr(), internalLayout().EndPtr(), p, p + CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::iterator
+ basic_string<T, Allocator>::InsertInternal(const_iterator p, value_type c)
+ {
+ iterator pNewPosition = const_cast<value_type*>(p);
+
+ if((internalLayout().EndPtr() + 1) <= internalLayout().CapacityPtr())
+ {
+ const size_type nSavedSize = internalLayout().GetSize();
+ memmove(const_cast<value_type*>(p) + 1, p, (size_t)(internalLayout().EndPtr() - p) * sizeof(value_type));
+ *(internalLayout().EndPtr() + 1) = 0;
+ *pNewPosition = c;
+ internalLayout().SetSize(nSavedSize + 1);
+ }
+ else
+ {
+ const size_type nOldSize = internalLayout().GetSize();
+ const size_type nOldCap = capacity();
+ const size_type nLength = GetNewCapacity(nOldCap, 1);
+
+ iterator pNewBegin = DoAllocate(nLength + 1);
+
+ pNewPosition = CharStringUninitializedCopy(internalLayout().BeginPtr(), p, pNewBegin);
+ *pNewPosition = c;
+
+ iterator pNewEnd = pNewPosition + 1;
+ pNewEnd = CharStringUninitializedCopy(p, internalLayout().EndPtr(), pNewEnd);
+ *pNewEnd = 0;
+
+ DeallocateSelf();
+ internalLayout().SetHeapBeginPtr(pNewBegin);
+ internalLayout().SetHeapCapacity(nLength);
+ internalLayout().SetHeapSize(nOldSize + 1);
+ }
+ return pNewPosition;
+ }
+
+
+ template <typename T, typename Allocator>
+ void basic_string<T, Allocator>::SizeInitialize(size_type n, value_type c)
+ {
+ AllocateSelf(n);
+
+ CharStringUninitializedFillN(internalLayout().BeginPtr(), n, c);
+ *internalLayout().EndPtr() = 0;
+ }
+
+
+ template <typename T, typename Allocator>
+ void basic_string<T, Allocator>::RangeInitialize(const value_type* pBegin, const value_type* pEnd)
+ {
+ #if EASTL_STRING_OPT_ARGUMENT_ERRORS
+ if(EASTL_UNLIKELY(!pBegin && (pEnd < pBegin))) // 21.4.2 p7
+ ThrowInvalidArgumentException();
+ #endif
+
+ const size_type n = (size_type)(pEnd - pBegin);
+
+ AllocateSelf(n);
+
+ CharStringUninitializedCopy(pBegin, pEnd, internalLayout().BeginPtr());
+ *internalLayout().EndPtr() = 0;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::RangeInitialize(const value_type* pBegin)
+ {
+ #if EASTL_STRING_OPT_ARGUMENT_ERRORS
+ if(EASTL_UNLIKELY(!pBegin))
+ ThrowInvalidArgumentException();
+ #endif
+
+ RangeInitialize(pBegin, pBegin + CharStrlen(pBegin));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::DoAllocate(size_type n)
+ {
+ return (value_type*)EASTLAlloc(get_allocator(), n * sizeof(value_type));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::DoFree(value_type* p, size_type n)
+ {
+ if(p)
+ EASTLFree(get_allocator(), p, n * sizeof(value_type));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::GetNewCapacity(size_type currentCapacity)
+ {
+ return GetNewCapacity(currentCapacity, 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::GetNewCapacity(size_type currentCapacity, size_type minimumGrowSize)
+ {
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ const size_type nRemainingSize = max_size() - currentCapacity;
+ if(EASTL_UNLIKELY((minimumGrowSize > nRemainingSize)))
+ {
+ ThrowLengthException();
+ }
+ #endif
+
+ const size_type nNewCapacity = eastl::max_alt(currentCapacity + minimumGrowSize, currentCapacity * 2);
+
+ return nNewCapacity;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::AllocateSelf()
+ {
+ internalLayout().ResetToSSO();
+ }
+
+
+ template <typename T, typename Allocator>
+ void basic_string<T, Allocator>::AllocateSelf(size_type n)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n >= 0x40000000))
+ EASTL_FAIL_MSG("basic_string::AllocateSelf -- improbably large request.");
+ #endif
+
+ #if EASTL_STRING_OPT_LENGTH_ERRORS
+ if(EASTL_UNLIKELY(n > max_size()))
+ ThrowLengthException();
+ #endif
+
+ if(n > SSOLayout::SSO_CAPACITY)
+ {
+ pointer pBegin = DoAllocate(n + 1);
+ internalLayout().SetHeapBeginPtr(pBegin);
+ internalLayout().SetHeapCapacity(n);
+ internalLayout().SetHeapSize(n);
+ }
+ else
+ internalLayout().SetSSOSize(n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::DeallocateSelf()
+ {
+ if(internalLayout().IsHeap())
+ {
+ DoFree(internalLayout().BeginPtr(), internalLayout().GetHeapCapacity() + 1);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::ThrowLengthException() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw std::length_error("basic_string -- length_error");
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_FAIL_MSG("basic_string -- length_error");
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::ThrowRangeException() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw std::out_of_range("basic_string -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_FAIL_MSG("basic_string -- out of range");
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::ThrowInvalidArgumentException() const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw std::invalid_argument("basic_string -- invalid argument");
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_FAIL_MSG("basic_string -- invalid argument");
+ #endif
+ }
+
+
+ // CharTypeStringFindEnd
+ // Specialized char version of STL find() from back function.
+ // Not the same as RFind because search range is specified as forward iterators.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringFindEnd(const value_type* pBegin, const value_type* pEnd, value_type c)
+ {
+ const value_type* pTemp = pEnd;
+ while(--pTemp >= pBegin)
+ {
+ if(*pTemp == c)
+ return pTemp;
+ }
+
+ return pEnd;
+ }
+
+
+ // CharTypeStringRFind
+ // Specialized value_type version of STL find() function in reverse.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringRFind(const value_type* pRBegin, const value_type* pREnd, const value_type c)
+ {
+ while(pRBegin > pREnd)
+ {
+ if(*(pRBegin - 1) == c)
+ return pRBegin;
+ --pRBegin;
+ }
+ return pREnd;
+ }
+
+
+ // CharTypeStringSearch
+ // Specialized value_type version of STL search() function.
+ // Purpose: find p2 within p1. Return p1End if not found or if either string is zero length.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringSearch(const value_type* p1Begin, const value_type* p1End,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ // Test for zero length strings, in which case we have a match or a failure,
+ // but the return value is the same either way.
+ if((p1Begin == p1End) || (p2Begin == p2End))
+ return p1Begin;
+
+ // Test for a pattern of length 1.
+ if((p2Begin + 1) == p2End)
+ return eastl::find(p1Begin, p1End, *p2Begin);
+
+ // General case.
+ const value_type* pTemp;
+ const value_type* pTemp1 = (p2Begin + 1);
+ const value_type* pCurrent = p1Begin;
+
+ while(p1Begin != p1End)
+ {
+ p1Begin = eastl::find(p1Begin, p1End, *p2Begin);
+ if(p1Begin == p1End)
+ return p1End;
+
+ pTemp = pTemp1;
+ pCurrent = p1Begin;
+ if(++pCurrent == p1End)
+ return p1End;
+
+ while(*pCurrent == *pTemp)
+ {
+ if(++pTemp == p2End)
+ return p1Begin;
+ if(++pCurrent == p1End)
+ return p1End;
+ }
+
+ ++p1Begin;
+ }
+
+ return p1Begin;
+ }
+
+
+ // CharTypeStringRSearch
+ // Specialized value_type version of STL find_end() function (which really is a reverse search function).
+ // Purpose: find last instance of p2 within p1. Return p1End if not found or if either string is zero length.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringRSearch(const value_type* p1Begin, const value_type* p1End,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ // Test for zero length strings, in which case we have a match or a failure,
+ // but the return value is the same either way.
+ if((p1Begin == p1End) || (p2Begin == p2End))
+ return p1Begin;
+
+ // Test for a pattern of length 1.
+ if((p2Begin + 1) == p2End)
+ return CharTypeStringFindEnd(p1Begin, p1End, *p2Begin);
+
+ // Test for search string length being longer than string length.
+ if((p2End - p2Begin) > (p1End - p1Begin))
+ return p1End;
+
+ // General case.
+ const value_type* pSearchEnd = (p1End - (p2End - p2Begin) + 1);
+ const value_type* pCurrent1;
+ const value_type* pCurrent2;
+
+ while(pSearchEnd != p1Begin)
+ {
+ // Search for the last occurrence of *p2Begin.
+ pCurrent1 = CharTypeStringFindEnd(p1Begin, pSearchEnd, *p2Begin);
+ if(pCurrent1 == pSearchEnd) // If the first char of p2 wasn't found,
+ return p1End; // then we immediately have failure.
+
+ // In this case, *pTemp == *p2Begin. So compare the rest.
+ pCurrent2 = p2Begin;
+ while(*pCurrent1++ == *pCurrent2++)
+ {
+ if(pCurrent2 == p2End)
+ return (pCurrent1 - (p2End - p2Begin));
+ }
+
+ // A smarter algorithm might know to subtract more than just one,
+ // but in most cases it won't make much difference anyway.
+ --pSearchEnd;
+ }
+
+ return p1End;
+ }
+
+
+ // CharTypeStringFindFirstOf
+ // Specialized value_type version of STL find_first_of() function.
+ // This function is much like the C runtime strtok function, except the strings aren't null-terminated.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringFindFirstOf(const value_type* p1Begin, const value_type* p1End,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ for( ; p1Begin != p1End; ++p1Begin)
+ {
+ for(const value_type* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if(*p1Begin == *pTemp)
+ return p1Begin;
+ }
+ }
+ return p1End;
+ }
+
+
+ // CharTypeStringRFindFirstOf
+ // Specialized value_type version of STL find_first_of() function in reverse.
+ // This function is much like the C runtime strtok function, except the strings aren't null-terminated.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringRFindFirstOf(const value_type* p1RBegin, const value_type* p1REnd,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ for( ; p1RBegin != p1REnd; --p1RBegin)
+ {
+ for(const value_type* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if(*(p1RBegin - 1) == *pTemp)
+ return p1RBegin;
+ }
+ }
+ return p1REnd;
+ }
+
+
+
+ // CharTypeStringFindFirstNotOf
+ // Specialized value_type version of STL find_first_not_of() function.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringFindFirstNotOf(const value_type* p1Begin, const value_type* p1End,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ for( ; p1Begin != p1End; ++p1Begin)
+ {
+ const value_type* pTemp;
+ for(pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if(*p1Begin == *pTemp)
+ break;
+ }
+ if(pTemp == p2End)
+ return p1Begin;
+ }
+ return p1End;
+ }
+
+
+ // CharTypeStringRFindFirstNotOf
+ // Specialized value_type version of STL find_first_not_of() function in reverse.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringRFindFirstNotOf(const value_type* p1RBegin, const value_type* p1REnd,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ for( ; p1RBegin != p1REnd; --p1RBegin)
+ {
+ const value_type* pTemp;
+ for(pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if(*(p1RBegin-1) == *pTemp)
+ break;
+ }
+ if(pTemp == p2End)
+ return p1RBegin;
+ }
+ return p1REnd;
+ }
+
+
+
+
+ // iterator operators
+ template <typename T, typename Allocator>
+ inline bool operator==(const typename basic_string<T, Allocator>::reverse_iterator& r1,
+ const typename basic_string<T, Allocator>::reverse_iterator& r2)
+ {
+ return r1.mpCurrent == r2.mpCurrent;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const typename basic_string<T, Allocator>::reverse_iterator& r1,
+ const typename basic_string<T, Allocator>::reverse_iterator& r2)
+ {
+ return r1.mpCurrent != r2.mpCurrent;
+ }
+
+
+ // Operator +
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ basic_string<T, Allocator> result(cDNI, a.size() + b.size(), const_cast<basic_string<T, Allocator>&>(a).get_allocator()); // Note that we choose to assign a's allocator.
+ result.append(a);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ const typename basic_string<T, Allocator>::size_type n = (typename basic_string<T, Allocator>::size_type)CharStrlen(p);
+ basic_string<T, Allocator> result(cDNI, n + b.size(), const_cast<basic_string<T, Allocator>&>(b).get_allocator());
+ result.append(p, p + n);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(typename basic_string<T, Allocator>::value_type c, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ basic_string<T, Allocator> result(cDNI, 1 + b.size(), const_cast<basic_string<T, Allocator>&>(b).get_allocator());
+ result.push_back(c);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ const typename basic_string<T, Allocator>::size_type n = (typename basic_string<T, Allocator>::size_type)CharStrlen(p);
+ basic_string<T, Allocator> result(cDNI, a.size() + n, const_cast<basic_string<T, Allocator>&>(a).get_allocator());
+ result.append(a);
+ result.append(p, p + n);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const basic_string<T, Allocator>& a, typename basic_string<T, Allocator>::value_type c)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ basic_string<T, Allocator> result(cDNI, a.size() + 1, const_cast<basic_string<T, Allocator>&>(a).get_allocator());
+ result.append(a);
+ result.push_back(c);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(basic_string<T, Allocator>&& a, basic_string<T, Allocator>&& b)
+ {
+ a.append(b); // Using an rvalue by name results in it becoming an lvalue.
+ return eastl::move(a);
+ }
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(basic_string<T, Allocator>&& a, const basic_string<T, Allocator>& b)
+ {
+ a.append(b);
+ return eastl::move(a);
+ }
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const typename basic_string<T, Allocator>::value_type* p, basic_string<T, Allocator>&& b)
+ {
+ b.insert(0, p);
+ return eastl::move(b);
+ }
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(basic_string<T, Allocator>&& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ a.append(p);
+ return eastl::move(a);
+ }
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(basic_string<T, Allocator>&& a, typename basic_string<T, Allocator>::value_type c)
+ {
+ a.push_back(c);
+ return eastl::move(a);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool basic_string<T, Allocator>::validate() const EA_NOEXCEPT
+ {
+ if((internalLayout().BeginPtr() == nullptr) || (internalLayout().EndPtr() == nullptr))
+ return false;
+ if(internalLayout().EndPtr() < internalLayout().BeginPtr())
+ return false;
+ if(internalLayout().CapacityPtr() < internalLayout().EndPtr())
+ return false;
+ if(*internalLayout().EndPtr() != 0)
+ return false;
+ return true;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::validate_iterator(const_iterator i) const EA_NOEXCEPT
+ {
+ if(i >= internalLayout().BeginPtr())
+ {
+ if(i < internalLayout().EndPtr())
+ return (isf_valid | isf_current | isf_can_dereference);
+
+ if(i <= internalLayout().EndPtr())
+ return (isf_valid | isf_current);
+ }
+
+ return isf_none;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ // Operator== and operator!=
+ template <typename T, typename Allocator>
+ inline bool operator==(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return ((a.size() == b.size()) && (memcmp(a.data(), b.data(), (size_t)a.size() * sizeof(typename basic_string<T, Allocator>::value_type)) == 0));
+ }
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename Allocator>
+ inline bool operator==(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return ((n == b.size()) && (memcmp(p, b.data(), (size_t)n * sizeof(*p)) == 0));
+ }
+#endif
+
+ template <typename T, typename Allocator>
+ inline bool operator==(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return ((a.size() == n) && (memcmp(a.data(), p, (size_t)n * sizeof(*p)) == 0));
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename Allocator>
+ inline auto operator<=>(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return basic_string<T, Allocator>::compare(a.begin(), a.end(), b.begin(), b.end()) <=> 0;
+ }
+
+ template <typename T, typename Allocator>
+ inline auto operator<=>(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return basic_string<T, Allocator>::compare(a.begin(), a.end(), p, p + n) <=> 0;
+ }
+
+ template <typename T, typename Allocator>
+ inline auto operator<=>(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::view_type v)
+ {
+ typedef typename basic_string<T, Allocator>::view_type view_type;
+ return static_cast<view_type>(a) <=> v;
+ }
+
+#else
+
+ template <typename T, typename Allocator>
+ inline bool operator==(const typename basic_string<T, Allocator>::view_type v, const basic_string<T, Allocator>& b)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ typedef typename basic_string<T, Allocator>::view_type view_type;
+ return v == static_cast<view_type>(b);
+ }
+
+ template <typename T, typename Allocator>
+ inline bool operator==(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::view_type v)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ typedef typename basic_string<T, Allocator>::view_type view_type;
+ return static_cast<view_type>(a) == v;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return !(a == b);
+ }
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ return !(p == b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ return !(a == p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const typename basic_string<T, Allocator>::view_type v, const basic_string<T, Allocator>& b)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(v == b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::view_type v)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(a == v);
+ }
+
+
+ // Operator< (and also >, <=, and >=).
+ template <typename T, typename Allocator>
+ inline bool operator<(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return basic_string<T, Allocator>::compare(a.begin(), a.end(), b.begin(), b.end()) < 0; }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return basic_string<T, Allocator>::compare(p, p + n, b.begin(), b.end()) < 0;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return basic_string<T, Allocator>::compare(a.begin(), a.end(), p, p + n) < 0;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<(const typename basic_string<T, Allocator>::view_type v, const basic_string<T, Allocator>& b)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ typedef typename basic_string<T, Allocator>::view_type view_type;
+ return v < static_cast<view_type>(b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::view_type v)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ typedef typename basic_string<T, Allocator>::view_type view_type;
+ return static_cast<view_type>(a) < v;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ return b < p;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ return p < a;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const typename basic_string<T, Allocator>::view_type v, const basic_string<T, Allocator>& b)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return b < v;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::view_type v)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return v < a;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ return !(b < p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ return !(p < a);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const typename basic_string<T, Allocator>::view_type v, const basic_string<T, Allocator>& b)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(b < v);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::view_type v)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(v < a);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ return !(p < b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ return !(a < p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const typename basic_string<T, Allocator>::view_type v, const basic_string<T, Allocator>& b)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(v < b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::view_type v)
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(a < v);
+ }
+#endif
+
+ template <typename T, typename Allocator>
+ inline void swap(basic_string<T, Allocator>& a, basic_string<T, Allocator>& b)
+ {
+ a.swap(b);
+ }
+
+
+ /// string / wstring
+ typedef basic_string<char> string;
+ typedef basic_string<wchar_t> wstring;
+
+ /// custom string8 / string16 / string32
+ typedef basic_string<char> string8;
+ typedef basic_string<char16_t> string16;
+ typedef basic_string<char32_t> string32;
+
+ /// ISO mandated string types
+ typedef basic_string<char8_t> u8string; // Actually not a C++11 type, but added for consistency.
+ typedef basic_string<char16_t> u16string;
+ typedef basic_string<char32_t> u32string;
+
+
+ /// hash<string>
+ ///
+ /// We provide EASTL hash function objects for use in hash table containers.
+ ///
+ /// Example usage:
+ /// #include <EASTL/hash_set.h>
+ /// hash_set<string> stringHashSet;
+ ///
+ template <typename T> struct hash;
+
+ template <>
+ struct hash<string>
+ {
+ size_t operator()(const string& x) const
+ {
+ const unsigned char* p = (const unsigned char*)x.c_str(); // To consider: limit p to at most 256 chars.
+ unsigned int c, result = 2166136261U; // We implement an FNV-like string hash.
+ while((c = *p++) != 0) // Using '!=' disables compiler warnings.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ template <>
+ struct hash<u8string>
+ {
+ size_t operator()(const u8string& x) const
+ {
+ const char8_t* p = (const char8_t*)x.c_str();
+ unsigned int c, result = 2166136261U;
+ while((c = *p++) != 0)
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+ #endif
+
+ template <>
+ struct hash<string16>
+ {
+ size_t operator()(const string16& x) const
+ {
+ const char16_t* p = x.c_str();
+ unsigned int c, result = 2166136261U;
+ while((c = *p++) != 0)
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <>
+ struct hash<string32>
+ {
+ size_t operator()(const string32& x) const
+ {
+ const char32_t* p = x.c_str();
+ unsigned int c, result = 2166136261U;
+ while((c = (unsigned int)*p++) != 0)
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ #if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE
+ template <>
+ struct hash<wstring>
+ {
+ size_t operator()(const wstring& x) const
+ {
+ const wchar_t* p = x.c_str();
+ unsigned int c, result = 2166136261U;
+ while((c = (unsigned int)*p++) != 0)
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+ #endif
+
+
+ /// to_string
+ ///
+ /// Converts integral types to an eastl::string with the same content that sprintf produces. The following
+ /// implementation provides a type safe conversion mechanism which avoids the common bugs associated with sprintf
+ /// style format strings.
+ ///
+ /// http://en.cppreference.com/w/cpp/string/basic_string/to_string
+ ///
+ inline string to_string(int value)
+ { return string(string::CtorSprintf(), "%d", value); }
+ inline string to_string(long value)
+ { return string(string::CtorSprintf(), "%ld", value); }
+ inline string to_string(long long value)
+ { return string(string::CtorSprintf(), "%lld", value); }
+ inline string to_string(unsigned value)
+ { return string(string::CtorSprintf(), "%u", value); }
+ inline string to_string(unsigned long value)
+ { return string(string::CtorSprintf(), "%lu", value); }
+ inline string to_string(unsigned long long value)
+ { return string(string::CtorSprintf(), "%llu", value); }
+ inline string to_string(float value)
+ { return string(string::CtorSprintf(), "%f", value); }
+ inline string to_string(double value)
+ { return string(string::CtorSprintf(), "%f", value); }
+ inline string to_string(long double value)
+ { return string(string::CtorSprintf(), "%Lf", value); }
+
+
+ /// to_wstring
+ ///
+ /// Converts integral types to an eastl::wstring with the same content that sprintf produces. The following
+ /// implementation provides a type safe conversion mechanism which avoids the common bugs associated with sprintf
+ /// style format strings.
+ ///
+ /// http://en.cppreference.com/w/cpp/string/basic_string/to_wstring
+ ///
+ inline wstring to_wstring(int value)
+ { return wstring(wstring::CtorSprintf(), L"%d", value); }
+ inline wstring to_wstring(long value)
+ { return wstring(wstring::CtorSprintf(), L"%ld", value); }
+ inline wstring to_wstring(long long value)
+ { return wstring(wstring::CtorSprintf(), L"%lld", value); }
+ inline wstring to_wstring(unsigned value)
+ { return wstring(wstring::CtorSprintf(), L"%u", value); }
+ inline wstring to_wstring(unsigned long value)
+ { return wstring(wstring::CtorSprintf(), L"%lu", value); }
+ inline wstring to_wstring(unsigned long long value)
+ { return wstring(wstring::CtorSprintf(), L"%llu", value); }
+ inline wstring to_wstring(float value)
+ { return wstring(wstring::CtorSprintf(), L"%f", value); }
+ inline wstring to_wstring(double value)
+ { return wstring(wstring::CtorSprintf(), L"%f", value); }
+ inline wstring to_wstring(long double value)
+ { return wstring(wstring::CtorSprintf(), L"%Lf", value); }
+
+
+ /// user defined literals
+ ///
+ /// Converts a character array literal to a basic_string.
+ ///
+ /// Example:
+ /// string s = "abcdef"s;
+ ///
+ /// http://en.cppreference.com/w/cpp/string/basic_string/operator%22%22s
+ ///
+ #if EASTL_USER_LITERALS_ENABLED && EASTL_INLINE_NAMESPACES_ENABLED
+ // Disabling the Clang/GCC/MSVC warning about using user
+ // defined literals without a leading '_' as they are reserved
+ // for standard libary usage.
+ EA_DISABLE_VC_WARNING(4455)
+ EA_DISABLE_CLANG_WARNING(-Wuser-defined-literals)
+ EA_DISABLE_GCC_WARNING(-Wliteral-suffix)
+ inline namespace literals
+ {
+ inline namespace string_literals
+ {
+ inline string operator"" s(const char* str, size_t len) EA_NOEXCEPT { return {str, string::size_type(len)}; }
+ inline u16string operator"" s(const char16_t* str, size_t len) EA_NOEXCEPT { return {str, u16string::size_type(len)}; }
+ inline u32string operator"" s(const char32_t* str, size_t len) EA_NOEXCEPT { return {str, u32string::size_type(len)}; }
+ inline wstring operator"" s(const wchar_t* str, size_t len) EA_NOEXCEPT { return {str, wstring::size_type(len)}; }
+
+ // C++20 char8_t support.
+ #if EA_CHAR8_UNIQUE
+ inline u8string operator"" s(const char8_t* str, size_t len) EA_NOEXCEPT { return {str, u8string::size_type(len)}; }
+ #endif
+ }
+ }
+ EA_RESTORE_GCC_WARNING() // -Wliteral-suffix
+ EA_RESTORE_CLANG_WARNING() // -Wuser-defined-literals
+ EA_RESTORE_VC_WARNING() // warning: 4455
+ #endif
+
+
+ /// erase / erase_if
+ ///
+ /// https://en.cppreference.com/w/cpp/string/basic_string/erase2
+ template <class CharT, class Allocator, class U>
+ typename basic_string<CharT, Allocator>::size_type erase(basic_string<CharT, Allocator>& c, const U& value)
+ {
+ // Erases all elements that compare equal to value from the container.
+ auto origEnd = c.end();
+ auto newEnd = eastl::remove(c.begin(), origEnd, value);
+ auto numRemoved = eastl::distance(newEnd, origEnd);
+ c.erase(newEnd, origEnd);
+
+ // Note: This is technically a lossy conversion when size_type
+ // is 32bits and ptrdiff_t is 64bits (could happen on 64bit
+ // systems when EASTL_SIZE_T_32BIT is set). In practice this
+ // is fine because if EASTL_SIZE_T_32BIT is set then the
+ // string should not have more characters than fit in a
+ // uint32_t and so the distance here should fit in a
+ // size_type.
+ return static_cast<typename basic_string<CharT, Allocator>::size_type>(numRemoved);
+ }
+
+ template <class CharT, class Allocator, class Predicate>
+ typename basic_string<CharT, Allocator>::size_type erase_if(basic_string<CharT, Allocator>& c, Predicate predicate)
+ {
+ // Erases all elements that satisfy the predicate pred from the container.
+ auto origEnd = c.end();
+ auto newEnd = eastl::remove_if(c.begin(), origEnd, predicate);
+ auto numRemoved = eastl::distance(newEnd, origEnd);
+ c.erase(newEnd, origEnd);
+ // Note: This is technically a lossy conversion when size_type
+ // is 32bits and ptrdiff_t is 64bits (could happen on 64bit
+ // systems when EASTL_SIZE_T_32BIT is set). In practice this
+ // is fine because if EASTL_SIZE_T_32BIT is set then the
+ // string should not have more characters than fit in a
+ // uint32_t and so the distance here should fit in a
+ // size_type.
+ return static_cast<typename basic_string<CharT, Allocator>::size_type>(numRemoved);
+ }
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/string_hash_map.h b/EASTL/include/EASTL/string_hash_map.h
new file mode 100644
index 0000000..25bdfaf
--- /dev/null
+++ b/EASTL/include/EASTL/string_hash_map.h
@@ -0,0 +1,189 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STRING_HASH_MAP_H
+#define EASTL_STRING_HASH_MAP_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/hash_map.h>
+#include <EASTL/string.h>
+
+namespace eastl
+{
+
+
+template<typename T, typename Hash = hash<const char*>, typename Predicate = str_equal_to<const char*>, typename Allocator = EASTLAllocatorType>
+class string_hash_map : public eastl::hash_map<const char*, T, Hash, Predicate, Allocator>
+{
+public:
+ typedef eastl::hash_map<const char*, T, Hash, Predicate, Allocator> base;
+ typedef string_hash_map<T, Hash, Predicate, Allocator> this_type;
+ typedef typename base::base_type::allocator_type allocator_type;
+ typedef typename base::base_type::insert_return_type insert_return_type;
+ typedef typename base::base_type::iterator iterator;
+// typedef typename base::base_type::reverse_iterator reverse_iterator;
+ typedef typename base::base_type::const_iterator const_iterator;
+ typedef typename base::base_type::size_type size_type;
+ typedef typename base::base_type::value_type value_type;
+ typedef typename base::mapped_type mapped_type;
+
+ string_hash_map(const allocator_type& allocator = allocator_type()) : base(allocator) {}
+ string_hash_map(const string_hash_map& src, const allocator_type& allocator = allocator_type());
+ ~string_hash_map();
+ void clear();
+ void clear(bool clearBuckets);
+
+ this_type& operator=(const this_type& x);
+
+ insert_return_type insert(const char* key, const T& value);
+ insert_return_type insert(const char* key);
+ pair<iterator, bool> insert_or_assign(const char* key, const T& value);
+ iterator erase(const_iterator position);
+ size_type erase(const char* key);
+ mapped_type& operator[](const char* key);
+
+private:
+ char* strduplicate(const char* str);
+
+ // Not implemented right now
+ // insert_return_type insert(const value_type& value);
+ // iterator insert(iterator position, const value_type& value);
+ // reverse_iterator erase(reverse_iterator position);
+ // reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+};
+
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+string_hash_map<T, Hash, Predicate, Allocator>::string_hash_map(const string_hash_map& src, const allocator_type& allocator) : base(allocator)
+{
+ for (const_iterator i=src.begin(), e=src.end(); i!=e; ++i)
+ base::base_type::insert(eastl::make_pair(strduplicate(i->first), i->second));
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+string_hash_map<T, Hash, Predicate, Allocator>::~string_hash_map()
+{
+ clear();
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+void string_hash_map<T, Hash, Predicate, Allocator>::clear()
+{
+ allocator_type& allocator = base::base_type::get_allocator();
+ for (const_iterator i=base::base_type::begin(), e=base::base_type::end(); i!=e; ++i)
+ EASTLFree(allocator, (void*)i->first, 0);
+ base::base_type::clear();
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+void string_hash_map<T, Hash, Predicate, Allocator>::clear(bool clearBuckets)
+{
+ allocator_type& allocator = base::base_type::get_allocator();
+ for (const_iterator i=base::base_type::begin(), e=base::base_type::end(); i!=e; ++i)
+ EASTLFree(allocator, (void*)i->first, 0);
+ base::base_type::clear(clearBuckets);
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+typename string_hash_map<T, Hash, Predicate, Allocator>::this_type&
+string_hash_map<T, Hash, Predicate, Allocator>::operator=(const this_type& x)
+{
+ allocator_type allocator = base::base_type::get_allocator();
+ this->~this_type();
+ new (this) this_type(x, allocator);
+ return *this;
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+typename string_hash_map<T, Hash, Predicate, Allocator>::insert_return_type
+string_hash_map<T, Hash, Predicate, Allocator>::insert(const char* key)
+{
+ return insert(key, mapped_type());
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+typename string_hash_map<T, Hash, Predicate, Allocator>::insert_return_type
+string_hash_map<T, Hash, Predicate, Allocator>::insert(const char* key, const T& value)
+{
+ EASTL_ASSERT(key);
+ iterator i = base::base_type::find(key);
+ if (i != base::base_type::end())
+ {
+ insert_return_type ret;
+ ret.first = i;
+ ret.second = false;
+ return ret;
+ }
+ return base::base_type::insert(eastl::make_pair(strduplicate(key), value));
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+eastl::pair<typename string_hash_map<T, Hash, Predicate, Allocator>::iterator, bool>
+string_hash_map<T, Hash, Predicate, Allocator>::insert_or_assign(const char* key, const T& value)
+{
+ iterator i = base::base_type::find(key);
+ if (i != base::base_type::end())
+ {
+ return base::base_type::insert_or_assign(i->first, value);
+ }
+ else
+ {
+ return base::base_type::insert_or_assign(strduplicate(key), value);
+ }
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+typename string_hash_map<T, Hash, Predicate, Allocator>::iterator
+string_hash_map<T, Hash, Predicate, Allocator>::erase(const_iterator position)
+{
+ const char* key = position->first;
+ iterator result = base::base_type::erase(position);
+ EASTLFree(base::base_type::get_allocator(), (void*)key, 0);
+ return result;
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+typename string_hash_map<T, Hash, Predicate, Allocator>::size_type
+string_hash_map<T, Hash, Predicate, Allocator>::erase(const char* key)
+{
+ const iterator it(base::base_type::find(key));
+
+ if(it != base::base_type::end())
+ {
+ erase(it);
+ return 1;
+ }
+ return 0;
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+typename string_hash_map<T, Hash, Predicate, Allocator>::mapped_type&
+string_hash_map<T, Hash, Predicate, Allocator>::operator[](const char* key)
+{
+ using base_value_type = typename base::base_type::value_type;
+
+ EASTL_ASSERT(key);
+ iterator i = base::base_type::find(key);
+ if (i != base::base_type::end())
+ return i->second;
+ return base::base_type::insert(base_value_type(pair_first_construct, strduplicate(key))).first->second;
+}
+
+template<typename T, typename Hash, typename Predicate, typename Allocator>
+char*
+string_hash_map<T, Hash, Predicate, Allocator>::strduplicate(const char* str)
+{
+ size_t len = strlen(str);
+ char* result = (char*)EASTLAlloc(base::base_type::get_allocator(), (len + 1));
+ memcpy(result, str, len+1);
+ return result;
+}
+
+
+}
+
+#endif
diff --git a/EASTL/include/EASTL/string_map.h b/EASTL/include/EASTL/string_map.h
new file mode 100644
index 0000000..b952e39
--- /dev/null
+++ b/EASTL/include/EASTL/string_map.h
@@ -0,0 +1,167 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STRING_MAP_H
+#define EASTL_STRING_MAP_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/map.h>
+#include <EASTL/string.h>
+
+namespace eastl
+{
+
+
+template<typename T, typename Predicate = str_less<const char*>, typename Allocator = EASTLAllocatorType>
+class string_map : public eastl::map<const char*, T, Predicate, Allocator>
+{
+public:
+ typedef eastl::map<const char*, T, Predicate, Allocator> base;
+ typedef string_map<T, Predicate, Allocator> this_type;
+ typedef typename base::base_type::allocator_type allocator_type;
+ typedef typename base::base_type::insert_return_type insert_return_type;
+ typedef typename base::base_type::iterator iterator;
+ typedef typename base::base_type::reverse_iterator reverse_iterator;
+ typedef typename base::base_type::const_iterator const_iterator;
+ typedef typename base::base_type::size_type size_type;
+ typedef typename base::base_type::key_type key_type;
+ typedef typename base::base_type::value_type value_type;
+ typedef typename base::mapped_type mapped_type;
+
+ string_map(const allocator_type& allocator = allocator_type()) : base(allocator) {}
+ string_map(const string_map& src, const allocator_type& allocator = allocator_type());
+ ~string_map();
+ void clear();
+
+ this_type& operator=(const this_type& x);
+
+ insert_return_type insert(const char* key, const T& value);
+ insert_return_type insert(const char* key);
+ iterator erase(iterator position);
+ size_type erase(const char* key);
+ mapped_type& operator[](const char* key);
+
+private:
+ char* strduplicate(const char* str);
+
+ // Not implemented right now
+ // insert_return_type insert(const value_type& value);
+ // iterator insert(iterator position, const value_type& value);
+ // reverse_iterator erase(reverse_iterator position);
+ // reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+ // void erase(const key_type* first, const key_type* last);
+};
+
+
+
+template<typename T, typename Predicate, typename Allocator>
+string_map<T, Predicate, Allocator>::string_map(const string_map& src, const allocator_type& allocator) : base(allocator)
+{
+ for (const_iterator i=src.begin(), e=src.end(); i!=e; ++i)
+ base::base_type::insert(eastl::make_pair(strduplicate(i->first), i->second));
+}
+
+template<typename T, typename Predicate, typename Allocator>
+string_map<T, Predicate, Allocator>::~string_map()
+{
+ clear();
+}
+
+template<typename T, typename Predicate, typename Allocator>
+void
+string_map<T, Predicate, Allocator>::clear()
+{
+ allocator_type& allocator = base::base_type::get_allocator();
+ for (const_iterator i=base::base_type::begin(), e=base::base_type::end(); i!=e; ++i)
+ allocator.deallocate((void*)i->first, 0);
+ base::base_type::clear();
+}
+
+template<typename T, typename Predicate, typename Allocator>
+typename string_map<T, Predicate, Allocator>::this_type&
+string_map<T, Predicate, Allocator>::operator=(const this_type& x)
+{
+ allocator_type allocator = base::base_type::get_allocator();
+ this->~this_type();
+ new (this) this_type(x, allocator);
+ return *this;
+}
+
+template<typename T, typename Predicate, typename Allocator>
+typename string_map<T, Predicate, Allocator>::insert_return_type
+string_map<T, Predicate, Allocator>::insert(const char* key)
+{
+ return insert(key, mapped_type());
+}
+
+template<typename T, typename Predicate, typename Allocator>
+typename string_map<T, Predicate, Allocator>::insert_return_type
+string_map<T, Predicate, Allocator>::insert(const char* key, const T& value)
+{
+ EASTL_ASSERT(key);
+ iterator i = base::base_type::find(key);
+ if (i != base::base_type::end())
+ {
+ insert_return_type ret;
+ ret.first = i;
+ ret.second = false;
+ return ret;
+ }
+ return base::base_type::insert(eastl::make_pair(strduplicate(key), value));
+}
+
+template<typename T, typename Predicate, typename Allocator>
+typename string_map<T, Predicate, Allocator>::iterator
+string_map<T, Predicate, Allocator>::erase(iterator position)
+{
+ const char* key = position->first;
+ iterator result = base::base_type::erase(position);
+ base::base_type::get_allocator().deallocate((void*)key, 0);
+ return result;
+}
+
+template<typename T, typename Predicate, typename Allocator>
+typename string_map<T, Predicate, Allocator>::size_type
+string_map<T, Predicate, Allocator>::erase(const char* key)
+{
+ const iterator it(base::base_type::find(key));
+
+ if(it != base::base_type::end())
+ {
+ erase(it);
+ return 1;
+ }
+ return 0;
+}
+
+template<typename T, typename Predicate, typename Allocator>
+typename string_map<T, Predicate, Allocator>::mapped_type&
+string_map<T, Predicate, Allocator>::operator[](const char* key)
+{
+ using base_value_type = typename base::base_type::value_type;
+
+ EASTL_ASSERT(key);
+ iterator i = base::base_type::find(key);
+ if (i != base::base_type::end())
+ return i->second;
+ return base::base_type::insert(base_value_type(pair_first_construct, strduplicate(key))).first->second;
+}
+
+template<typename T, typename Predicate, typename Allocator>
+char*
+string_map<T, Predicate, Allocator>::strduplicate(const char* str)
+{
+ size_t len = strlen(str);
+ char* result = (char*)base::base_type::get_allocator().allocate(len + 1);
+ memcpy(result, str, len+1);
+ return result;
+}
+
+
+}
+
+#endif
diff --git a/EASTL/include/EASTL/string_view.h b/EASTL/include/EASTL/string_view.h
new file mode 100644
index 0000000..f600e50
--- /dev/null
+++ b/EASTL/include/EASTL/string_view.h
@@ -0,0 +1,849 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the eastl::string_view which is part of the C++ standard
+// STL library specification.
+//
+// http://en.cppreference.com/w/cpp/header/string_view
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STRING_VIEW_H
+#define EASTL_STRING_VIEW_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/char_traits.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/iterator.h>
+#include <EASTL/numeric_limits.h>
+
+#if EASTL_EXCEPTIONS_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <stdexcept> // std::out_of_range.
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+EA_DISABLE_VC_WARNING(4814)
+
+namespace eastl
+{
+ template <typename T>
+ class basic_string_view
+ {
+ public:
+ typedef basic_string_view<T> this_type;
+ typedef T value_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* iterator;
+ typedef const T* const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ static const EA_CONSTEXPR size_type npos = size_type(-1);
+
+ protected:
+ const_pointer mpBegin = nullptr;
+ size_type mnCount = 0;
+
+ public:
+ // 21.4.2.1, construction and assignment
+ EA_CONSTEXPR basic_string_view() EA_NOEXCEPT : mpBegin(nullptr), mnCount(0) {}
+ EA_CONSTEXPR basic_string_view(const basic_string_view& other) EA_NOEXCEPT = default;
+ EA_CONSTEXPR basic_string_view(const T* s, size_type count) : mpBegin(s), mnCount(count) {}
+ EA_CONSTEXPR basic_string_view(const T* s) : mpBegin(s), mnCount(s != nullptr ? CharStrlen(s) : 0) {}
+ basic_string_view& operator=(const basic_string_view& view) = default;
+
+ // 21.4.2.2, iterator support
+ EA_CONSTEXPR const_iterator begin() const EA_NOEXCEPT { return mpBegin; }
+ EA_CONSTEXPR const_iterator cbegin() const EA_NOEXCEPT { return mpBegin; }
+ EA_CONSTEXPR const_iterator end() const EA_NOEXCEPT { return mpBegin + mnCount; }
+ EA_CONSTEXPR const_iterator cend() const EA_NOEXCEPT { return mpBegin + mnCount; }
+ EA_CONSTEXPR const_reverse_iterator rbegin() const EA_NOEXCEPT { return const_reverse_iterator(mpBegin + mnCount); }
+ EA_CONSTEXPR const_reverse_iterator crbegin() const EA_NOEXCEPT { return const_reverse_iterator(mpBegin + mnCount); }
+ EA_CONSTEXPR const_reverse_iterator rend() const EA_NOEXCEPT { return const_reverse_iterator(mpBegin); }
+ EA_CONSTEXPR const_reverse_iterator crend() const EA_NOEXCEPT { return const_reverse_iterator(mpBegin); }
+
+
+ // 21.4.2.4, element access
+ EA_CONSTEXPR const_pointer data() const { return mpBegin; }
+ EA_CONSTEXPR const_reference front() const
+ {
+ return [&] { EASTL_ASSERT_MSG(!empty(), "behavior is undefined if string_view is empty"); }(), mpBegin[0];
+ }
+
+ EA_CONSTEXPR const_reference back() const
+ {
+ return [&] { EASTL_ASSERT_MSG(!empty(), "behavior is undefined if string_view is empty"); }(), mpBegin[mnCount - 1];
+ }
+
+ EA_CONSTEXPR const_reference operator[](size_type pos) const
+ {
+ // As per the standard spec: No bounds checking is performed: the behavior is undefined if pos >= size().
+ return mpBegin[pos];
+ }
+
+ EA_CPP14_CONSTEXPR const_reference at(size_type pos) const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(pos >= mnCount))
+ throw std::out_of_range("string_view::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(pos >= mnCount))
+ EASTL_FAIL_MSG("string_view::at -- out of range");
+ #endif
+
+ return mpBegin[pos];
+ }
+
+
+ // 21.4.2.3, capacity
+ EA_CONSTEXPR size_type size() const EA_NOEXCEPT { return mnCount; }
+ EA_CONSTEXPR size_type length() const EA_NOEXCEPT { return mnCount; }
+
+ // avoid macro expansion of max(...) from windows headers (potentially included before this file)
+ // by wrapping function name in brackets
+ EA_CONSTEXPR size_type max_size() const EA_NOEXCEPT { return (numeric_limits<size_type>::max)(); }
+ EA_CONSTEXPR bool empty() const EA_NOEXCEPT { return mnCount == 0; }
+
+
+ // 21.4.2.5, modifiers
+ EA_CPP14_CONSTEXPR void swap(basic_string_view& v)
+ {
+ eastl::swap(mpBegin, v.mpBegin);
+ eastl::swap(mnCount, v.mnCount);
+ }
+
+ EA_CPP14_CONSTEXPR void remove_prefix(size_type n)
+ {
+ EASTL_ASSERT_MSG(n <= mnCount, "behavior is undefined if moving past the end of the string");
+ mpBegin += n;
+ mnCount -= n;
+ }
+
+ EA_CPP14_CONSTEXPR void remove_suffix(size_type n)
+ {
+ EASTL_ASSERT_MSG(n <= mnCount, "behavior is undefined if moving past the beginning of the string");
+ mnCount -= n;
+ }
+
+
+ // 21.4.2.6, string operations
+ size_type copy(T* pDestination, size_type count, size_type pos = 0) const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(pos > mnCount))
+ throw std::out_of_range("string_view::copy -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(pos > mnCount))
+ EASTL_FAIL_MSG("string_view::copy -- out of range");
+ #endif
+
+ count = eastl::min<size_type>(count, mnCount - pos);
+ auto* pResult = CharStringUninitializedCopy(mpBegin + pos, mpBegin + pos + count, pDestination);
+ // *pResult = 0; // don't write the null-terminator
+ return pResult - pDestination;
+ }
+
+ EA_CPP14_CONSTEXPR basic_string_view substr(size_type pos = 0, size_type count = npos) const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(pos > mnCount))
+ throw std::out_of_range("string_view::substr -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(pos > mnCount))
+ EASTL_FAIL_MSG("string_view::substr -- out of range");
+ #endif
+
+ count = eastl::min<size_type>(count, mnCount - pos);
+ return this_type(mpBegin + pos, count);
+ }
+
+ static EA_CPP14_CONSTEXPR int compare(const T* pBegin1, const T* pEnd1, const T* pBegin2, const T* pEnd2)
+ {
+ const ptrdiff_t n1 = pEnd1 - pBegin1;
+ const ptrdiff_t n2 = pEnd2 - pBegin2;
+ const ptrdiff_t nMin = eastl::min_alt(n1, n2);
+ const int cmp = Compare(pBegin1, pBegin2, (size_type)nMin);
+
+ return (cmp != 0 ? cmp : (n1 < n2 ? -1 : (n1 > n2 ? 1 : 0)));
+ }
+
+ EA_CPP14_CONSTEXPR int compare(basic_string_view sw) const EA_NOEXCEPT
+ {
+ return compare(mpBegin, mpBegin + mnCount, sw.mpBegin, sw.mpBegin + sw.mnCount);
+ }
+
+ EA_CONSTEXPR int compare(size_type pos1, size_type count1, basic_string_view sw) const
+ {
+ return substr(pos1, count1).compare(sw);
+ }
+
+ EA_CONSTEXPR int compare(size_type pos1,
+ size_type count1,
+ basic_string_view sw,
+ size_type pos2,
+ size_type count2) const
+ {
+ return substr(pos1, count1).compare(sw.substr(pos2, count2));
+ }
+
+ EA_CONSTEXPR int compare(const T* s) const { return compare(basic_string_view(s)); }
+
+ EA_CONSTEXPR int compare(size_type pos1, size_type count1, const T* s) const
+ {
+ return substr(pos1, count1).compare(basic_string_view(s));
+ }
+
+ EA_CONSTEXPR int compare(size_type pos1, size_type count1, const T* s, size_type count2) const
+ {
+ return substr(pos1, count1).compare(basic_string_view(s, count2));
+ }
+
+ EA_CPP14_CONSTEXPR size_type find(basic_string_view sw, size_type pos = 0) const EA_NOEXCEPT
+ {
+ auto* pEnd = mpBegin + mnCount;
+ if (EASTL_LIKELY(((npos - sw.size()) >= pos) && (pos + sw.size()) <= mnCount))
+ {
+ const value_type* const pTemp = eastl::search(mpBegin + pos, pEnd, sw.data(), sw.data() + sw.size());
+
+ if ((pTemp != pEnd) || (sw.size() == 0))
+ return (size_type)(pTemp - mpBegin);
+ }
+ return npos;
+ }
+
+ EA_CONSTEXPR size_type find(T c, size_type pos = 0) const EA_NOEXCEPT
+ {
+ return find(basic_string_view(&c, 1), pos);
+ }
+
+ EA_CONSTEXPR size_type find(const T* s, size_type pos, size_type count) const
+ {
+ return find(basic_string_view(s, count), pos);
+ }
+
+ EA_CONSTEXPR size_type find(const T* s, size_type pos = 0) const { return find(basic_string_view(s), pos); }
+
+ EA_CONSTEXPR size_type rfind(basic_string_view sw, size_type pos = npos) const EA_NOEXCEPT
+ {
+ return rfind(sw.mpBegin, pos, sw.mnCount);
+ }
+
+ EA_CPP14_CONSTEXPR size_type rfind(T c, size_type pos = npos) const EA_NOEXCEPT
+ {
+ if (EASTL_LIKELY(mnCount))
+ {
+ const value_type* const pEnd = mpBegin + eastl::min_alt(mnCount - 1, pos) + 1;
+ const value_type* const pResult = CharTypeStringRFind(pEnd, mpBegin, c);
+
+ if (pResult != mpBegin)
+ return (size_type)((pResult - 1) - mpBegin);
+ }
+ return npos;
+ }
+
+ EA_CPP14_CONSTEXPR size_type rfind(const T* s, size_type pos, size_type n) const
+ {
+ // Disabled because it's not clear what values are valid for position.
+ // It is documented that npos is a valid value, though. We return npos and
+ // don't crash if postion is any invalid value.
+ //#if EASTL_ASSERT_ENABLED
+ // if(EASTL_UNLIKELY((position != npos) && (position > (size_type)(mpEnd - mpBegin))))
+ // EASTL_FAIL_MSG("basic_string::rfind -- invalid position");
+ //#endif
+
+ // Note that a search for a zero length string starting at position = end() returns end() and not npos.
+ // Note by Paul Pedriana: I am not sure how this should behave in the case of n == 0 and position > size.
+ // The standard seems to suggest that rfind doesn't act exactly the same as find in that input position
+ // can be > size and the return value can still be other than npos. Thus, if n == 0 then you can
+ // never return npos, unlike the case with find.
+ if (EASTL_LIKELY(n <= mnCount))
+ {
+ if (EASTL_LIKELY(n))
+ {
+ const const_iterator pEnd = mpBegin + eastl::min_alt(mnCount - n, pos) + n;
+ const const_iterator pResult = CharTypeStringRSearch(mpBegin, pEnd, s, s + n);
+
+ if (pResult != pEnd)
+ return (size_type)(pResult - mpBegin);
+ }
+ else
+ return eastl::min_alt(mnCount, pos);
+ }
+ return npos;
+ }
+
+ EA_CONSTEXPR size_type rfind(const T* s, size_type pos = npos) const
+ {
+ return rfind(s, pos, (size_type)CharStrlen(s));
+ }
+
+ EA_CONSTEXPR size_type find_first_of(basic_string_view sw, size_type pos = 0) const EA_NOEXCEPT
+ {
+ return find_first_of(sw.mpBegin, pos, sw.mnCount);
+ }
+
+ EA_CONSTEXPR size_type find_first_of(T c, size_type pos = 0) const EA_NOEXCEPT { return find(c, pos); }
+
+ EA_CPP14_CONSTEXPR size_type find_first_of(const T* s, size_type pos, size_type n) const
+ {
+ // If position is >= size, we return npos.
+ if (EASTL_LIKELY((pos < mnCount)))
+ {
+ const value_type* const pBegin = mpBegin + pos;
+ const value_type* const pEnd = mpBegin + mnCount;
+ const const_iterator pResult = CharTypeStringFindFirstOf(pBegin, pEnd, s, s + n);
+
+ if (pResult != pEnd)
+ return (size_type)(pResult - mpBegin);
+ }
+ return npos;
+ }
+
+ EA_CONSTEXPR size_type find_first_of(const T* s, size_type pos = 0) const
+ {
+ return find_first_of(s, pos, (size_type)CharStrlen(s));
+ }
+
+ EA_CONSTEXPR size_type find_last_of(basic_string_view sw, size_type pos = npos) const EA_NOEXCEPT
+ {
+ return find_last_of(sw.mpBegin, pos, sw.mnCount);
+ }
+
+ EA_CONSTEXPR size_type find_last_of(T c, size_type pos = npos) const EA_NOEXCEPT { return rfind(c, pos); }
+
+ EA_CPP14_CONSTEXPR size_type find_last_of(const T* s, size_type pos, size_type n) const
+ {
+ // If n is zero or position is >= size, we return npos.
+ if (EASTL_LIKELY(mnCount))
+ {
+ const value_type* const pEnd = mpBegin + eastl::min_alt(mnCount - 1, pos) + 1;
+ const value_type* const pResult = CharTypeStringRFindFirstOf(pEnd, mpBegin, s, s + n);
+
+ if (pResult != mpBegin)
+ return (size_type)((pResult - 1) - mpBegin);
+ }
+ return npos;
+ }
+
+ EA_CONSTEXPR size_type find_last_of(const T* s, size_type pos = npos) const
+ {
+ return find_last_of(s, pos, (size_type)CharStrlen(s));
+ }
+
+ EA_CONSTEXPR size_type find_first_not_of(basic_string_view sw, size_type pos = 0) const EA_NOEXCEPT
+ {
+ return find_first_not_of(sw.mpBegin, pos, sw.mnCount);
+ }
+
+ EA_CPP14_CONSTEXPR size_type find_first_not_of(T c, size_type pos = 0) const EA_NOEXCEPT
+ {
+ if (EASTL_LIKELY(pos <= mnCount))
+ {
+ const auto pEnd = mpBegin + mnCount;
+ // Todo: Possibly make a specialized version of CharTypeStringFindFirstNotOf(pBegin, pEnd, c).
+ const const_iterator pResult = CharTypeStringFindFirstNotOf(mpBegin + pos, pEnd, &c, &c + 1);
+
+ if (pResult != pEnd)
+ return (size_type)(pResult - mpBegin);
+ }
+ return npos;
+ }
+
+ EA_CPP14_CONSTEXPR size_type find_first_not_of(const T* s, size_type pos, size_type n) const
+ {
+ if (EASTL_LIKELY(pos <= mnCount))
+ {
+ const auto pEnd = mpBegin + mnCount;
+ const const_iterator pResult = CharTypeStringFindFirstNotOf(mpBegin + pos, pEnd, s, s + n);
+
+ if (pResult != pEnd)
+ return (size_type)(pResult - mpBegin);
+ }
+ return npos;
+ }
+
+ EA_CONSTEXPR size_type find_first_not_of(const T* s, size_type pos = 0) const
+ {
+ return find_first_not_of(s, pos, (size_type)CharStrlen(s));
+ }
+
+ EA_CONSTEXPR size_type find_last_not_of(basic_string_view sw, size_type pos = npos) const EA_NOEXCEPT
+ {
+ return find_last_not_of(sw.mpBegin, pos, sw.mnCount);
+ }
+
+ EA_CPP14_CONSTEXPR size_type find_last_not_of(T c, size_type pos = npos) const EA_NOEXCEPT
+ {
+ if (EASTL_LIKELY(mnCount))
+ {
+ // Todo: Possibly make a specialized version of CharTypeStringRFindFirstNotOf(pBegin, pEnd, c).
+ const value_type* const pEnd = mpBegin + eastl::min_alt(mnCount - 1, pos) + 1;
+ const value_type* const pResult = CharTypeStringRFindFirstNotOf(pEnd, mpBegin, &c, &c + 1);
+
+ if (pResult != mpBegin)
+ return (size_type)((pResult - 1) - mpBegin);
+ }
+ return npos;
+ }
+
+ EA_CPP14_CONSTEXPR size_type find_last_not_of(const T* s, size_type pos, size_type n) const
+ {
+ if (EASTL_LIKELY(mnCount))
+ {
+ const value_type* const pEnd = mpBegin + eastl::min_alt(mnCount - 1, pos) + 1;
+ const value_type* const pResult = CharTypeStringRFindFirstNotOf(pEnd, mpBegin, s, s + n);
+
+ if (pResult != mpBegin)
+ return (size_type)((pResult - 1) - mpBegin);
+ }
+ return npos;
+ }
+
+ EA_CONSTEXPR size_type find_last_not_of(const T* s, size_type pos = npos) const
+ {
+ return find_last_not_of(s, pos, (size_type)CharStrlen(s));
+ }
+
+ // starts_with
+ EA_CONSTEXPR bool starts_with(basic_string_view x) const EA_NOEXCEPT
+ {
+ return (size() >= x.size()) && (compare(0, x.size(), x) == 0);
+ }
+
+ EA_CONSTEXPR bool starts_with(T x) const EA_NOEXCEPT
+ {
+ return starts_with(basic_string_view(&x, 1));
+ }
+
+ EA_CONSTEXPR bool starts_with(const T* s) const
+ {
+ return starts_with(basic_string_view(s));
+ }
+
+ // ends_with
+ EA_CONSTEXPR bool ends_with(basic_string_view x) const EA_NOEXCEPT
+ {
+ return (size() >= x.size()) && (compare(size() - x.size(), npos, x) == 0);
+ }
+
+ EA_CONSTEXPR bool ends_with(T x) const EA_NOEXCEPT
+ {
+ return ends_with(basic_string_view(&x, 1));
+ }
+
+ EA_CONSTEXPR bool ends_with(const T* s) const
+ {
+ return ends_with(basic_string_view(s));
+ }
+ };
+
+
+ // global operators
+
+ // Disabling symmetric comparisons that require conversions, since they are causing an internal compiler error
+ // when compiled using MSVC when certain flags are enabled (/Zi /O2 /Zc:inline)
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator==(basic_string_view<CharT> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return (lhs.size() == rhs.size()) && (lhs.compare(rhs) == 0);
+ // }
+ //
+ // // type_identity_t is used in this context to forcefully trigger conversion operators towards basic_string_view.
+ // // Mostly we want basic_string::operator basic_string_view() to kick-in to be able to compare strings and string_views.
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator==(type_identity_t<basic_string_view<CharT>> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return (lhs.size() == rhs.size()) && (lhs.compare(rhs) == 0);
+ // }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator==(basic_string_view<CharT> lhs, type_identity_t<basic_string_view<CharT>> rhs) EA_NOEXCEPT
+ {
+ return (lhs.size() == rhs.size()) && (lhs.compare(rhs) == 0);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <class CharT>
+ inline EA_CONSTEXPR auto operator<=>(basic_string_view<CharT> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ {
+ return static_cast<std::weak_ordering>(lhs.compare(rhs) <=> 0);
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR auto operator<=>(basic_string_view<CharT> lhs, typename basic_string_view<CharT>::const_pointer rhs) EA_NOEXCEPT
+ {
+ typedef basic_string_view<CharT> view_type;
+ return static_cast<std::weak_ordering>(lhs <=> static_cast<view_type>(rhs));
+ }
+
+#else
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator==(typename basic_string_view<CharT>::const_pointer lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ typedef basic_string_view<CharT> view_type;
+ return static_cast<view_type>(lhs) == rhs;
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator==(basic_string_view<CharT> lhs, typename basic_string_view<CharT>::const_pointer rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ typedef basic_string_view<CharT> view_type;
+ return lhs == static_cast<view_type>(rhs);
+ }
+
+ // Disabling symmetric comparisons that require conversions, since they are causing an internal compiler error
+ // when compiled using MSVC when certain flags are enabled (/Zi /O2 /Zc:inline)
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator!=(basic_string_view<CharT> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return !(lhs == rhs);
+ // }
+ //
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator!=(type_identity_t<basic_string_view<CharT>> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return !(lhs == rhs);
+ // }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator!=(basic_string_view<CharT> lhs, type_identity_t<basic_string_view<CharT>> rhs) EA_NOEXCEPT
+ {
+ return !(lhs == rhs);
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator!=(typename basic_string_view<CharT>::const_pointer lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(lhs == rhs);
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator!=(basic_string_view<CharT> lhs, typename basic_string_view<CharT>::const_pointer rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(lhs == rhs);
+ }
+
+ // Disabling symmetric comparisons that require conversions, since they are causing an internal compiler error
+ // when compiled using MSVC when certain flags are enabled (/Zi /O2 /Zc:inline)
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator<(basic_string_view<CharT> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return lhs.compare(rhs) < 0;
+ // }
+ //
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator<(type_identity_t<basic_string_view<CharT>> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return lhs.compare(rhs) < 0;
+ // }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator<(basic_string_view<CharT> lhs, type_identity_t<basic_string_view<CharT>> rhs) EA_NOEXCEPT
+ {
+ return lhs.compare(rhs) < 0;
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator<(typename basic_string_view<CharT>::const_pointer lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ typedef basic_string_view<CharT> view_type;
+ return static_cast<view_type>(lhs) < rhs;
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator<(basic_string_view<CharT> lhs, typename basic_string_view<CharT>::const_pointer rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ typedef basic_string_view<CharT> view_type;
+ return lhs < static_cast<view_type>(rhs);
+ }
+
+ // Disabling symmetric comparisons that require conversions, since they are causing an internal compiler error
+ // when compiled using MSVC when certain flags are enabled (/Zi /O2 /Zc:inline)
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator<=(basic_string_view<CharT> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return !(rhs < lhs);
+ // }
+ //
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator<=(type_identity_t<basic_string_view<CharT>> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return !(rhs < lhs);
+ // }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator<=(basic_string_view<CharT> lhs, type_identity_t<basic_string_view<CharT>> rhs) EA_NOEXCEPT
+ {
+ return !(rhs < lhs);
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator<=(typename basic_string_view<CharT>::const_pointer lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(rhs < lhs);
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator<=(basic_string_view<CharT> lhs, typename basic_string_view<CharT>::const_pointer rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(rhs < lhs);
+ }
+
+ // Disabling symmetric comparisons that require conversions, since they are causing an internal compiler error
+ // when compiled using MSVC when certain flags are enabled (/Zi /O2 /Zc:inline)
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator>(basic_string_view<CharT> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return rhs < lhs;
+ // }
+ //
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator>(type_identity_t<basic_string_view<CharT>> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return rhs < lhs;
+ // }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator>(basic_string_view<CharT> lhs, type_identity_t<basic_string_view<CharT>> rhs) EA_NOEXCEPT
+ {
+ return rhs < lhs;
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator>(typename basic_string_view<CharT>::const_pointer lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return rhs < lhs;
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator>(basic_string_view<CharT> lhs, typename basic_string_view<CharT>::const_pointer rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return rhs < lhs;
+ }
+
+ // Disabling symmetric comparisons that require conversions, since they are causing an internal compiler error
+ // when compiled using MSVC when certain flags are enabled (/Zi /O2 /Zc:inline)
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator>=(basic_string_view<CharT> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return !(lhs < rhs);
+ // }
+ //
+ // template <class CharT>
+ // inline EA_CONSTEXPR bool operator>=(type_identity_t<basic_string_view<CharT>> lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ // {
+ // return !(lhs < rhs);
+ // }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator>=(basic_string_view<CharT> lhs, type_identity_t<basic_string_view<CharT>> rhs) EA_NOEXCEPT
+ {
+ return !(lhs < rhs);
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator>=(typename basic_string_view<CharT>::const_pointer lhs, basic_string_view<CharT> rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(lhs < rhs);
+ }
+
+ template <class CharT>
+ inline EA_CONSTEXPR bool operator>=(basic_string_view<CharT> lhs, typename basic_string_view<CharT>::const_pointer rhs) EA_NOEXCEPT
+ {
+ // Workaround for basic_string_view comparisons that require conversions,
+ // since they are causing an internal compiler error when compiled using
+ // MSVC when certain flags are enabled (/Zi /O2 /Zc:inline).
+
+ return !(lhs < rhs);
+ }
+#endif
+ // string_view / wstring_view
+ typedef basic_string_view<char> string_view;
+ typedef basic_string_view<wchar_t> wstring_view;
+
+ // C++17 string types
+ typedef basic_string_view<char8_t> u8string_view; // C++20 feature, but always present for consistency.
+ typedef basic_string_view<char16_t> u16string_view;
+ typedef basic_string_view<char32_t> u32string_view;
+
+
+ /// hash<string_view>
+ ///
+ /// We provide EASTL hash function objects for use in hash table containers.
+ ///
+ /// Example usage:
+ /// #include <EASTL/hash_set.h>
+ /// hash_set<string_view> stringHashSet;
+ ///
+ template <typename T> struct hash;
+
+ template<> struct hash<string_view>
+ {
+ size_t operator()(const string_view& x) const
+ {
+ string_view::const_iterator p = x.cbegin();
+ string_view::const_iterator end = x.cend();
+ uint32_t result = 2166136261U; // We implement an FNV-like string hash.
+ while (p != end)
+ result = (result * 16777619) ^ (uint8_t)*p++;
+ return (size_t)result;
+ }
+ };
+
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ template<> struct hash<u8string_view>
+ {
+ size_t operator()(const u8string_view& x) const
+ {
+ u8string_view::const_iterator p = x.cbegin();
+ u8string_view::const_iterator end = x.cend();
+ uint32_t result = 2166136261U;
+ while (p != end)
+ result = (result * 16777619) ^ (uint8_t)*p++;
+ return (size_t)result;
+ }
+ };
+ #endif
+
+ template<> struct hash<u16string_view>
+ {
+ size_t operator()(const u16string_view& x) const
+ {
+ u16string_view::const_iterator p = x.cbegin();
+ u16string_view::const_iterator end = x.cend();
+ uint32_t result = 2166136261U;
+ while (p != end)
+ result = (result * 16777619) ^ (uint16_t)*p++;
+ return (size_t)result;
+ }
+ };
+
+ template<> struct hash<u32string_view>
+ {
+ size_t operator()(const u32string_view& x) const
+ {
+ u32string_view::const_iterator p = x.cbegin();
+ u32string_view::const_iterator end = x.cend();
+ uint32_t result = 2166136261U;
+ while (p != end)
+ result = (result * 16777619) ^ (uint32_t)*p++;
+ return (size_t)result;
+ }
+ };
+
+ #if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE
+ template<> struct hash<wstring_view>
+ {
+ size_t operator()(const wstring_view& x) const
+ {
+ wstring_view::const_iterator p = x.cbegin();
+ wstring_view::const_iterator end = x.cend();
+ uint32_t result = 2166136261U;
+ while (p != end)
+ result = (result * 16777619) ^ (uint32_t)*p++;
+ return (size_t)result;
+ }
+ };
+ #endif
+
+
+ #if EASTL_USER_LITERALS_ENABLED && EASTL_INLINE_NAMESPACES_ENABLED
+ // Disabling the Clang/GCC/MSVC warning about using user
+ // defined literals without a leading '_' as they are reserved
+ // for standard libary usage.
+ EA_DISABLE_VC_WARNING(4455)
+ EA_DISABLE_CLANG_WARNING(-Wuser-defined-literals)
+ EA_DISABLE_GCC_WARNING(-Wliteral-suffix)
+
+ inline namespace literals
+ {
+ inline namespace string_view_literals
+ {
+ EA_CONSTEXPR inline string_view operator "" sv(const char* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ EA_CONSTEXPR inline u16string_view operator "" sv(const char16_t* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ EA_CONSTEXPR inline u32string_view operator "" sv(const char32_t* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ EA_CONSTEXPR inline wstring_view operator "" sv(const wchar_t* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+
+ // We've seen _sv trigger the following warning on clang:
+ // identifier '_sv' is reserved because it starts with '_' at global scope [-Wreserved-identifier]
+ // Temporarily disable the warning until we figure out why it thinks _sv is "at global scope".
+ EA_DISABLE_CLANG_WARNING(-Wreserved-identifier)
+ // Backwards compatibility.
+ EA_CONSTEXPR inline string_view operator "" _sv(const char* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ EA_CONSTEXPR inline u16string_view operator "" _sv(const char16_t* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ EA_CONSTEXPR inline u32string_view operator "" _sv(const char32_t* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ EA_CONSTEXPR inline wstring_view operator "" _sv(const wchar_t* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ EA_RESTORE_CLANG_WARNING() // -Wreserved-identifier
+
+ // C++20 char8_t support.
+ #if EA_CHAR8_UNIQUE
+ EA_CONSTEXPR inline u8string_view operator "" sv(const char8_t* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ EA_CONSTEXPR inline u8string_view operator "" _sv(const char8_t* str, size_t len) EA_NOEXCEPT { return {str, len}; }
+ #endif
+ }
+ }
+
+ EA_RESTORE_GCC_WARNING() // -Wliteral-suffix
+ EA_RESTORE_CLANG_WARNING() // -Wuser-defined-literals
+ EA_RESTORE_VC_WARNING() // warning: 4455
+ #endif
+
+} // namespace eastl
+
+EA_RESTORE_VC_WARNING()
+#endif // EASTL_STRING_VIEW_H
diff --git a/EASTL/include/EASTL/tuple.h b/EASTL/include/EASTL/tuple.h
new file mode 100644
index 0000000..12460c6
--- /dev/null
+++ b/EASTL/include/EASTL/tuple.h
@@ -0,0 +1,978 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_TUPLE_H
+#define EASTL_TUPLE_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/compare.h>
+#include <EASTL/functional.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/utility.h>
+
+#include <EASTL/internal/tuple_fwd_decls.h>
+
+EA_DISABLE_VC_WARNING(4623) // warning C4623: default constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4625) // warning C4625: copy constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4510) // warning C4510: default constructor could not be generated
+
+#if EASTL_TUPLE_ENABLED
+
+namespace eastl
+{
+// non-recursive tuple implementation based on libc++ tuple implementation and description at
+// http://mitchnull.blogspot.ca/2012/06/c11-tuple-implementation-details-part-1.html
+
+// TupleTypes helper
+template <typename... Ts> struct TupleTypes {};
+
+// tuple_size helper
+template <typename T> class tuple_size {};
+template <typename T> class tuple_size<const T> : public tuple_size<T> {};
+template <typename T> class tuple_size<volatile T> : public tuple_size<T> {};
+template <typename T> class tuple_size<const volatile T> : public tuple_size<T> {};
+
+template <typename... Ts> class tuple_size<TupleTypes<Ts...>> : public integral_constant<size_t, sizeof...(Ts)> {};
+template <typename... Ts> class tuple_size<tuple<Ts...>> : public integral_constant<size_t, sizeof...(Ts)> {};
+
+#if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR size_t tuple_size_v = tuple_size<T>::value;
+#endif
+
+namespace Internal
+{
+ template <typename TupleIndices, typename... Ts>
+ struct TupleImpl;
+} // namespace Internal
+
+template <typename Indices, typename... Ts>
+class tuple_size<Internal::TupleImpl<Indices, Ts...>> : public integral_constant<size_t, sizeof...(Ts)>
+{
+};
+
+// tuple_element helper to be able to isolate a type given an index
+template <size_t I, typename T>
+class tuple_element
+{
+};
+
+template <size_t I>
+class tuple_element<I, TupleTypes<>>
+{
+public:
+ static_assert(I != I, "tuple_element index out of range");
+};
+
+template <typename H, typename... Ts>
+class tuple_element<0, TupleTypes<H, Ts...>>
+{
+public:
+ typedef H type;
+};
+
+template <size_t I, typename H, typename... Ts>
+class tuple_element<I, TupleTypes<H, Ts...>>
+{
+public:
+ typedef tuple_element_t<I - 1, TupleTypes<Ts...>> type;
+};
+
+// specialization for tuple
+template <size_t I, typename... Ts>
+class tuple_element<I, tuple<Ts...>>
+{
+public:
+ typedef tuple_element_t<I, TupleTypes<Ts...>> type;
+};
+
+template <size_t I, typename... Ts>
+class tuple_element<I, const tuple<Ts...>>
+{
+public:
+ typedef typename add_const<tuple_element_t<I, TupleTypes<Ts...>>>::type type;
+};
+
+template <size_t I, typename... Ts>
+class tuple_element<I, volatile tuple<Ts...>>
+{
+public:
+ typedef typename add_volatile<tuple_element_t<I, TupleTypes<Ts...>>>::type type;
+};
+
+template <size_t I, typename... Ts>
+class tuple_element<I, const volatile tuple<Ts...>>
+{
+public:
+ typedef typename add_cv<tuple_element_t<I, TupleTypes<Ts...>>>::type type;
+};
+
+// specialization for TupleImpl
+template <size_t I, typename Indices, typename... Ts>
+class tuple_element<I, Internal::TupleImpl<Indices, Ts...>> : public tuple_element<I, tuple<Ts...>>
+{
+};
+
+template <size_t I, typename Indices, typename... Ts>
+class tuple_element<I, const Internal::TupleImpl<Indices, Ts...>> : public tuple_element<I, const tuple<Ts...>>
+{
+};
+
+template <size_t I, typename Indices, typename... Ts>
+class tuple_element<I, volatile Internal::TupleImpl<Indices, Ts...>> : public tuple_element<I, volatile tuple<Ts...>>
+{
+};
+
+template <size_t I, typename Indices, typename... Ts>
+class tuple_element<I, const volatile Internal::TupleImpl<Indices, Ts...>> : public tuple_element<
+ I, const volatile tuple<Ts...>>
+{
+};
+
+// attempt to isolate index given a type
+template <typename T, typename Tuple>
+struct tuple_index
+{
+};
+
+template <typename T>
+struct tuple_index<T, TupleTypes<>>
+{
+ typedef void DuplicateTypeCheck;
+ tuple_index() = delete; // tuple_index should only be used for compile-time assistance, and never be instantiated
+ static const size_t index = 0;
+};
+
+template <typename T, typename... TsRest>
+struct tuple_index<T, TupleTypes<T, TsRest...>>
+{
+ typedef int DuplicateTypeCheck;
+ // after finding type T in the list of types, try to find type T in TsRest.
+ // If we stumble back into this version of tuple_index, i.e. type T appears twice in the list of types, then DuplicateTypeCheck will be of type int, and the static_assert will fail.
+ // If we don't, then we'll go through the version of tuple_index above, where all of the types have been exhausted, and DuplicateTypeCheck will be void.
+ static_assert(is_void<typename tuple_index<T, TupleTypes<TsRest...>>::DuplicateTypeCheck>::value, "duplicate type T in tuple_vector::get<T>(); unique types must be provided in declaration, or only use get<size_t>()");
+
+ static const size_t index = 0;
+};
+
+template <typename T, typename TsHead, typename... TsRest>
+struct tuple_index<T, TupleTypes<TsHead, TsRest...>>
+{
+ typedef typename tuple_index<T, TupleTypes<TsRest...>>::DuplicateTypeCheck DuplicateTypeCheck;
+ static const size_t index = tuple_index<T, TupleTypes<TsRest...>>::index + 1;
+};
+
+template <typename T, typename Indices, typename... Ts>
+struct tuple_index<T, Internal::TupleImpl<Indices, Ts...>> : public tuple_index<T, TupleTypes<Ts...>>
+{
+};
+
+
+namespace Internal
+{
+ // swallow
+ //
+ // Provides a vessel to expand variadic packs.
+ //
+ template <typename... Ts>
+ void swallow(Ts&&...) {}
+
+
+ // TupleLeaf
+ //
+ template <size_t I, typename ValueType, bool IsEmpty = is_empty_v<ValueType>>
+ class TupleLeaf;
+
+ template <size_t I, typename ValueType, bool IsEmpty>
+ inline void swap(TupleLeaf<I, ValueType, IsEmpty>& a, TupleLeaf<I, ValueType, IsEmpty>& b)
+ {
+ eastl::swap(a.getInternal(), b.getInternal());
+ }
+
+ template <size_t I, typename ValueType, bool IsEmpty>
+ class TupleLeaf
+ {
+ public:
+ TupleLeaf() : mValue() {}
+ TupleLeaf(const TupleLeaf&) = default;
+ TupleLeaf& operator=(const TupleLeaf&) = delete;
+
+ // We shouldn't need this explicit constructor as it should be handled by the template below but OSX clang
+ // is_constructible type trait incorrectly gives false for is_constructible<T&&, T&&>::value
+ explicit TupleLeaf(ValueType&& v) : mValue(eastl::forward<ValueType>(v)) {}
+
+ template <typename T, typename = typename enable_if<is_constructible<ValueType, T&&>::value>::type>
+ explicit TupleLeaf(T&& t)
+ : mValue(eastl::forward<T>(t))
+ {
+ }
+
+ template <typename T>
+ explicit TupleLeaf(const TupleLeaf<I, T>& t)
+ : mValue(t.getInternal())
+ {
+ }
+
+ template <typename T>
+ TupleLeaf& operator=(T&& t)
+ {
+ mValue = eastl::forward<T>(t);
+ return *this;
+ }
+
+ int swap(TupleLeaf& t)
+ {
+ eastl::Internal::swap(*this, t);
+ return 0;
+ }
+
+ ValueType& getInternal() { return mValue; }
+ const ValueType& getInternal() const { return mValue; }
+
+ private:
+ ValueType mValue;
+ };
+
+ // TupleLeaf: partial specialization for when we can use the Empty Base Class Optimization
+ template <size_t I, typename ValueType>
+ class TupleLeaf<I, ValueType, true> : private ValueType
+ {
+ public:
+ // true_type / false_type constructors for case where ValueType is default constructible and should be value
+ // initialized and case where it is not
+ TupleLeaf(const TupleLeaf&) = default;
+
+ template <typename T, typename = typename enable_if<is_constructible<ValueType, T&&>::value>::type>
+ explicit TupleLeaf(T&& t)
+ : ValueType(eastl::forward<T>(t))
+ {
+ }
+
+ template <typename T>
+ explicit TupleLeaf(const TupleLeaf<I, T>& t)
+ : ValueType(t.getInternal())
+ {
+ }
+
+ template <typename T>
+ TupleLeaf& operator=(T&& t)
+ {
+ ValueType::operator=(eastl::forward<T>(t));
+ return *this;
+ }
+
+ int swap(TupleLeaf& t)
+ {
+ eastl::Internal::swap(*this, t);
+ return 0;
+ }
+
+ ValueType& getInternal() { return static_cast<ValueType&>(*this); }
+ const ValueType& getInternal() const { return static_cast<const ValueType&>(*this); }
+
+ private:
+ TupleLeaf& operator=(const TupleLeaf&) = delete;
+ };
+
+
+
+ // MakeTupleTypes
+ //
+ //
+ template <typename TupleTypes, typename Tuple, size_t Start, size_t End>
+ struct MakeTupleTypesImpl;
+
+ template <typename... Types, typename Tuple, size_t Start, size_t End>
+ struct MakeTupleTypesImpl<TupleTypes<Types...>, Tuple, Start, End>
+ {
+ typedef typename remove_reference<Tuple>::type TupleType;
+ typedef typename MakeTupleTypesImpl<
+ TupleTypes<Types..., typename conditional<is_lvalue_reference<Tuple>::value,
+ // append ref if Tuple is ref
+ tuple_element_t<Start, TupleType>&,
+ // append non-ref otherwise
+ tuple_element_t<Start, TupleType>>::type>,
+ Tuple, Start + 1, End>::type type;
+ };
+
+ template <typename... Types, typename Tuple, size_t End>
+ struct MakeTupleTypesImpl<TupleTypes<Types...>, Tuple, End, End>
+ {
+ typedef TupleTypes<Types...> type;
+ };
+
+ template <typename Tuple>
+ using MakeTupleTypes_t = typename MakeTupleTypesImpl<TupleTypes<>, Tuple, 0,
+ tuple_size<typename remove_reference<Tuple>::type>::value>::type;
+
+
+ // TupleImpl
+ //
+ //
+ template <size_t I, typename Indices, typename... Ts>
+ tuple_element_t<I, TupleImpl<Indices, Ts...>>& get(TupleImpl<Indices, Ts...>& t);
+
+ template <size_t I, typename Indices, typename... Ts>
+ const_tuple_element_t<I, TupleImpl<Indices, Ts...>>& get(const TupleImpl<Indices, Ts...>& t);
+
+ template <size_t I, typename Indices, typename... Ts>
+ tuple_element_t<I, TupleImpl<Indices, Ts...>>&& get(TupleImpl<Indices, Ts...>&& t);
+
+ template <typename T, typename Indices, typename... Ts>
+ T& get(TupleImpl<Indices, Ts...>& t);
+
+ template <typename T, typename Indices, typename... Ts>
+ const T& get(const TupleImpl<Indices, Ts...>& t);
+
+ template <typename T, typename Indices, typename... Ts>
+ T&& get(TupleImpl<Indices, Ts...>&& t);
+
+ template <size_t... Indices, typename... Ts>
+ struct TupleImpl<integer_sequence<size_t, Indices...>, Ts...> : public TupleLeaf<Indices, Ts>...
+ {
+ EA_CONSTEXPR TupleImpl() = default;
+
+ // index_sequence changed to integer_sequence due to issues described below in VS2015 CTP 6.
+ // https://connect.microsoft.com/VisualStudio/feedback/details/1126958/error-in-template-parameter-pack-expansion-of-std-index-sequence
+ //
+ template <typename... Us, typename... ValueTypes>
+ explicit TupleImpl(integer_sequence<size_t, Indices...>, TupleTypes<Us...>, ValueTypes&&... values)
+ : TupleLeaf<Indices, Ts>(eastl::forward<ValueTypes>(values))...
+ {
+ }
+
+ template <typename OtherTuple>
+ TupleImpl(OtherTuple&& t)
+ : TupleLeaf<Indices, Ts>(eastl::forward<tuple_element_t<Indices, MakeTupleTypes_t<OtherTuple>>>(get<Indices>(t)))...
+ {
+ }
+
+ template <typename OtherTuple>
+ TupleImpl& operator=(OtherTuple&& t)
+ {
+ swallow(TupleLeaf<Indices, Ts>::operator=(
+ eastl::forward<tuple_element_t<Indices, MakeTupleTypes_t<OtherTuple>>>(get<Indices>(t)))...);
+ return *this;
+ }
+
+ TupleImpl& operator=(const TupleImpl& t)
+ {
+ swallow(TupleLeaf<Indices, Ts>::operator=(static_cast<const TupleLeaf<Indices, Ts>&>(t).getInternal())...);
+ return *this;
+ }
+
+ void swap(TupleImpl& t) { swallow(TupleLeaf<Indices, Ts>::swap(static_cast<TupleLeaf<Indices, Ts>&>(t))...); }
+ };
+
+ template <size_t I, typename Indices, typename... Ts>
+ inline tuple_element_t<I, TupleImpl<Indices, Ts...>>& get(TupleImpl<Indices, Ts...>& t)
+ {
+ typedef tuple_element_t<I, TupleImpl<Indices, Ts...>> Type;
+ return static_cast<Internal::TupleLeaf<I, Type>&>(t).getInternal();
+ }
+
+ template <size_t I, typename Indices, typename... Ts>
+ inline const_tuple_element_t<I, TupleImpl<Indices, Ts...>>& get(const TupleImpl<Indices, Ts...>& t)
+ {
+ typedef tuple_element_t<I, TupleImpl<Indices, Ts...>> Type;
+ return static_cast<const Internal::TupleLeaf<I, Type>&>(t).getInternal();
+ }
+
+ template <size_t I, typename Indices, typename... Ts>
+ inline tuple_element_t<I, TupleImpl<Indices, Ts...>>&& get(TupleImpl<Indices, Ts...>&& t)
+ {
+ typedef tuple_element_t<I, TupleImpl<Indices, Ts...>> Type;
+ return static_cast<Type&&>(static_cast<Internal::TupleLeaf<I, Type>&>(t).getInternal());
+ }
+
+ template <typename T, typename Indices, typename... Ts>
+ inline T& get(TupleImpl<Indices, Ts...>& t)
+ {
+ typedef tuple_index<T, TupleImpl<Indices, Ts...>> Index;
+ return static_cast<Internal::TupleLeaf<Index::index, T>&>(t).getInternal();
+ }
+
+ template <typename T, typename Indices, typename... Ts>
+ inline const T& get(const TupleImpl<Indices, Ts...>& t)
+ {
+ typedef tuple_index<T, TupleImpl<Indices, Ts...>> Index;
+ return static_cast<const Internal::TupleLeaf<Index::index, T>&>(t).getInternal();
+ }
+
+ template <typename T, typename Indices, typename... Ts>
+ inline T&& get(TupleImpl<Indices, Ts...>&& t)
+ {
+ typedef tuple_index<T, TupleImpl<Indices, Ts...>> Index;
+ return static_cast<T&&>(static_cast<Internal::TupleLeaf<Index::index, T>&>(t).getInternal());
+ }
+
+
+ // TupleLike
+ //
+ // type-trait that determines if a type is an eastl::tuple or an eastl::pair.
+ //
+ template <typename T> struct TupleLike : public false_type {};
+ template <typename T> struct TupleLike<const T> : public TupleLike<T> {};
+ template <typename T> struct TupleLike<volatile T> : public TupleLike<T> {};
+ template <typename T> struct TupleLike<const volatile T> : public TupleLike<T> {};
+
+ template <typename... Ts>
+ struct TupleLike<tuple<Ts...>> : public true_type {};
+
+ template <typename First, typename Second>
+ struct TupleLike<eastl::pair<First, Second>> : public true_type {};
+
+
+ // TupleConvertible
+ //
+ //
+ //
+ template <bool IsSameSize, typename From, typename To>
+ struct TupleConvertibleImpl : public false_type
+ {
+ };
+
+ template <typename... FromTypes, typename... ToTypes>
+ struct TupleConvertibleImpl<true, TupleTypes<FromTypes...>, TupleTypes<ToTypes...>>
+ : public integral_constant<bool, conjunction<is_convertible<FromTypes, ToTypes>...>::value>
+ {
+ };
+
+ template <typename From, typename To,
+ bool = TupleLike<typename remove_reference<From>::type>::value,
+ bool = TupleLike<typename remove_reference<To>::type>::value>
+ struct TupleConvertible : public false_type
+ {
+ };
+
+ template <typename From, typename To>
+ struct TupleConvertible<From, To, true, true>
+ : public TupleConvertibleImpl<tuple_size<typename remove_reference<From>::type>::value ==
+ tuple_size<typename remove_reference<To>::type>::value,
+ MakeTupleTypes_t<From>, MakeTupleTypes_t<To>>
+ {
+ };
+
+
+ // TupleAssignable
+ //
+ //
+ //
+ template <bool IsSameSize, typename Target, typename From>
+ struct TupleAssignableImpl : public false_type
+ {
+ };
+
+ template <typename... TargetTypes, typename... FromTypes>
+ struct TupleAssignableImpl<true, TupleTypes<TargetTypes...>, TupleTypes<FromTypes...>>
+ : public bool_constant<conjunction<is_assignable<TargetTypes, FromTypes>...>::value>
+ {
+ };
+
+ template <typename Target, typename From,
+ bool = TupleLike<typename remove_reference<Target>::type>::value,
+ bool = TupleLike<typename remove_reference<From>::type>::value>
+ struct TupleAssignable : public false_type
+ {
+ };
+
+ template <typename Target, typename From>
+ struct TupleAssignable<Target, From, true, true>
+ : public TupleAssignableImpl<
+ tuple_size<typename remove_reference<Target>::type>::value ==
+ tuple_size<typename remove_reference<From>::type>::value,
+ MakeTupleTypes_t<Target>, MakeTupleTypes_t<From>>
+ {
+ };
+
+
+ // TupleImplicitlyConvertible and TupleExplicitlyConvertible
+ //
+ // helpers for constraining conditionally-explicit ctors
+ //
+ template <bool IsSameSize, typename TargetType, typename... FromTypes>
+ struct TupleImplicitlyConvertibleImpl : public false_type
+ {
+ };
+
+
+ template <typename... TargetTypes, typename... FromTypes>
+ struct TupleImplicitlyConvertibleImpl<true, TupleTypes<TargetTypes...>, FromTypes...>
+ : public conjunction<
+ is_constructible<TargetTypes, FromTypes>...,
+ is_convertible<FromTypes, TargetTypes>...>
+ {
+ };
+
+ template <typename TargetTupleType, typename... FromTypes>
+ struct TupleImplicitlyConvertible
+ : public TupleImplicitlyConvertibleImpl<
+ tuple_size<TargetTupleType>::value == sizeof...(FromTypes),
+ MakeTupleTypes_t<TargetTupleType>, FromTypes...>::type
+ {
+ };
+
+ template<typename TargetTupleType, typename... FromTypes>
+ using TupleImplicitlyConvertible_t = enable_if_t<TupleImplicitlyConvertible<TargetTupleType, FromTypes...>::value, bool>;
+
+ template <bool IsSameSize, typename TargetType, typename... FromTypes>
+ struct TupleExplicitlyConvertibleImpl : public false_type
+ {
+ };
+
+ template <typename... TargetTypes, typename... FromTypes>
+ struct TupleExplicitlyConvertibleImpl<true, TupleTypes<TargetTypes...>, FromTypes...>
+ : public conjunction<
+ is_constructible<TargetTypes, FromTypes>...,
+ negation<conjunction<is_convertible<FromTypes, TargetTypes>...>>>
+ {
+ };
+
+ template <typename TargetTupleType, typename... FromTypes>
+ struct TupleExplicitlyConvertible
+ : public TupleExplicitlyConvertibleImpl<
+ tuple_size<TargetTupleType>::value == sizeof...(FromTypes),
+ MakeTupleTypes_t<TargetTupleType>, FromTypes...>::type
+ {
+ };
+
+ template<typename TargetTupleType, typename... FromTypes>
+ using TupleExplicitlyConvertible_t = enable_if_t<TupleExplicitlyConvertible<TargetTupleType, FromTypes...>::value, bool>;
+
+
+ // TupleEqual
+ //
+ //
+ //
+ template <size_t I>
+ struct TupleEqual
+ {
+ template <typename Tuple1, typename Tuple2>
+ bool operator()(const Tuple1& t1, const Tuple2& t2)
+ {
+ static_assert(tuple_size<Tuple1>::value == tuple_size<Tuple2>::value, "comparing tuples of different sizes.");
+ return TupleEqual<I - 1>()(t1, t2) && get<I - 1>(t1) == get<I - 1>(t2);
+ }
+ };
+
+ template <>
+ struct TupleEqual<0>
+ {
+ template <typename Tuple1, typename Tuple2>
+ bool operator()(const Tuple1&, const Tuple2&)
+ {
+ return true;
+ }
+ };
+
+ // TupleLess
+ //
+ //
+ //
+ template <size_t I>
+ struct TupleLess
+ {
+ template <typename Tuple1, typename Tuple2>
+ bool operator()(const Tuple1& t1, const Tuple2& t2)
+ {
+ static_assert(tuple_size<Tuple1>::value == tuple_size<Tuple2>::value, "comparing tuples of different sizes.");
+ return TupleLess<I - 1>()(t1, t2) || (!TupleLess<I - 1>()(t2, t1) && get<I - 1>(t1) < get<I - 1>(t2));
+ }
+ };
+
+ template <>
+ struct TupleLess<0>
+ {
+ template <typename Tuple1, typename Tuple2>
+ bool operator()(const Tuple1&, const Tuple2&)
+ {
+ return false;
+ }
+ };
+
+
+ // MakeTupleReturnImpl
+ //
+ //
+ //
+ template <typename T> struct MakeTupleReturnImpl { typedef T type; };
+ template <typename T> struct MakeTupleReturnImpl<reference_wrapper<T>> { typedef T& type; };
+
+ template <typename T>
+ using MakeTupleReturn_t = typename MakeTupleReturnImpl<decay_t<T>>::type;
+
+
+ // tuple_cat helpers
+ //
+ //
+ //
+
+ // TupleCat2Impl
+ template <typename Tuple1, typename Is1, typename Tuple2, typename Is2>
+ struct TupleCat2Impl;
+
+ template <typename... T1s, size_t... I1s, typename... T2s, size_t... I2s>
+ struct TupleCat2Impl<tuple<T1s...>, index_sequence<I1s...>, tuple<T2s...>, index_sequence<I2s...>>
+ {
+ using ResultType = tuple<T1s..., T2s...>;
+
+ template <typename Tuple1, typename Tuple2>
+ static inline ResultType DoCat2(Tuple1&& t1, Tuple2&& t2)
+ {
+ return ResultType(get<I1s>(eastl::forward<Tuple1>(t1))..., get<I2s>(eastl::forward<Tuple2>(t2))...);
+ }
+ };
+
+ // TupleCat2
+ template <typename Tuple1, typename Tuple2>
+ struct TupleCat2;
+
+ template <typename... T1s, typename... T2s>
+ struct TupleCat2<tuple<T1s...>, tuple<T2s...>>
+ {
+ using Is1 = make_index_sequence<sizeof...(T1s)>;
+ using Is2 = make_index_sequence<sizeof...(T2s)>;
+ using TCI = TupleCat2Impl<tuple<T1s...>, Is1, tuple<T2s...>, Is2>;
+ using ResultType = typename TCI::ResultType;
+
+ template <typename Tuple1, typename Tuple2>
+ static inline ResultType DoCat2(Tuple1&& t1, Tuple2&& t2)
+ {
+ return TCI::DoCat2(eastl::forward<Tuple1>(t1), eastl::forward<Tuple2>(t2));
+ }
+ };
+
+ // TupleCat
+ template <typename... Tuples>
+ struct TupleCat;
+
+ template <typename Tuple1, typename Tuple2, typename... TuplesRest>
+ struct TupleCat<Tuple1, Tuple2, TuplesRest...>
+ {
+ using FirstResultType = typename TupleCat2<Tuple1, Tuple2>::ResultType;
+ using ResultType = typename TupleCat<FirstResultType, TuplesRest...>::ResultType;
+
+ template <typename TupleArg1, typename TupleArg2, typename... TupleArgsRest>
+ static inline ResultType DoCat(TupleArg1&& t1, TupleArg2&& t2, TupleArgsRest&&... ts)
+ {
+ return TupleCat<FirstResultType, TuplesRest...>::DoCat(
+ TupleCat2<TupleArg1, TupleArg2>::DoCat2(eastl::forward<TupleArg1>(t1), eastl::forward<TupleArg2>(t2)),
+ eastl::forward<TupleArgsRest>(ts)...);
+ }
+ };
+
+ template <typename Tuple1, typename Tuple2>
+ struct TupleCat<Tuple1, Tuple2>
+ {
+ using TC2 = TupleCat2<Tuple1, remove_reference_t<Tuple2>>;
+ using ResultType = typename TC2::ResultType;
+
+ template <typename TupleArg1, typename TupleArg2>
+ static inline ResultType DoCat(TupleArg1&& t1, TupleArg2&& t2)
+ {
+ return TC2::DoCat2(eastl::forward<TupleArg1>(t1), eastl::forward<TupleArg2>(t2));
+ }
+ };
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename... T1s, typename... T2s, size_t... Is>
+ constexpr auto TupleThreeWay(const tuple<T1s...>& t1, const tuple<T2s...>& t2, index_sequence<Is...> is)
+ {
+ std::common_comparison_category_t<synth_three_way_result<T1s, T2s>...> result = std::strong_ordering::equal;
+ ((result = synth_three_way{}(get<Is>(t1), get<Is>(t2)), result != 0) || ...);
+ return result;
+ }
+#endif
+} // namespace Internal
+
+
+
+// tuple
+//
+// eastl::tuple is a fixed-size container of heterogeneous values. It is a
+// generalization of eastl::pair which hold only two heterogeneous values.
+//
+// https://en.cppreference.com/w/cpp/utility/tuple
+//
+template <typename... Ts>
+class tuple;
+
+template <typename T, typename... Ts>
+class tuple<T, Ts...>
+{
+public:
+ EA_CONSTEXPR tuple() = default;
+
+ template <typename T2 = T,
+ Internal::TupleImplicitlyConvertible_t<tuple, const T2&, const Ts&...> = 0>
+ EA_CONSTEXPR tuple(const T& t, const Ts&... ts)
+ : mImpl(make_index_sequence<sizeof...(Ts) + 1>{}, Internal::MakeTupleTypes_t<tuple>{}, t, ts...)
+ {
+ }
+
+ template <typename T2 = T,
+ Internal::TupleExplicitlyConvertible_t<tuple, const T2&, const Ts&...> = 0>
+ explicit EA_CONSTEXPR tuple(const T& t, const Ts&... ts)
+ : mImpl(make_index_sequence<sizeof...(Ts) + 1>{}, Internal::MakeTupleTypes_t<tuple>{}, t, ts...)
+ {
+ }
+
+ template <typename U, typename... Us,
+ Internal::TupleImplicitlyConvertible_t<tuple, U, Us...> = 0>
+ EA_CONSTEXPR tuple(U&& u, Us&&... us)
+ : mImpl(make_index_sequence<sizeof...(Us) + 1>{}, Internal::MakeTupleTypes_t<tuple>{}, eastl::forward<U>(u),
+ eastl::forward<Us>(us)...)
+ {
+ }
+
+ template <typename U, typename... Us,
+ Internal::TupleExplicitlyConvertible_t<tuple, U, Us...> = 0>
+ explicit EA_CONSTEXPR tuple(U&& u, Us&&... us)
+ : mImpl(make_index_sequence<sizeof...(Us) + 1>{}, Internal::MakeTupleTypes_t<tuple>{}, eastl::forward<U>(u),
+ eastl::forward<Us>(us)...)
+ {
+ }
+
+ template <typename OtherTuple,
+ typename enable_if<Internal::TupleConvertible<OtherTuple, tuple>::value, bool>::type = false>
+ tuple(OtherTuple&& t)
+ : mImpl(eastl::forward<OtherTuple>(t))
+ {
+ }
+
+ template <typename OtherTuple,
+ typename enable_if<Internal::TupleAssignable<tuple, OtherTuple>::value, bool>::type = false>
+ tuple& operator=(OtherTuple&& t)
+ {
+ mImpl.operator=(eastl::forward<OtherTuple>(t));
+ return *this;
+ }
+
+ void swap(tuple& t) { mImpl.swap(t.mImpl); }
+
+private:
+ typedef Internal::TupleImpl<make_index_sequence<sizeof...(Ts) + 1>, T, Ts...> Impl;
+ Impl mImpl;
+
+ template <size_t I, typename... Ts_>
+ friend tuple_element_t<I, tuple<Ts_...>>& get(tuple<Ts_...>& t);
+
+ template <size_t I, typename... Ts_>
+ friend const_tuple_element_t<I, tuple<Ts_...>>& get(const tuple<Ts_...>& t);
+
+ template <size_t I, typename... Ts_>
+ friend tuple_element_t<I, tuple<Ts_...>>&& get(tuple<Ts_...>&& t);
+
+ template <typename T_, typename... ts_>
+ friend T_& get(tuple<ts_...>& t);
+
+ template <typename T_, typename... ts_>
+ friend const T_& get(const tuple<ts_...>& t);
+
+ template <typename T_, typename... ts_>
+ friend T_&& get(tuple<ts_...>&& t);
+};
+
+// template specialization for an empty tuple
+template <>
+class tuple<>
+{
+public:
+ void swap(tuple&) {}
+};
+
+template <size_t I, typename... Ts>
+inline tuple_element_t<I, tuple<Ts...>>& get(tuple<Ts...>& t)
+{
+ return get<I>(t.mImpl);
+}
+
+template <size_t I, typename... Ts>
+inline const_tuple_element_t<I, tuple<Ts...>>& get(const tuple<Ts...>& t)
+{
+ return get<I>(t.mImpl);
+}
+
+template <size_t I, typename... Ts>
+inline tuple_element_t<I, tuple<Ts...>>&& get(tuple<Ts...>&& t)
+{
+ return get<I>(eastl::move(t.mImpl));
+}
+
+template <typename T, typename... Ts>
+inline T& get(tuple<Ts...>& t)
+{
+ return get<T>(t.mImpl);
+}
+
+template <typename T, typename... Ts>
+inline const T& get(const tuple<Ts...>& t)
+{
+ return get<T>(t.mImpl);
+}
+
+template <typename T, typename... Ts>
+inline T&& get(tuple<Ts...>&& t)
+{
+ return get<T>(eastl::move(t.mImpl));
+}
+
+template <typename... Ts>
+inline void swap(tuple<Ts...>& a, tuple<Ts...>& b)
+{
+ a.swap(b);
+}
+
+
+// tuple operators
+//
+//
+template <typename... T1s, typename... T2s>
+inline bool operator==(const tuple<T1s...>& t1, const tuple<T2s...>& t2)
+{
+ return Internal::TupleEqual<sizeof...(T1s)>()(t1, t2);
+}
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+template <typename... T1s, typename... T2s>
+inline constexpr std::common_comparison_category_t<synth_three_way_result<T1s, T2s>...> operator<=>(const tuple<T1s...>& t1, const tuple<T2s...>& t2)
+{
+ return Internal::TupleThreeWay(t1, t2, make_index_sequence<sizeof...(T1s)>{});
+}
+#else
+template <typename... T1s, typename... T2s>
+inline bool operator<(const tuple<T1s...>& t1, const tuple<T2s...>& t2)
+{
+ return Internal::TupleLess<sizeof...(T1s)>()(t1, t2);
+}
+
+template <typename... T1s, typename... T2s> inline bool operator!=(const tuple<T1s...>& t1, const tuple<T2s...>& t2) { return !(t1 == t2); }
+template <typename... T1s, typename... T2s> inline bool operator> (const tuple<T1s...>& t1, const tuple<T2s...>& t2) { return t2 < t1; }
+template <typename... T1s, typename... T2s> inline bool operator<=(const tuple<T1s...>& t1, const tuple<T2s...>& t2) { return !(t2 < t1); }
+template <typename... T1s, typename... T2s> inline bool operator>=(const tuple<T1s...>& t1, const tuple<T2s...>& t2) { return !(t1 < t2); }
+#endif
+
+// tuple_cat
+//
+//
+template <typename... Tuples>
+inline typename Internal::TupleCat<Tuples...>::ResultType tuple_cat(Tuples&&... ts)
+{
+ return Internal::TupleCat<Tuples...>::DoCat(eastl::forward<Tuples>(ts)...);
+}
+
+
+// make_tuple
+//
+//
+template <typename... Ts>
+inline EA_CONSTEXPR tuple<Internal::MakeTupleReturn_t<Ts>...> make_tuple(Ts&&... values)
+{
+ return tuple<Internal::MakeTupleReturn_t<Ts>...>(eastl::forward<Ts>(values)...);
+}
+
+
+// forward_as_tuple
+//
+//
+template <typename... Ts>
+inline EA_CONSTEXPR tuple<Ts&&...> forward_as_tuple(Ts&&... ts) EA_NOEXCEPT
+{
+ return tuple<Ts&&...>(eastl::forward<Ts&&>(ts)...);
+}
+
+
+// ignore
+//
+// An object of unspecified type such that any value can be assigned to it with no effect.
+//
+// https://en.cppreference.com/w/cpp/utility/tuple/ignore
+//
+namespace Internal
+{
+ struct ignore_t
+ {
+ ignore_t() = default;
+
+ template <typename T>
+ const ignore_t& operator=(const T&) const
+ {
+ return *this;
+ }
+ };
+}// namespace Internal
+
+
+
+// tie
+//
+// Creates a tuple of lvalue references to its arguments or instances of eastl::ignore.
+//
+// https://en.cppreference.com/w/cpp/utility/tuple/tie
+//
+template <typename... Ts>
+inline EA_CONSTEXPR tuple<Ts&...> tie(Ts&... ts) EA_NOEXCEPT
+{
+ return tuple<Ts&...>(ts...);
+}
+
+
+// apply
+//
+// Invoke a callable object using a tuple to supply the arguments.
+//
+// http://en.cppreference.com/w/cpp/utility/apply
+//
+namespace detail
+{
+ template <class F, class Tuple, size_t... I>
+ EA_CONSTEXPR decltype(auto) apply_impl(F&& f, Tuple&& t, index_sequence<I...>)
+ {
+ return invoke(eastl::forward<F>(f), get<I>(eastl::forward<Tuple>(t))...);
+ }
+} // namespace detail
+
+template <class F, class Tuple>
+EA_CONSTEXPR decltype(auto) apply(F&& f, Tuple&& t)
+{
+ return detail::apply_impl(eastl::forward<F>(f), eastl::forward<Tuple>(t),
+ make_index_sequence<tuple_size_v<remove_reference_t<Tuple>>>{});
+}
+
+} // namespace eastl
+
+
+///////////////////////////////////////////////////////////////
+// C++17 structured bindings support for eastl::tuple
+//
+#ifndef EA_COMPILER_NO_STRUCTURED_BINDING
+ #include <tuple>
+ namespace std
+ {
+ // NOTE(rparolin): Some platform implementations didn't check the standard specification and implemented the
+ // "tuple_size" and "tuple_element" primary template with as a struct. The standard specifies they are
+ // implemented with the class keyword so we provide the template specializations as a class and disable the
+ // generated warning.
+ EA_DISABLE_CLANG_WARNING(-Wmismatched-tags)
+
+ template <class... Ts>
+ class tuple_size<::eastl::tuple<Ts...>> : public ::eastl::integral_constant<size_t, sizeof...(Ts)>
+ {
+ };
+
+ template <size_t I, class... Ts>
+ class tuple_element<I, ::eastl::tuple<Ts...>> : public ::eastl::tuple_element<I, ::eastl::tuple<Ts...>>
+ {
+ };
+
+ EA_RESTORE_CLANG_WARNING()
+ }
+#endif
+
+
+#endif // EASTL_TUPLE_ENABLED
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+#endif // EASTL_TUPLE_H
diff --git a/EASTL/include/EASTL/type_traits.h b/EASTL/include/EASTL/type_traits.h
new file mode 100644
index 0000000..73d2216
--- /dev/null
+++ b/EASTL/include/EASTL/type_traits.h
@@ -0,0 +1,1041 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Specification
+//
+// This file implements C++ type traits as proposed by the emerging C++ update
+// as of May, 2005. This update is known as "Proposed Draft Technical Report
+// on C++ Library Extensions" and is document number n1745. It can be found
+// on the Internet as n1745.pdf and as of this writing it is updated every
+// couple months to reflect current thinking.
+//////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Description
+//
+// EASTL includes a fairly serious type traits library that is on par with the
+// one found in Boost but offers some additional performance-enhancing help as well.
+// The type_traits library provides information about class types, as opposed to
+// class instances. For example, the is_integral type trait tells if a type is
+// one of int, short, long, char, uint64_t, etc.
+//
+// There are three primary uses of type traits:
+// * Allowing for optimized operations on some data types.
+// * Allowing for different logic pathways based on data types.
+// * Allowing for compile-type assertions about data type expectations.
+//
+// Most of the type traits are automatically detected and implemented by the compiler.
+// However, EASTL allows for the user to explicitly give the compiler hints about
+// type traits that the compiler cannot know, via the EASTL_DECLARE declarations.
+// If the user has a class that is relocatable (i.e. can safely use memcpy to copy values),
+// the user can use the EASTL_DECLARE_TRIVIAL_RELOCATE declaration to tell the compiler
+// that the class can be copied via memcpy. This will automatically significantly speed
+// up some containers and algorithms that use that class.
+//
+// Here is an example of using type traits to tell if a value is a floating point
+// value or not:
+//
+// template <typename T>
+// DoSomething(T t) {
+// assert(is_floating_point<T>::value);
+// }
+//
+// Here is an example of declaring a class as relocatable and using it in a vector.
+//
+// EASTL_DECLARE_TRIVIAL_RELOCATE(Widget); // Usually you put this at the Widget class declaration.
+// vector<Widget> wVector;
+// wVector.erase(wVector.begin()); // This operation will be optimized via using memcpy.
+//
+// The following is a full list of the currently recognized type traits. Most of these
+// are implemented as of this writing, but if there is one that is missing, feel free
+// to contact the maintainer of this library and request that it be completed.
+// As of this writing all C++11 type traits are supported, as well as some additional ones.
+// http://en.cppreference.com/w/cpp/types
+//
+// Trait Description
+// ------------------------------------------------------------------------------
+// is_void T is void or a cv-qualified (const/void-qualified) void.
+// is_null_pointer
+// is_integral T is an integral type.
+// is_floating_point T is a floating point type.
+// is_array T is an array type. The templated array container is not an array type.
+// is_enum T is an enumeration type.
+// is_union T is a union type.
+// is_class T is a class type but not a union type.
+// is_function T is a function type.
+// is_pointer T is a pointer type. Includes function pointers, but not pointers to (data or function) members.
+// is_rvalue_reference
+// is_lvalue_reference
+// is_member_object_pointer T is a pointer to data member.
+// is_member_function_pointer T is a pointer to member function.
+//
+// is_fundamental T is a fundamental type (void, integral, or floating point).
+// is_arithmetic T is an arithmetic type (integral or floating point).
+// is_scalar T is a scalar type (arithmetic, enum, pointer, member_pointer)
+// is_object T is an object type.
+// is_compound T is a compound type (anything but fundamental).
+// is_reference T is a reference type. Includes references to functions.
+// is_member_pointer T is a pointer to a member or member function.
+//
+// is_const T is const-qualified.
+// is_volatile T is volatile-qualified.
+// is_trivial
+// is_trivially_copyable
+// is_standard_layout
+// is_pod T is a POD type.
+// is_literal_type
+// is_empty T is an empty class.
+// is_polymorphic T is a polymorphic class.
+// is_abstract T is an abstract class.
+// is_signed T is a signed integral type.
+// is_unsigned T is an unsigned integral type.
+// is_bounded_array T is a type is an array type of known bound
+// is_unbounded_array T is a type is an array type of unknown bound
+//
+// is_constructible
+// is_trivially_constructible
+// is_nothrow_constructible
+// is_default_constructible
+// is_trivially_default_constructible
+// is_nothrow_default_constructible
+// is_copy_constructible
+// is_trivially_copy_constructible
+// is_nothrow_copy_constructible
+// is_move_constructible
+// is_trivially_move_constructible
+// is_nothrow_move_constructible
+// is_assignable
+// is_trivially_assignable
+// is_nothrow_assignable
+// is_copy_assignable
+// is_trivially_copy_assignable
+// is_nothrow_copy_assignable
+// is_move_assignable
+// is_trivially_move_assignable
+// is_nothrow_move_assignable
+// is_destructible
+// is_trivially_destructible
+// is_nothrow_destructible
+// has_virtual_destructor T has a virtual destructor.
+//
+// alignment_of An integer value representing the number of bytes of the alignment of objects of type T; an object of type T may be allocated at an address that is a multiple of its alignment.
+// rank An integer value representing the rank of objects of type T. The term 'rank' here is used to describe the number of dimensions of an array type.
+// extent An integer value representing the extent (dimension) of the I'th bound of objects of type T. If the type T is not an array type, has rank of less than I, or if I == 0 and T is of type 'array of unknown bound of U,' then value shall evaluate to zero; otherwise value shall evaluate to the number of elements in the I'th array bound of T. The term 'extent' here is used to describe the number of elements in an array type.
+//
+// is_same T and U name the same type.
+// is_base_of Base is a base class of Derived or Base and Derived name the same type.
+// is_convertible An imaginary lvalue of type From is implicitly convertible to type To. Special conversions involving string-literals and null-pointer constants are not considered. No function-parameter adjustments are made to type To when determining whether From is convertible to To; this implies that if type To is a function type or an array type, then the condition is false.
+//
+// remove_cv
+// remove_const The member typedef type shall be the same as T except that any top level const-qualifier has been removed. remove_const<const volatile int>::type evaluates to volatile int, whereas remove_const<const int*> is const int*.
+// remove_volatile
+// remove_cvref
+// add_cv
+// add_const
+// add_volatile
+//
+// remove_reference
+// add_lvalue_reference
+// add_rvalue_reference
+//
+// remove_pointer
+// add_pointer
+//
+// make_signed
+// make_unsigned
+//
+// remove_extent
+// remove_all_extents
+//
+// aligned_storage
+// aligned_union
+// decay
+// enable_if
+// conditional
+// common_type
+// underlying_type
+// result_of
+//
+// integral_constant
+// bool_constant
+// true_type
+// false_type
+//
+// EASTL extension type traits
+// identity Simply sets T as type.
+// is_aligned Defined as true if the type has alignment requirements greater than default alignment, which is taken to be 8. is_aligned is not found in Boost nor C++11, though alignment_of is.
+// union_cast Allows for easy-to-read casting between types that are unrelated but have binary equivalence. The classic use case is converting between float and int32_t bit representations.
+// is_array_of_known_bounds
+// is_array_of_unknown_bounds
+// add_signed Deprecated in favor of make_signed.
+// add_unsigned Deprecated in favor of make_unsigned.
+// add_reference
+// yes_type
+// no_type
+// is_swappable Found in <EASTL/utility.h>
+// is_nothrow_swappable "
+// is_reference_wrapper Found in <EASTL/functional.h>
+// remove_reference_wrapper "
+// is_detected Checks if some supplied arguments (Args) respect a constraint (Op).
+// detected_t Check which type we obtain after expanding some arguments (Args) over a constraint (Op).
+// detected_or Checks if some supplied arguments (Args) respect a constraint (Op) and allow to overwrite return type.
+// detected_or_t Equivalent to detected_or<Default, Op, Args...>::type.
+// is_detected_exact Check that the type we obtain after expanding some arguments (Args) over a constraint (Op) is equivalent to Expected.
+// is_detected_convertible Check that the type we obtain after expanding some arguments (Args) over a constraint (Op) is convertible to Expected.
+//
+// Deprecated pre-C++11 type traits
+// has_trivial_constructor The default constructor for T is trivial.
+// has_trivial_copy The copy constructor for T is trivial.
+// has_trivial_assign The assignment operator for T is trivial.
+// has_trivial_destructor The destructor for T is trivial.
+// has_nothrow_constructor The default constructor for T has an empty exception specification or can otherwise be deduced never to throw an exception.
+// has_nothrow_copy The copy constructor for T has an empty exception specification or can otherwise be deduced never to throw an exception.
+// has_nothrow_assign The assignment operator for T has an empty exception specification or can otherwise be deduced never to throw an exception.
+// *has_trivial_relocate T can be moved to a new location via bitwise copy. Note that C++11 rvalue/move functionality supercedes this.
+//
+// * has_trivial_relocate is not found in Boost nor the pre-C++ standard update proposal.
+// However, it is somewhat useful in pre-C++11 environments (prior to move semantics)
+// for allowing the generation of optimized object moving operations. It is similar to
+// the is_pod type trait, but goes further and allows non-pod classes to be categorized
+// as relocatable. Such categorization is something that no compiler can do, as only
+// the user can know if it is such. Thus EASTL_DECLARE_TRIVIAL_RELOCATE is provided to
+// allow the user to give the compiler a hint. However, C++11 rvalue/move functionality
+// supercedes this and will eventually fully displace it.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Requirements
+//
+// As of this writing (5/2005), type_traits here requires a well-conforming
+// C++ compiler with respect to template metaprogramming. To use this library
+// you need to have at least one of the following:
+// MSVC++ 7.1 (includes Win32, Win64, and WinCE platforms)
+// GCC 3.2 (includes MacOSX, and Linux platforms)
+// Metrowerks 8.0 (incluees MacOSX, Windows, and other platforms)
+// EDG (includes any compiler with EDG as a back-end, such as the Intel compiler)
+// Comeau (this is a C++ to C generator)
+//
+// It may be useful to list the compilers/platforms the current version of
+// type_traits doesn't support:
+// Borland C++ (it simply has too many bugs with respect to templates).
+// GCC 2.96 We used to have a separate set of type traits for this compiler, but removed it due to lack of necessity.
+//////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implementation
+//
+// The implementation here is almost entirely based on template metaprogramming.
+// This is whereby you use the compiler's template functionality to define types
+// and values and make compilation decisions based on template declarations.
+// Many of the algorithms here are similar to those found in books such as
+// "Modern C++ Design" and C++ libraries such as Boost. The implementations here
+// are simpler and more straightforward than those found in some libraries, due
+// largely to our assumption that the compiler is good at doing template programming.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_TYPE_TRAITS_H
+#define EASTL_TYPE_TRAITS_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <stddef.h> // Is needed for size_t usage by some traits.
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // integral_constant
+ //
+ // This is the base class for various type traits, as defined by C++11.
+ // This is essentially a utility base class for defining properties
+ // as both class constants (value) and as types (type).
+ //
+ template <typename T, T v>
+ struct integral_constant
+ {
+ static EA_CONSTEXPR T value = v;
+ typedef T value_type;
+ typedef integral_constant<T, v> type;
+
+ EA_CONSTEXPR operator value_type() const EA_NOEXCEPT { return value; }
+ EA_CONSTEXPR value_type operator()() const EA_NOEXCEPT { return value; }
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // true_type / false_type
+ //
+ // These are commonly used types in the implementation of type_traits.
+ // Other integral constant types can be defined, such as those based on int.
+ //
+ typedef integral_constant<bool, true> true_type;
+ typedef integral_constant<bool, false> false_type;
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bool_constant
+ //
+ // This is a convenience helper for the often used integral_constant<bool, value>.
+ //
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <bool B>
+ struct bool_constant : public integral_constant<bool, B> {};
+ #else
+ template <bool B>
+ using bool_constant = integral_constant<bool, B>;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // yes_type / no_type
+ //
+ // These are used as a utility to differentiate between two things.
+ //
+ typedef char yes_type; // sizeof(yes_type) == 1
+ struct no_type { char padding[8]; }; // sizeof(no_type) != 1
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // unused
+ //
+ // Used internally to denote a special template argument that means
+ // it's an unused argument.
+ //
+ struct unused { };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // argument_sink
+ //
+ // Used as a type which constructs from anything.
+ //
+ // For compilers that support variadic templates we provide an
+ // alternative argument_sink which provides a constructor overload of
+ // the variadic pack of arguments by reference. This avoids issues of
+ // object alignment not being respected in Microsoft compilers. Seen
+ // in VS2015 preview. In general, since arguments are consumed and
+ // ignored its cheaper to consume references than passing by value
+ // which incurs a construction cost.
+ struct argument_sink
+ {
+ template<typename... Args>
+ argument_sink(Args&&...) {}
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_select
+ //
+ // This is used to declare a type from one of two type options.
+ // The result is based on the condition type. This has certain uses
+ // in template metaprogramming.
+ //
+ // Example usage:
+ // typedef ChosenType = typename type_select<is_integral<SomeType>::value, ChoiceAType, ChoiceBType>::type;
+ // or
+ // using ChosenType = type_select_t<is_integral_v<SomeType>, ChoiceAType, ChoiceBType>;
+ //
+ template <bool bCondition, class ConditionIsTrueType, class ConditionIsFalseType>
+ struct type_select { typedef ConditionIsTrueType type; };
+
+ template <typename ConditionIsTrueType, class ConditionIsFalseType>
+ struct type_select<false, ConditionIsTrueType, ConditionIsFalseType> { typedef ConditionIsFalseType type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <bool bCondition, class ConditionIsTrueType, class ConditionIsFalseType>
+ using type_select_t = typename type_select<bCondition, ConditionIsTrueType, ConditionIsFalseType>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // first_type_select
+ //
+ // Similar to type_select but unilaterally selects the first type.
+ //
+ template <typename T, typename = eastl::unused, typename = eastl::unused>
+ struct first_type_select { typedef T type; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_or
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <bool b1, bool b2, bool b3 = false, bool b4 = false, bool b5 = false>
+ struct type_or;
+
+ template <bool b1, bool b2, bool b3, bool b4, bool b5>
+ struct type_or { static const bool value = true; };
+
+ template <>
+ struct type_or<false, false, false, false, false> { static const bool value = false; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_and
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <bool b1, bool b2, bool b3 = true, bool b4 = true, bool b5 = true>
+ struct type_and;
+
+ template <bool b1, bool b2, bool b3, bool b4, bool b5>
+ struct type_and{ static const bool value = false; };
+
+ template <>
+ struct type_and<true, true, true, true, true>{ static const bool value = true; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_equal
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <int b1, int b2>
+ struct type_equal{ static const bool value = (b1 == b2); };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_not_equal
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <int b1, int b2>
+ struct type_not_equal{ static const bool value = (b1 != b2); };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_not
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <bool b>
+ struct type_not{ static const bool value = true; };
+
+ template <>
+ struct type_not<true>{ static const bool value = false; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // enable_if, disable_if
+ //
+ // template <bool B, typename T = void> struct enable_if;
+ // template <bool B, typename T = void> struct disable_if;
+
+ template<bool B, typename T = void>
+ struct enable_if {};
+
+ template <typename T>
+ struct enable_if<true, T> { typedef T type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <bool B, class T = void>
+ using enable_if_t = typename enable_if<B, T>::type;
+ #endif
+
+
+ template<bool B, typename T = void>
+ struct disable_if {};
+
+ template <typename T>
+ struct disable_if<false, T> { typedef T type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <bool B, class T = void>
+ using disable_if_t = typename disable_if<B, T>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // conditional
+ //
+ // Provides member typedef type which is defined as T if B is true at
+ // compile time, or as F if B is false.
+ //
+ template<bool B, typename T, typename F>
+ struct conditional { typedef T type; };
+
+ template <typename T, typename F>
+ struct conditional<false, T, F> { typedef F type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <bool B, class T, class F>
+ using conditional_t = typename conditional<B, T, F>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // conjunction
+ //
+ // This is a C++17 standard utility class that performs a short-circuiting
+ // logical AND on a sequence of type traits.
+ //
+ // http://en.cppreference.com/w/cpp/types/conjunction
+ //
+ template <class...>
+ struct conjunction : eastl::true_type {};
+
+ template <class B>
+ struct conjunction<B> : B {};
+
+ template <class B, class... Bn>
+ struct conjunction<B, Bn...> : conditional<bool(B::value), conjunction<Bn...>, B>::type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename... Bn>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool conjunction_v = conjunction<Bn...>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // disjunction
+ //
+ // This is a C++17 standard utility class that performs a short-circuiting
+ // logical OR on a sequence of type traits.
+ //
+ // http://en.cppreference.com/w/cpp/types/disjunction
+ //
+ template <class...>
+ struct disjunction : eastl::false_type {};
+
+ template <class B>
+ struct disjunction<B> : B {};
+
+ template <class B, class... Bn>
+ struct disjunction<B, Bn...> : conditional<bool(B::value), B, disjunction<Bn...>>::type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename... B>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool disjunction_v = disjunction<B...>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // negation
+ //
+ // This is a C++17 standard utility class that performs a logical NOT on a
+ // single type trait.
+ //
+ // http://en.cppreference.com/w/cpp/types/negation
+ //
+ template <class B>
+ struct negation : eastl::bool_constant<!bool(B::value)> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename B>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool negation_v = negation<B>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // identity
+ //
+ // The purpose of this is typically to deal with non-deduced template
+ // contexts. See the C++11 Standard, 14.8.2.5 p5.
+ // Also: http://cppquiz.org/quiz/question/109?result=CE&answer=&did_answer=Answer
+ //
+ // Dinkumware has an identity, but adds a member function to it:
+ // const T& operator()(const T& t) const{ return t; }
+ //
+ // NOTE(rparolin): Use 'eastl::type_identity' it was included in the C++20
+ // standard. This is a legacy EASTL type we continue to support for
+ // backwards compatibility.
+ //
+ template <typename T>
+ struct identity { using type = T; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename T>
+ using identity_t = typename identity<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_identity
+ //
+ // The purpose of this is typically to deal with non-deduced template
+ // contexts. See the C++11 Standard, 14.8.2.5 p5.
+ // Also: http://cppquiz.org/quiz/question/109?result=CE&answer=&did_answer=Answer
+ //
+ // https://en.cppreference.com/w/cpp/types/type_identity
+ //
+ template <typename T>
+ struct type_identity { using type = T; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename T>
+ using type_identity_t = typename type_identity<T>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_same
+ //
+ // Given two (possibly identical) types T and U, is_same<T, U>::value == true
+ // if and only if T and U are the same type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_same_CONFORMANCE 1 // is_same is conforming; doesn't make mistakes.
+
+ template <typename T, typename U>
+ struct is_same : public eastl::false_type { };
+
+ template <typename T>
+ struct is_same<T, T> : public eastl::true_type { };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_same_v = is_same<T, U>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_const
+ //
+ // is_const<T>::value == true if and only if T has const-qualification.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_const_CONFORMANCE 1 // is_const is conforming.
+
+ template <typename T> struct is_const : public eastl::false_type {};
+ template <typename T> struct is_const<const T> : public eastl::true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_const_v = is_const<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_volatile
+ //
+ // is_volatile<T>::value == true if and only if T has volatile-qualification.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_volatile_CONFORMANCE 1 // is_volatile is conforming.
+
+ template <typename T> struct is_volatile : public eastl::false_type {};
+ template <typename T> struct is_volatile<volatile T> : public eastl::true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_volatile_v = is_volatile<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_reference
+ //
+ // is_reference<T>::value == true if and only if T is a reference type (l-value reference or r-value reference).
+ // This category includes reference to function types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_reference_CONFORMANCE 1 // is_reference is conforming; doesn't make mistakes.
+
+ template <typename T> struct is_reference : public eastl::false_type{};
+ template <typename T> struct is_reference<T&> : public eastl::true_type{};
+ template <typename T> struct is_reference<T&&> : public eastl::true_type{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_reference_v = is_reference<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_function
+ //
+ // is_function<T>::value == true if and only if T is a function type.
+ // A function type here does not include a member function type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_function_CONFORMANCE 1 // is_function is conforming.
+
+ // afaik, original credit is to Walter Brown who described this implementation at CppCon 2019.
+ // libc++, libstdc++ and MS STL all use similar implementations.
+ // This relies on the fact that only function and reference types can't be const qualified.
+ // Rather than listing an obscene number of specializations for const, volatile, l- and r-value reference,
+ // noexcept and all relevant combinations we take advantage of this fact.
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable: 4180) // qualifier applied to function type has no meaning; ignored
+#endif
+ template <typename T>
+ struct is_function
+ : public eastl::bool_constant<!eastl::is_reference<T>::value && !eastl::is_const<const T>::value>::type {};
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_function_v = is_function<T>::value;
+ #endif
+
+
+ // The following remove utilities are defined here instead of in the headers
+ // below because they are core utilits that many other type traits need.
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_const
+ //
+ // Remove const from a type.
+ //
+ // The remove_const transformation trait removes top-level const
+ // qualification (if any) from the type to which it is applied. For a
+ // given type T, remove_const<T const>::type is equivalent to the type T.
+ // For example, remove_const<char*>::type is equivalent to char* while
+ // remove_const<const char*>::type is equivalent to const char*.
+ // In the latter case, the const qualifier modifies char, not *, and is
+ // therefore not at the top level.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_const_CONFORMANCE 1 // remove_const is conforming.
+
+ template <typename T> struct remove_const { typedef T type; };
+ template <typename T> struct remove_const<const T> { typedef T type; };
+ template <typename T> struct remove_const<const T[]> { typedef T type[]; };
+ template <typename T, size_t N> struct remove_const<const T[N]> { typedef T type[N]; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ using remove_const_t = typename remove_const<T>::type;
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_volatile
+ //
+ // Remove volatile from a type.
+ //
+ // The remove_volatile transformation trait removes top-level volatile
+ // qualification (if any) from the type to which it is applied.
+ // For a given type T, the type remove_volatile <T volatile>::T is equivalent
+ // to the type T. For example, remove_volatile <char* volatile>::type is
+ // equivalent to char* while remove_volatile <volatile char*>::type is
+ // equivalent to volatile char*. In the latter case, the volatile qualifier
+ // modifies char, not *, and is therefore not at the top level.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_volatile_CONFORMANCE 1 // remove_volatile is conforming.
+
+ template <typename T> struct remove_volatile { typedef T type; };
+ template <typename T> struct remove_volatile<volatile T> { typedef T type; };
+ template <typename T> struct remove_volatile<volatile T[]> { typedef T type[]; };
+ template <typename T, size_t N> struct remove_volatile<volatile T[N]> { typedef T type[N]; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ using remove_volatile_t = typename remove_volatile<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_cv
+ //
+ // Remove const and volatile from a type.
+ //
+ // The remove_cv transformation trait removes top-level const and/or volatile
+ // qualification (if any) from the type to which it is applied. For a given type T,
+ // remove_cv<T const volatile>::type is equivalent to T. For example,
+ // remove_cv<char* volatile>::type is equivalent to char*, while remove_cv<const char*>::type
+ // is equivalent to const char*. In the latter case, the const qualifier modifies
+ // char, not *, and is therefore not at the top level.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_cv_CONFORMANCE 1 // remove_cv is conforming.
+
+ template <typename T>
+ struct remove_cv { typedef typename eastl::remove_volatile<typename eastl::remove_const<T>::type>::type type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ using remove_cv_t = typename remove_cv<T>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_reference
+ //
+ // Add reference to a type.
+ //
+ // The add_reference transformation trait adds a level of indirection
+ // by reference to the type to which it is applied. For a given type T,
+ // add_reference<T>::type is equivalent to T& if is_lvalue_reference<T>::value == false,
+ // and T otherwise.
+ //
+ // Note: due to the reference collapsing rules, if you supply an r-value reference such as T&&, it will collapse to T&.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_reference_CONFORMANCE 1 // add_reference is conforming.
+
+ template <typename T> struct add_reference_impl { typedef T& type; };
+ template <typename T> struct add_reference_impl<T&> { typedef T& type; };
+ template <> struct add_reference_impl<void>{ typedef void type; };
+ #if defined(_MSC_VER) && (_MSC_VER <= 1600) // VS2010 and earlier mistakenly report: "cannot add a reference to a zero-sized array." Actually they are allowed, but there's nothing we can do about it under VS2010 and earlier.
+ template <typename T> struct add_reference_impl<T[0]>{ typedef T type; };
+ #endif
+
+ template <typename T> struct add_reference { typedef typename add_reference_impl<T>::type type; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_reference
+ //
+ // Remove reference from a type.
+ //
+ // The remove_reference transformation trait removes top-level of
+ // indirection by reference (if any) from the type to which it is applied.
+ // For a given type T, remove_reference<T&>::type is equivalent to T.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_reference_CONFORMANCE 1
+
+ template <typename T> struct remove_reference { typedef T type; };
+ template <typename T> struct remove_reference<T&> { typedef T type; };
+ template <typename T> struct remove_reference<T&&>{ typedef T type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ using remove_reference_t = typename remove_reference<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_cvref
+ //
+ // Remove const and volatile from a reference type.
+ //
+ // The remove_cvref transformation trait removes top-level const and/or volatile
+ // qualification (if any) from the reference type to which it is applied. For a given type T&,
+ // remove_cvref<T& const volatile>::type is equivalent to T. For example,
+ // remove_cv<int& volatile>::type is equivalent to int.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_cvref_CONFORMANCE 1 // remove_cvref is conforming.
+
+ template <typename T>
+ struct remove_cvref { typedef typename eastl::remove_volatile<typename eastl::remove_const<typename eastl::remove_reference<T>::type>::type>::type type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ using remove_cvref_t = typename remove_cvref<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_lvalue_reference
+ //
+ // C++11 Standard, section 20.9.7.2
+ // If T names an object or function type then the member typedef type
+ // shall name T&; otherwise, if T names a type 'rvalue reference to T1' then
+ // the member typedef type shall name T1&; otherwise, type shall name T.
+ //
+ // Rules (8.3.2 p6):
+ // void + & -> void
+ // T + & -> T&
+ // T& + & -> T&
+ // T&& + & -> T&
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_lvalue_reference_CONFORMANCE 1 // add_lvalue_reference is conforming.
+
+ namespace internal
+ {
+ template <typename T>
+ auto try_add_lvalue_reference(int)->type_identity<T&>;
+
+ template <typename T>
+ auto try_add_lvalue_reference(...)->type_identity<T>;
+ }
+
+ template <typename T> struct add_lvalue_reference : decltype(internal::try_add_lvalue_reference<T>(0)) {};
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ // To do: define macro.
+ #else
+ template <typename T>
+ using add_lvalue_reference_t = typename add_lvalue_reference<T>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_rvalue_reference
+ //
+ // C++11 Standard, section 20.9.7.2
+ // If T names an object or function type then the member typedef type
+ // shall name T&&; otherwise, type shall name T. [ Note: This rule reflects
+ // the semantics of reference collapsing (8.3.2). For example, when a type T
+ // names a type T1&, the type add_rvalue_reference<T>::type is not an
+ // rvalue reference. end note ]
+ //
+ // Rules (8.3.2 p6):
+ // void + && -> void
+ // T + && -> T&&
+ // T& + && -> T&
+ // T&& + && -> T&&
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_rvalue_reference_CONFORMANCE 1
+
+ namespace internal
+ {
+ template <typename T>
+ auto try_add_rvalue_reference(int)->type_identity<T&&>;
+
+ template <typename T>
+ auto try_add_rvalue_reference(...)->type_identity<T>;
+ }
+
+ template <typename T> struct add_rvalue_reference : decltype(internal::try_add_rvalue_reference<T>(0)) {};
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ // To do: define macro.
+ #else
+ template <typename T>
+ using add_rvalue_reference_t = typename add_rvalue_reference<T>::type;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // declval
+ //
+ // declval is normally found in <utility.h> but type traits need it and utility #includes this.
+ //
+ // Converts any type T to a reference type, making it possible to use member functions in
+ // decltype expressions without specifying constructors. It has no use outside decltype expressions.
+ // By design there is no implementation, as it's never executed but rather is used only in decltype expressions.
+ // The C++11 Standard section 20.2.4 states that we must declare this.
+ // http://en.cppreference.com/w/cpp/utility/declval
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_declval_CONFORMANCE 1
+
+ template <typename T>
+ typename eastl::add_rvalue_reference<T>::type declval() EA_NOEXCEPT;
+
+ #if !defined(EA_COMPILER_NO_DECLTYPE) && !EASTL_TYPE_TRAIT_declval_CONFORMANCE
+ #error decltype is supported by the compiler but declval is not. A lot of our type trait code assumes that if the compiler supports decltype then it supports rvalue references.
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // static_min / static_max
+ //
+ // These are primarily useful in templated code for meta programming.
+ // Currently we are limited to size_t, as C++ doesn't allow integral
+ // template parameters to be generic. We can expand the supported types
+ // to include additional integers if needed.
+ //
+ // These are not in the C++ Standard.
+ //
+ // Example usage:
+ // Printf("%zu", static_max<3, 7, 1, 5>::value); // prints "7"
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_static_min_CONFORMANCE 1
+ #define EASTL_TYPE_TRAIT_static_max_CONFORMANCE 1
+
+ template <size_t I0, size_t ...in>
+ struct static_min;
+
+ template <size_t I0>
+ struct static_min<I0>
+ { static const size_t value = I0; };
+
+ template <size_t I0, size_t I1, size_t ...in>
+ struct static_min<I0, I1, in...>
+ { static const size_t value = ((I0 <= I1) ? static_min<I0, in...>::value : static_min<I1, in...>::value); };
+
+ template <size_t I0, size_t ...in>
+ struct static_max;
+
+ template <size_t I0>
+ struct static_max<I0>
+ { static const size_t value = I0; };
+
+ template <size_t I0, size_t I1, size_t ...in>
+ struct static_max<I0, I1, in...>
+ { static const size_t value = ((I0 >= I1) ? static_max<I0, in...>::value : static_max<I1, in...>::value); };
+
+ ///////////////////////////////////////////////////////////////////////
+ /// This enum class is useful for detecting whether a system is little
+ /// or big endian. Mixed or middle endian is not modeled here as described
+ /// by the C++20 spec.
+ ///////////////////////////////////////////////////////////////////////
+ EA_DISABLE_VC_WARNING(4472) // 'endian' is a native enum: add an access specifier (private/public) to declare a managed enum
+ enum class endian
+ {
+ #ifdef EA_SYSTEM_LITTLE_ENDIAN
+ little = 1,
+ big = 0,
+ native = little
+ #else
+ little = 0,
+ big = 1,
+ native = big
+ #endif
+ };
+ EA_RESTORE_VC_WARNING();
+
+} // namespace eastl
+
+
+// The following files implement the type traits themselves.
+#include <EASTL/internal/type_fundamental.h>
+#include <EASTL/internal/type_transformations.h>
+#include <EASTL/internal/type_void_t.h>
+#include <EASTL/internal/type_properties.h>
+#include <EASTL/internal/type_compound.h>
+#include <EASTL/internal/type_pod.h>
+#include <EASTL/internal/type_detected.h>
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/unique_ptr.h b/EASTL/include/EASTL/unique_ptr.h
new file mode 100644
index 0000000..195cc42
--- /dev/null
+++ b/EASTL/include/EASTL/unique_ptr.h
@@ -0,0 +1,735 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_UNIQUE_PTR_H
+#define EASTL_UNIQUE_PTR_H
+
+
+#include <EABase/nullptr.h>
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/smart_ptr.h> // Defines smart_ptr_deleter
+#include <EASTL/internal/move_help.h> // Defines EASTL_MOVE
+#include <EASTL/type_traits.h>
+#include <EASTL/utility.h>
+#include <EASTL/functional.h>
+#include <EASTL/bonus/compressed_pair.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+namespace eastl
+{
+ /// class unique_ptr
+ ///
+ /// This class implements a unique_ptr template. This is a class which is
+ /// similar to the C++ auto_ptr template, except that it prohibits copying
+ /// of itself, for safety.
+ ///
+ /// More specifically, the unique_ptr class template stores a pointer to a
+ /// dynamically allocated object. The object pointed to is automatically
+ /// deleted on destructor of unique_ptr or can be manually deleted via the
+ /// unique_ptr::reset function.
+ ///
+ /// Memory allocation notes:
+ /// unique_ptr doesn't allocate memory; all allocated pointers are externally
+ /// derived. unique_ptr does deallocate memory, though always through the
+ /// user-provided deleter. You need to make sure you are consistent in providing
+ /// a deleter which frees memory in a way that matches how it was originally allocated.
+ /// Deleters have instance information and are moved between containers the same way
+ /// the allocated pointers are. Thus you can allocate memory via some heap and
+ /// provide a deleter which contains a pointer to that same heap, and regardless
+ /// of what you do with the unique_ptr, including moving it to another unique_ptr,
+ /// the deletion will use the originally provided heap.
+ ///
+ /// Example usage:
+ /// unique_ptr<int> p(new int);
+ /// *p = 4;
+ ///
+ /// unique_ptr<int[]> pArray(new int[4]);
+ /// p[0] = 4;
+ ///
+ /// Type completeness requirements
+ /// http://stackoverflow.com/questions/6012157/is-stdunique-ptrt-required-to-know-the-full-definition-of-t/6089065#6089065
+ /// Here is a table which documents several members of shared_ptr and unique_ptr with respect to completeness requirements.
+ /// If the member requires a complete type, the entry has a "C", otherwise the table entry is filled with "I".
+ ///
+ /// unique_ptr shared_ptr
+ /// +------------------------+---------------+---------------+
+ /// | P() | I | I |
+ /// | default constructor | | |
+ /// +------------------------+---------------+---------------+
+ /// | P(const P&) | N/A | I |
+ /// | copy constructor | | |
+ /// +------------------------+---------------+---------------+
+ /// | P(P&&) | I | I |
+ /// | move constructor | | |
+ /// +------------------------+---------------+---------------+
+ /// | ~P() | C | I |
+ /// | destructor | | |
+ /// +------------------------+---------------+---------------+
+ /// | P(A*) | I | C |
+ /// +------------------------+---------------+---------------+
+ /// | operator=(const P&) | N/A | I |
+ /// | copy assignment | | |
+ /// +------------------------+---------------+---------------+
+ /// | operator=(P&&) | C | I |
+ /// | move assignment | | |
+ /// +------------------------+---------------+---------------+
+ /// | reset() | C | I |
+ /// +------------------------+---------------+---------------+
+ /// | reset(A*) | C | C |
+ /// +------------------------+---------------+---------------+
+ ///
+ template <typename T, typename Deleter = eastl::default_delete<T> >
+ class unique_ptr
+ {
+ static_assert(!is_rvalue_reference<Deleter>::value, "The supplied Deleter cannot be a r-value reference.");
+ public:
+ typedef Deleter deleter_type;
+ typedef T element_type;
+ typedef unique_ptr<element_type, deleter_type> this_type;
+ typedef typename Internal::unique_pointer_type<element_type, deleter_type>::type pointer;
+
+ public:
+ /// unique_ptr
+ /// Construct a unique_ptr from a pointer allocated via new.
+ /// Example usage:
+ /// unique_ptr<int> ptr;
+ EA_CPP14_CONSTEXPR unique_ptr() EA_NOEXCEPT
+ : mPair(pointer())
+ {
+ static_assert(!eastl::is_pointer<deleter_type>::value, "unique_ptr deleter default-constructed with null pointer. Use a different constructor or change your deleter to a class.");
+ }
+
+ /// unique_ptr
+ /// Construct a unique_ptr from a null pointer.
+ /// Example usage:
+ /// unique_ptr<int> ptr(nullptr);
+ EA_CPP14_CONSTEXPR unique_ptr(std::nullptr_t) EA_NOEXCEPT
+ : mPair(pointer())
+ {
+ static_assert(!eastl::is_pointer<deleter_type>::value, "unique_ptr deleter default-constructed with null pointer. Use a different constructor or change your deleter to a class.");
+ }
+
+ /// unique_ptr
+ /// Construct a unique_ptr from a pointer allocated via new.
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ explicit unique_ptr(pointer pValue) EA_NOEXCEPT
+ : mPair(pValue)
+ {
+ static_assert(!eastl::is_pointer<deleter_type>::value, "unique_ptr deleter default-constructed with null pointer. Use a different constructor or change your deleter to a class.");
+ }
+
+ /// unique_ptr
+ /// Constructs a unique_ptr with the owner pointer and deleter specified
+ /// Example usage:
+ /// eastl::smart_ptr_deleter<int> del;
+ /// unique_ptr<int> ptr(new int(3), del);
+ unique_ptr(pointer pValue, typename eastl::conditional<eastl::is_lvalue_reference<deleter_type>::value, deleter_type, typename eastl::add_lvalue_reference<const deleter_type>::type>::type deleter) EA_NOEXCEPT
+ : mPair(pValue, deleter) {}
+
+ /// unique_ptr
+ /// Constructs a unique_ptr with the owned pointer and deleter specified (rvalue)
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3), eastl::smart_ptr_deleter<int>());
+ unique_ptr(pointer pValue, typename eastl::remove_reference<deleter_type>::type&& deleter) EA_NOEXCEPT
+ : mPair(pValue, eastl::move(deleter))
+ {
+ static_assert(!eastl::is_lvalue_reference<deleter_type>::value, "deleter_type reference refers to an rvalue deleter. The reference will probably become invalid before used. Change the deleter_type to not be a reference or construct with permanent deleter.");
+ }
+
+ /// unique_ptr
+ /// Move constructor
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ /// unique_ptr<int> newPtr = eastl::move(ptr);
+ unique_ptr(this_type&& x) EA_NOEXCEPT
+ : mPair(x.release(), eastl::forward<deleter_type>(x.get_deleter())) {}
+
+ /// unique_ptr
+ /// Move constructor
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ /// unique_ptr<int> newPtr = eastl::move(ptr);
+ template <typename U, typename E>
+ unique_ptr(unique_ptr<U, E>&& u, typename enable_if<!is_array<U>::value && is_convertible<typename unique_ptr<U, E>::pointer, pointer>::value && is_convertible<E, deleter_type>::value && (is_same<deleter_type, E>::value || !is_lvalue_reference<deleter_type>::value)>::type* = 0) EA_NOEXCEPT
+ : mPair(u.release(), eastl::forward<E>(u.get_deleter())) {}
+
+ /// unique_ptr
+ /// Move assignment
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ /// unique_ptr<int> newPtr(new int(4));
+ /// ptr = eastl::move(newPtr); // Deletes int(3) and assigns mpValue to int(4)
+ this_type& operator=(this_type&& x) EA_NOEXCEPT
+ {
+ reset(x.release());
+ mPair.second() = eastl::move(eastl::forward<deleter_type>(x.get_deleter()));
+ return *this;
+ }
+
+ /// unique_ptr
+ /// Move assignment
+ template <typename U, typename E>
+ typename enable_if<!is_array<U>::value && is_convertible<typename unique_ptr<U, E>::pointer, pointer>::value && is_assignable<deleter_type&, E&&>::value, this_type&>::type
+ operator=(unique_ptr<U, E>&& u) EA_NOEXCEPT
+ {
+ reset(u.release());
+ mPair.second() = eastl::move(eastl::forward<E>(u.get_deleter()));
+ return *this;
+ }
+
+ /// operator=(nullptr_t)
+ this_type& operator=(std::nullptr_t) EA_NOEXCEPT
+ {
+ reset();
+ return *this;
+ }
+
+ /// ~unique_ptr
+ /// Destroys the owned pointer. The destructor for the object
+ /// referred to by the owned pointer will be called.
+ ~unique_ptr() EA_NOEXCEPT
+ {
+ reset();
+ }
+
+ /// reset
+ /// Deletes the owned pointer and takes ownership of the
+ /// passed in pointer. If the passed in pointer is the same
+ /// as the owned pointer, nothing is done.
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ /// ptr.reset(new int(4)); // deletes int(3)
+ /// ptr.reset(NULL); // deletes int(4)
+ void reset(pointer pValue = pointer()) EA_NOEXCEPT
+ {
+ if (pValue != mPair.first())
+ {
+ if (auto first = eastl::exchange(mPair.first(), pValue))
+ get_deleter()(first);
+ }
+ }
+
+ /// release
+ /// This simply forgets the owned pointer. It doesn't
+ /// free it but rather assumes that the user does.
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ /// int* pInt = ptr.release();
+ /// delete pInt;
+ pointer release() EA_NOEXCEPT
+ {
+ pointer const pTemp = mPair.first();
+ mPair.first() = pointer();
+ return pTemp;
+ }
+
+ /// detach
+ /// For backwards-compatibility with pre-C++11 code.
+ pointer detach() EA_NOEXCEPT { return release(); }
+
+ /// swap
+ /// Exchanges the owned pointer beween two unique_ptr objects.
+ void swap(this_type& x) EA_NOEXCEPT
+ {
+ mPair.swap(x.mPair);
+ }
+
+ /// operator*
+ /// Returns the owner pointer dereferenced.
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ /// int x = *ptr;
+ typename add_lvalue_reference<T>::type operator*() const // Not noexcept, because the pointer may be NULL.
+ {
+ return *mPair.first();
+ }
+
+ /// operator->
+ /// Allows access to the owned pointer via operator->()
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// unique_ptr<int> ptr(new X);
+ /// ptr->DoSomething();
+ pointer operator->() const EA_NOEXCEPT
+ {
+ return mPair.first();
+ }
+
+ /// get
+ /// Returns the owned pointer. Note that this class does
+ /// not provide an operator T() function. This is because such
+ /// a thing (automatic conversion) is deemed unsafe.
+ /// Example usage:
+ /// struct X{ void DoSomething(); };
+ /// unique_ptr<int> ptr(new X);
+ /// X* pX = ptr.get();
+ /// pX->DoSomething();
+ pointer get() const EA_NOEXCEPT
+ {
+ return mPair.first();
+ }
+
+ /// get_deleter
+ /// Returns the deleter used to delete the owned pointer
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ /// eastl::smart_ptr_deleter<int>& del = ptr.get_deleter();
+ deleter_type& get_deleter() EA_NOEXCEPT
+ {
+ return mPair.second();
+ }
+
+ /// get_deleter
+ /// Const version for getting the deleter
+ const deleter_type& get_deleter() const EA_NOEXCEPT
+ {
+ return mPair.second();
+ }
+
+ #ifdef EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS
+ /// Note that below we do not use operator bool(). The reason for this
+ /// is that booleans automatically convert up to short, int, float, etc.
+ /// The result is that this: if(uniquePtr == 1) would yield true (bad).
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const EA_NOEXCEPT
+ {
+ if(mPair.first())
+ return &this_type::get;
+ return NULL;
+ }
+
+ bool operator!() const EA_NOEXCEPT
+ {
+ return (mPair.first() == pointer());
+ }
+ #else
+ /// operator bool
+ /// Allows for using a unique_ptr as a boolean.
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int(3));
+ /// if(ptr)
+ /// ++*ptr;
+ ///
+ explicit operator bool() const EA_NOEXCEPT
+ {
+ return (mPair.first() != pointer());
+ }
+ #endif
+
+ /// These functions are deleted in order to prevent copying, for safety.
+ unique_ptr(const this_type&) = delete;
+ unique_ptr& operator=(const this_type&) = delete;
+ unique_ptr& operator=(pointer pValue) = delete;
+
+ protected:
+ eastl::compressed_pair<pointer, deleter_type> mPair;
+ }; // class unique_ptr
+
+
+
+ /// unique_ptr specialization for unbounded arrays.
+ ///
+ /// Differences from unique_ptr<T>:
+ /// - Conversions between different types of unique_ptr<T[], D> or to or
+ /// from the non-array forms of unique_ptr produce an ill-formed program.
+ /// - Pointers to types derived from T are rejected by the constructors, and by reset.
+ /// - The observers operator* and operator-> are not provided.
+ /// - The indexing observer operator[] is provided.
+ /// - The default deleter will call delete[].
+ ///
+ /// It's not possible to create a unique_ptr for arrays of a known bound (e.g. int[4] as opposed to int[]).
+ ///
+ /// Example usage:
+ /// unique_ptr<int[]> ptr(new int[10]);
+ /// ptr[4] = 4;
+ ///
+ template <typename T, typename Deleter>
+ class unique_ptr<T[], Deleter>
+ {
+ public:
+ typedef Deleter deleter_type;
+ typedef T element_type;
+ typedef unique_ptr<element_type[], deleter_type> this_type;
+ typedef typename Internal::unique_pointer_type<element_type, deleter_type>::type pointer;
+
+ public:
+ EA_CPP14_CONSTEXPR unique_ptr() EA_NOEXCEPT
+ : mPair(pointer())
+ {
+ static_assert(!eastl::is_pointer<deleter_type>::value, "unique_ptr deleter default-constructed with null pointer. Use a different constructor or change your deleter to a class.");
+ }
+
+ EA_CPP14_CONSTEXPR unique_ptr(std::nullptr_t) EA_NOEXCEPT
+ : mPair(pointer())
+ {
+ static_assert(!eastl::is_pointer<deleter_type>::value, "unique_ptr deleter default-constructed with null pointer. Use a different constructor or change your deleter to a class.");
+ }
+
+ template <typename P,
+ typename = eastl::enable_if_t<Internal::is_array_cv_convertible<P, pointer>::value>> // Pointers to types derived from T are rejected by the constructors, and by reset.
+ explicit unique_ptr(P pArray) EA_NOEXCEPT
+ : mPair(pArray)
+ {
+ static_assert(!eastl::is_pointer<deleter_type>::value,
+ "unique_ptr deleter default-constructed with null pointer. Use a different constructor or "
+ "change your deleter to a class.");
+ }
+
+ template <typename P>
+ unique_ptr(P pArray, typename eastl::conditional<eastl::is_lvalue_reference<deleter_type>::value, deleter_type,
+ typename eastl::add_lvalue_reference<const deleter_type>::type>::type deleter,
+ typename eastl::enable_if<Internal::is_array_cv_convertible<P, pointer>::value>::type* = 0) EA_NOEXCEPT
+ : mPair(pArray, deleter) {}
+
+ template <typename P>
+ unique_ptr(P pArray, typename eastl::remove_reference<deleter_type>::type&& deleter, eastl::enable_if_t<Internal::is_array_cv_convertible<P, pointer>::value>* = 0) EA_NOEXCEPT
+ : mPair(pArray, eastl::move(deleter))
+ {
+ static_assert(!eastl::is_lvalue_reference<deleter_type>::value, "deleter_type reference refers to an rvalue deleter. The reference will probably become invalid before used. Change the deleter_type to not be a reference or construct with permanent deleter.");
+ }
+
+ unique_ptr(this_type&& x) EA_NOEXCEPT
+ : mPair(x.release(), eastl::forward<deleter_type>(x.get_deleter())) {}
+
+ template <typename U, typename E>
+ unique_ptr(unique_ptr<U, E>&& u, typename enable_if<Internal::is_safe_array_conversion<T, pointer, U, typename unique_ptr<U, E>::pointer>::value &&
+ eastl::is_convertible<E, deleter_type>::value &&
+ (!eastl::is_lvalue_reference<deleter_type>::value || eastl::is_same<E, deleter_type>::value)>::type* = 0) EA_NOEXCEPT
+ : mPair(u.release(), eastl::forward<E>(u.get_deleter())) {}
+
+ this_type& operator=(this_type&& x) EA_NOEXCEPT
+ {
+ reset(x.release());
+ mPair.second() = eastl::move(eastl::forward<deleter_type>(x.get_deleter()));
+ return *this;
+ }
+
+ template <typename U, typename E>
+ typename enable_if<Internal::is_safe_array_conversion<T, pointer, U, typename unique_ptr<U, E>::pointer>::value && is_assignable<deleter_type&, E&&>::value, this_type&>::type
+ operator=(unique_ptr<U, E>&& u) EA_NOEXCEPT
+ {
+ reset(u.release());
+ mPair.second() = eastl::move(eastl::forward<E>(u.get_deleter()));
+ return *this;
+ }
+
+ this_type& operator=(std::nullptr_t) EA_NOEXCEPT
+ {
+ reset();
+ return *this;
+ }
+
+ ~unique_ptr() EA_NOEXCEPT
+ {
+ reset();
+ }
+
+ void reset(pointer pArray = pointer()) EA_NOEXCEPT
+ {
+ if(pArray != mPair.first())
+ {
+ if (auto first = eastl::exchange(mPair.first(), pArray))
+ get_deleter()(first);
+ }
+ }
+
+ pointer release() EA_NOEXCEPT
+ {
+ pointer const pTemp = mPair.first();
+ mPair.first() = pointer();
+ return pTemp;
+ }
+
+ /// detach
+ /// For backwards-compatibility with pre-C++11 code.
+ pointer detach() EA_NOEXCEPT { return release(); }
+
+ void swap(this_type& x) EA_NOEXCEPT
+ {
+ mPair.swap(x.mPair);
+ }
+
+ /// operator[]
+ /// Returns a reference to the specified item in the owned pointer
+ /// array.
+ /// Example usage:
+ /// unique_ptr<int> ptr(new int[6]);
+ /// int x = ptr[2];
+ typename add_lvalue_reference<T>::type operator[](ptrdiff_t i) const
+ {
+ // assert(mpArray && (i >= 0));
+ return mPair.first()[i];
+ }
+
+ pointer get() const EA_NOEXCEPT
+ {
+ return mPair.first();
+ }
+
+ deleter_type& get_deleter() EA_NOEXCEPT
+ {
+ return mPair.second();
+ }
+
+ const deleter_type& get_deleter() const EA_NOEXCEPT
+ {
+ return mPair.second();
+ }
+
+ #ifdef EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS
+ typedef T* (this_type::*bool_)() const;
+ operator bool_() const EA_NOEXCEPT
+ {
+ if(mPair.first())
+ return &this_type::get;
+ return NULL;
+ }
+
+ bool operator!() const EA_NOEXCEPT
+ {
+ return (mPair.first() == pointer());
+ }
+ #else
+ explicit operator bool() const EA_NOEXCEPT
+ {
+ return (mPair.first() != pointer());
+ }
+ #endif
+
+ /// These functions are deleted in order to prevent copying, for safety.
+ unique_ptr(const this_type&) = delete;
+ unique_ptr& operator=(const this_type&) = delete;
+ unique_ptr& operator=(pointer pArray) = delete;
+
+ protected:
+ eastl::compressed_pair<pointer, deleter_type> mPair;
+ };
+
+
+
+ /// make_unique
+ ///
+ /// The C++11 Standard doesn't have make_unique, but there's no agreed reason as to why.
+ /// http://stackoverflow.com/questions/12580432/why-does-c11-have-make-shared-but-not-make-unique
+ /// http://herbsutter.com/2013/05/29/gotw-89-solution-smart-pointers/
+ /// Herb's solution is OK but doesn't support unique_ptr<[]> (array version). We do the same
+ /// thing libc++ does and make a specialization of make_unique for arrays.
+ ///
+ /// make_unique has two cases where you can't use it and need to directly use unique_ptr:
+ /// - You need to construct the unique_ptr with a raw pointer.
+ /// - You need to specify a custom deleter.
+ ///
+ /// Note: This function uses global new T by default to create the ptr instance, as per
+ /// the C++11 Standard make_shared_ptr.
+ ///
+ /// Example usage:
+ /// struct Test{ Test(int, int){} };
+ /// auto p = make_unique<Test>(1, 2);
+ ///
+ /// auto pArray = make_unique<Test[]>(4);
+ ///
+ template <typename T, typename... Args>
+ inline typename eastl::enable_if<!eastl::is_array<T>::value, eastl::unique_ptr<T>>::type make_unique(Args&&... args)
+ { return unique_ptr<T>(new T(eastl::forward<Args>(args)...)); }
+
+ template <typename T>
+ inline typename eastl::enable_if<eastl::is_unbounded_array<T>::value, eastl::unique_ptr<T>>::type make_unique(size_t n)
+ {
+ typedef typename eastl::remove_extent<T>::type TBase;
+ return unique_ptr<T>(new TBase[n]);
+ }
+
+ // It's not possible to create a unique_ptr for arrays of a known bound (e.g. int[4] as opposed to int[]).
+ template <typename T, typename... Args>
+ typename eastl::enable_if<eastl::is_bounded_array<T>::value>::type
+ make_unique(Args&&...) = delete;
+
+
+
+
+ /// hash specialization for unique_ptr.
+ /// It simply returns eastl::hash(x.get()). If your unique_ptr pointer type (the return value of unique_ptr<T>::get) is
+ /// a custom type and not a built-in pointer type then you will need to independently define eastl::hash for that type.
+ template <typename T, typename D>
+ struct hash< unique_ptr<T, D> >
+ {
+ size_t operator()(const unique_ptr<T, D>& x) const EA_NOEXCEPT
+ { return eastl::hash<typename unique_ptr<T, D>::pointer>()(x.get()); }
+ };
+
+ /// swap
+ /// Exchanges the owned pointer beween two unique_ptr objects.
+ /// This non-member version is useful for compatibility of unique_ptr
+ /// objects with the C++ Standard Library and other libraries.
+ template <typename T, typename D>
+ inline void swap(unique_ptr<T, D>& a, unique_ptr<T, D>& b) EA_NOEXCEPT
+ {
+ a.swap(b);
+ }
+
+
+ template <typename T1, typename D1, typename T2, typename D2>
+ inline bool operator==(const unique_ptr<T1, D1>& a, const unique_ptr<T2, D2>& b)
+ {
+ return (a.get() == b.get());
+ }
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T1, typename D1, typename T2, typename D2>
+ requires std::three_way_comparable_with<typename unique_ptr<T1, D1>::pointer, typename unique_ptr<T2, D2>::pointer>
+ inline std::compare_three_way_result_t<typename unique_ptr<T1, D1>::pointer, typename unique_ptr<T2, D2>::pointer> operator<=>(const unique_ptr<T1, D1>& a, const unique_ptr<T2, D2>& b)
+ {
+ return a.get() <=> b.get();
+ }
+ #else
+ template <typename T1, typename D1, typename T2, typename D2>
+ inline bool operator!=(const unique_ptr<T1, D1>& a, const unique_ptr<T2, D2>& b)
+ {
+ return !(a.get() == b.get());
+ }
+ #endif
+
+ /// Returns which unique_ptr is 'less' than the other. Useful when storing
+ /// sorted containers of unique_ptr objects.
+ template <typename T1, typename D1, typename T2, typename D2>
+ inline bool operator<(const unique_ptr<T1, D1>& a, const unique_ptr<T2, D2>& b)
+ {
+ //typedef typename eastl::unique_ptr<T1, D1>::pointer P1; // We currently need to make these temporary variables, as otherwise clang complains about CPointer being int*&&&.
+ //typedef typename eastl::unique_ptr<T2, D2>::pointer P2; // I think there's something wrong with our common_type type trait implementation.
+ //typedef typename eastl::common_type<P1, P2>::type PCommon; // "in instantiation of function template specialization 'eastl::operator<<int, int>, no known conversion from 'element_type *' (aka 'int *') to 'int *&&&' for 1st argument"
+ //return less<PCommon>()(a.get(), b.get()); // It looks like common_type is making CPointer be (e.g.) int*&& instead of int*, though the problem may be in how less<> deals with that.
+
+ typedef typename eastl::unique_ptr<T1, D1>::pointer P1;
+ typedef typename eastl::unique_ptr<T2, D2>::pointer P2;
+ typedef typename eastl::common_type<P1, P2>::type PCommon;
+ PCommon pT1 = a.get();
+ PCommon pT2 = b.get();
+ return less<PCommon>()(pT1, pT2);
+ }
+
+ template <typename T1, typename D1, typename T2, typename D2>
+ inline bool operator>(const unique_ptr<T1, D1>& a, const unique_ptr<T2, D2>& b)
+ {
+ return (b < a);
+ }
+
+ template <typename T1, typename D1, typename T2, typename D2>
+ inline bool operator<=(const unique_ptr<T1, D1>& a, const unique_ptr<T2, D2>& b)
+ {
+ return !(b < a);
+ }
+
+ template <typename T1, typename D1, typename T2, typename D2>
+ inline bool operator>=(const unique_ptr<T1, D1>& a, const unique_ptr<T2, D2>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename T, typename D>
+ inline bool operator==(const unique_ptr<T, D>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !a;
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename D>
+ requires std::three_way_comparable_with<typename unique_ptr<T, D>::pointer, std::nullptr_t>
+ inline std::compare_three_way_result_t<typename unique_ptr<T, D>::pointer, std::nullptr_t> operator<=>(const unique_ptr<T, D>& a, std::nullptr_t)
+ {
+ return a.get() <=> nullptr;
+ }
+#else
+ template <typename T, typename D>
+ inline bool operator==(std::nullptr_t, const unique_ptr<T, D>& a) EA_NOEXCEPT
+ {
+ return !a;
+ }
+
+ template <typename T, typename D>
+ inline bool operator!=(const unique_ptr<T, D>& a, std::nullptr_t) EA_NOEXCEPT
+ {
+ return static_cast<bool>(a);
+ }
+
+ template <typename T, typename D>
+ inline bool operator!=(std::nullptr_t, const unique_ptr<T, D>& a) EA_NOEXCEPT
+ {
+ return static_cast<bool>(a);
+ }
+#endif
+
+ template <typename T, typename D>
+ inline bool operator<(const unique_ptr<T, D>& a, std::nullptr_t)
+ {
+ typedef typename unique_ptr<T, D>::pointer pointer;
+ return less<pointer>()(a.get(), nullptr);
+ }
+
+ template <typename T, typename D>
+ inline bool operator<(std::nullptr_t, const unique_ptr<T, D>& b)
+ {
+ typedef typename unique_ptr<T, D>::pointer pointer;
+ pointer pT = b.get();
+ return less<pointer>()(nullptr, pT);
+ }
+
+ template <typename T, typename D>
+ inline bool operator>(const unique_ptr<T, D>& a, std::nullptr_t)
+ {
+ return (nullptr < a);
+ }
+
+ template <typename T, typename D>
+ inline bool operator>(std::nullptr_t, const unique_ptr<T, D>& b)
+ {
+ return (b < nullptr);
+ }
+
+ template <typename T, typename D>
+ inline bool operator<=(const unique_ptr<T, D>& a, std::nullptr_t)
+ {
+ return !(nullptr < a);
+ }
+
+ template <typename T, typename D>
+ inline bool operator<=(std::nullptr_t, const unique_ptr<T, D>& b)
+ {
+ return !(b < nullptr);
+ }
+
+ template <typename T, typename D>
+ inline bool operator>=(const unique_ptr<T, D>& a, std::nullptr_t)
+ {
+ return !(a < nullptr);
+ }
+
+ template <typename T, typename D>
+ inline bool operator>=(std::nullptr_t, const unique_ptr<T, D>& b)
+ {
+ return !(nullptr < b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/unordered_map.h b/EASTL/include/EASTL/unordered_map.h
new file mode 100644
index 0000000..10c6b88
--- /dev/null
+++ b/EASTL/include/EASTL/unordered_map.h
@@ -0,0 +1,55 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_UNORDERED_MAP_H
+#define EASTL_UNORDERED_MAP_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/hash_map.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+namespace eastl
+{
+ /// unordered_map
+ ///
+ /// The original TR1 (technical report 1) used "hash_map" to name a hash
+ /// table backed associative container of unique key-value pairs. When the
+ /// container was added to the C++11 standard the committee chose the name
+ /// "unordered_map" to clarify that internally the elements are NOT sorted in
+ /// any particular order. We provide a template alias here to ensure feature
+ /// parity with the original eastl::hash_map.
+ ///
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename Key,
+ typename T,
+ typename Hash = eastl::hash<Key>,
+ typename Predicate = eastl::equal_to<Key>,
+ typename Allocator = EASTLAllocatorType,
+ bool bCacheHashCode = false>
+ using unordered_map = hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>;
+ #endif
+
+
+ /// unordered_multimap
+ ///
+ /// Similar template alias as "unordered_map" except the contained elements
+ /// need not be unique. See "hash_multimap" for more details.
+ ///
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename Key,
+ typename T,
+ typename Hash = eastl::hash<Key>,
+ typename Predicate = eastl::equal_to<Key>,
+ typename Allocator = EASTLAllocatorType,
+ bool bCacheHashCode = false>
+ using unordered_multimap = hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>;
+ #endif
+
+} // namespace eastl
+
+#endif // Header include guard
+
diff --git a/EASTL/include/EASTL/unordered_set.h b/EASTL/include/EASTL/unordered_set.h
new file mode 100644
index 0000000..ecd7219
--- /dev/null
+++ b/EASTL/include/EASTL/unordered_set.h
@@ -0,0 +1,53 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_UNORDERED_SET_H
+#define EASTL_UNORDERED_SET_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/hash_set.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+namespace eastl
+{
+
+ /// unordered_set
+ ///
+ /// The original TR1 (technical report 1) used "hash_set" to name a hash
+ /// table backed associative container of unique "Key" type objects. When
+ /// the container was added to the C++11 standard the committee chose the
+ /// name "unordered_set" to clarify that internally the elements are NOT
+ /// sorted in any particular order. We provide a template alias here to
+ /// ensure feature parity with the original eastl::hash_set.
+ ///
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename Value,
+ typename Hash = eastl::hash<Value>,
+ typename Predicate = eastl::equal_to<Value>,
+ typename Allocator = EASTLAllocatorType,
+ bool bCacheHashCode = false>
+ using unordered_set = hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>;
+ #endif
+
+ /// unordered_multiset
+ ///
+ /// Similar template alias as "unordered_set" except the contained elements
+ /// need not be unique. See "hash_multiset" for more details.
+ ///
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename Value,
+ typename Hash = eastl::hash<Value>,
+ typename Predicate = eastl::equal_to<Value>,
+ typename Allocator = EASTLAllocatorType,
+ bool bCacheHashCode = false>
+ using unordered_multiset = hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>;
+ #endif
+
+} // namespace eastl
+
+#endif // Header include guard
+
diff --git a/EASTL/include/EASTL/utility.h b/EASTL/include/EASTL/utility.h
new file mode 100644
index 0000000..1e6b922
--- /dev/null
+++ b/EASTL/include/EASTL/utility.h
@@ -0,0 +1,968 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_UTILITY_H
+#define EASTL_UTILITY_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/numeric_limits.h>
+#include <EASTL/compare.h>
+#include <EASTL/internal/functional_base.h>
+#include <EASTL/internal/move_help.h>
+#include <EABase/eahave.h>
+
+#include <EASTL/internal/integer_sequence.h>
+#include <EASTL/internal/tuple_fwd_decls.h>
+#include <EASTL/internal/in_place_t.h>
+#include <EASTL/internal/piecewise_construct_t.h>
+
+
+// 4619 - There is no warning number 'number'.
+// 4217 - Member template functions cannot be used for copy-assignment or copy-construction.
+// 4512 - 'class' : assignment operator could not be generated. // This disabling would best be put elsewhere.
+EA_DISABLE_VC_WARNING(4619 4217 4512);
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// swap
+ ///
+ /// Assigns the contents of a to b and the contents of b to a.
+ /// A temporary instance of type T is created and destroyed
+ /// in the process.
+ ///
+ /// This function is used by numerous other algorithms, and as
+ /// such it may in some cases be feasible and useful for the user
+ /// to implement an override version of this function which is
+ /// more efficient in some way.
+ ///
+
+ template <typename T>
+ inline void swap(T& a, T& b) EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible<T>::value && eastl::is_nothrow_move_assignable<T>::value)
+ {
+ T temp(EASTL_MOVE(a)); // EASTL_MOVE uses EASTL::move when available, else is a no-op.
+ a = EASTL_MOVE(b);
+ b = EASTL_MOVE(temp);
+ }
+
+
+ /// is_swappable
+ ///
+ /// Determines if two types can be swapped via the swap function. This determines
+ /// only if there is a swap function that matches the types and not if the assignments
+ /// within the swap implementation are valid.
+ /// Returns false for pre-C++11 compilers that don't support decltype.
+ ///
+ /// This is a type trait, but it's not currently found within <type_traits.h>,
+ /// as it's dependent on the swap algorithm, which is at a higher level than
+ /// type traits.
+ ///
+ /// Example usage:
+ /// static_assert(is_swappable<int>::value, "int should be swappable");
+ ///
+ #if defined(EA_COMPILER_NO_DECLTYPE)
+ #define EASTL_TYPE_TRAIT_is_swappable_CONFORMANCE 0
+
+ template <typename>
+ struct is_swappable
+ : public eastl::false_type {};
+ #else
+ #define EASTL_TYPE_TRAIT_is_swappable_CONFORMANCE 1
+
+ // We declare this version of 'eastl::swap' to make compile-time existance checks for swap functions possible.
+ //
+ #if EASTL_VARIADIC_TEMPLATES_ENABLED
+ eastl::unused swap(eastl::argument_sink, eastl::argument_sink);
+ #else
+ // Compilers that do not support variadic templates suffer from a bug with variable arguments list that
+ // causes the construction of aligned types in unaligned memory. To prevent the aligned type construction we
+ // accept the parameters by reference.
+ eastl::unused swap(eastl::argument_sink&, eastl::argument_sink&);
+ #endif
+
+ template <typename T>
+ struct is_swappable
+ : public integral_constant<bool, !eastl::is_same<decltype(swap(eastl::declval<T&>(), eastl::declval<T&>())), eastl::unused>::value> {}; // Don't prefix swap with eastl:: as we want to allow user-defined swaps via argument-dependent lookup.
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_swappable_v = is_swappable<T>::value;
+ #endif
+
+
+
+ /// is_nothrow_swappable
+ ///
+ /// Evaluates to true if is_swappable, and swap is a nothrow function.
+ /// returns false for pre-C++11 compilers that don't support nothrow.
+ ///
+ /// This is a type trait, but it's not currently found within <type_traits.h>,
+ /// as it's dependent on the swap algorithm, which is at a higher level than
+ /// type traits.
+ ///
+ #define EASTL_TYPE_TRAIT_is_nothrow_swappable_CONFORMANCE EASTL_TYPE_TRAIT_is_swappable_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_swappable_helper_noexcept_wrapper
+ { const static bool value = noexcept(swap(eastl::declval<T&>(), eastl::declval<T&>())); };
+
+ template <typename T, bool>
+ struct is_nothrow_swappable_helper
+ : public eastl::integral_constant<bool, is_nothrow_swappable_helper_noexcept_wrapper<T>::value> {}; // Don't prefix swap with eastl:: as we want to allow user-defined swaps via argument-dependent lookup.
+
+ template <typename T>
+ struct is_nothrow_swappable_helper<T, false>
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_nothrow_swappable
+ : public eastl::is_nothrow_swappable_helper<T, eastl::is_swappable<T>::value> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_swappable_v = is_nothrow_swappable<T>::value;
+ #endif
+
+
+
+ /// is_swappable_with
+ ///
+ ///
+ template <typename T, typename U, bool OneTypeIsVoid = (eastl::is_void<T>::value || eastl::is_void<U>::value)>
+ struct is_swappable_with_helper
+ {
+ // Don't prefix swap with eastl:: as we want to allow user-defined swaps via argument-dependent lookup.
+ static const bool value =
+ !eastl::is_same<decltype(swap(eastl::declval<T>(), eastl::declval<U>())), eastl::unused>::value &&
+ !eastl::is_same<decltype(swap(eastl::declval<U>(), eastl::declval<T>())), eastl::unused>::value;
+ };
+
+ template <typename T, typename U>
+ struct is_swappable_with_helper<T,U, true> { static const bool value = false; };
+
+ template<typename T, typename U>
+ struct is_swappable_with
+ : public eastl::bool_constant<is_swappable_with_helper<T, U>::value> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_swappable_with_v = is_swappable_with<T, U>::value;
+ #endif
+
+
+
+ /// is_nothrow_swappable_with
+ ///
+ ///
+ #if defined(EA_COMPILER_NO_DECLTYPE) || defined(EA_COMPILER_NO_NOEXCEPT)
+ #define EASTL_TYPE_TRAIT_is_nothrow_swappable_with_CONFORMANCE 0
+ template <typename T, typename U>
+ struct is_nothrow_swappable_with_helper { static const bool value = false; };
+ #else
+ #define EASTL_TYPE_TRAIT_is_nothrow_swappable_with_CONFORMANCE 1
+ template <typename T, typename U, bool OneTypeIsVoid = (eastl::is_void<T>::value || eastl::is_void<U>::value)>
+ struct is_nothrow_swappable_with_helper
+ {
+ // Don't prefix swap with eastl:: as we want to allow user-defined swaps via argument-dependent lookup.
+ static const bool value = noexcept(swap(eastl::declval<T>(), eastl::declval<U>())) &&
+ noexcept(swap(eastl::declval<U>(), eastl::declval<T>()));
+ };
+
+ template <typename T, typename U>
+ struct is_nothrow_swappable_with_helper<T,U, true> { static const bool value = false; };
+ #endif
+
+ template <typename T, typename U>
+ struct is_nothrow_swappable_with : public eastl::bool_constant<is_nothrow_swappable_with_helper<T, U>::value> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_nothrow_swappable_with_v = is_nothrow_swappable_with<T, U>::value;
+ #endif
+
+
+
+ // iter_swap helper functions
+ //
+ template <bool bTypesAreEqual>
+ struct iter_swap_impl
+ {
+ // Handles the false case, where *a and *b are different types.
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ static void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::value_type value_type_a;
+
+ value_type_a temp(EASTL_MOVE(*a)); // EASTL_MOVE uses EASTL::move when available, else is a no-op.
+ *a = EASTL_MOVE(*b);
+ *b = EASTL_MOVE(temp);
+ }
+ };
+
+ template <>
+ struct iter_swap_impl<true>
+ {
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ static void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ swap(*a, *b); // Don't prefix swap with eastl:: as we want to allow user-defined swaps via argument-dependent lookup.
+ }
+ };
+
+
+ /// iter_swap
+ ///
+ /// Swaps the values of the elements the given iterators are pointing to.
+ ///
+ /// Equivalent to swap(*a, *b), though the user can provide an override to
+ /// iter_swap that is independent of an override which may exist for swap.
+ ///
+ /// We provide a version of iter_swap which uses swap when the swapped types
+ /// are equal but a manual implementation otherwise. We do this because the
+ /// C++ standard defect report says that iter_swap(a, b) must be implemented
+ /// as swap(*a, *b) when possible.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ inline void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::value_type value_type_a;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::value_type value_type_b;
+ typedef typename eastl::iterator_traits<ForwardIterator1>::reference reference_a;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::reference reference_b;
+
+ eastl::iter_swap_impl<eastl::type_and<eastl::is_same<value_type_a, value_type_b>::value, eastl::is_same<value_type_a&, reference_a>::value, eastl::is_same<value_type_b&, reference_b>::value >::value >::iter_swap(a, b);
+ }
+
+
+
+ /// swap_ranges
+ ///
+ /// Swaps each of the elements in the range [first1, last1) with the
+ /// corresponding element in the range [first2, first2 + (last1 - first1)).
+ ///
+ /// Effects: For each nonnegative integer n < (last1 - first1),
+ /// performs: swap(*(first1 + n), *(first2 + n)).
+ ///
+ /// Requires: The two ranges [first1, last1) and [first2, first2 + (last1 - first1))
+ /// shall not overlap.
+ ///
+ /// Returns: first2 + (last1 - first1). That is, returns the end of the second range.
+ ///
+ /// Complexity: Exactly 'last1 - first1' swaps.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ inline ForwardIterator2
+ swap_ranges(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2)
+ {
+ for(; first1 != last1; ++first1, ++first2)
+ iter_swap(first1, first2); // Don't prefix swap with eastl:: as we want to allow user-defined swaps via argument-dependent lookup.
+ return first2;
+ }
+
+
+ /// swap
+ ///
+ /// C++11 array swap
+ /// http://en.cppreference.com/w/cpp/algorithm/swap
+ ///
+ template <typename T, size_t N>
+ inline void
+ swap(T (&a)[N], T (&b)[N]) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<T>::value)
+ {
+ eastl::swap_ranges(a, a + N, b);
+ }
+
+
+ /// exchange
+ ///
+ /// Replaces the value of the first argument with the new value provided.
+ /// The return value is the previous value of first argument.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/exchange
+ ///
+ template <typename T, typename U = T>
+ inline T exchange(T& obj, U&& new_value)
+ {
+ T old_value = eastl::move(obj);
+ obj = eastl::forward<U>(new_value);
+ return old_value;
+ }
+
+
+ /// as_const
+ ///
+ /// Converts a 'T&' into a 'const T&' which simplifies calling const functions on non-const objects.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/as_const
+ ///
+ /// C++ proposal paper:
+ /// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4380.html
+ ///
+ template <class T>
+ EA_CONSTEXPR typename eastl::add_const<T>::type& as_const(T& t) EA_NOEXCEPT
+ { return t; }
+
+ // The C++17 forbids 'eastl::as_const' from accepting rvalues. Passing an rvalue reference to 'eastl::as_const'
+ // generates an 'const T&' or const lvalue reference to a temporary object.
+ template <class T>
+ void as_const(const T&&) = delete;
+
+
+ ///////////////////////////////////////////////////////////////////////
+ /// rel_ops
+ ///
+ /// rel_ops allow the automatic generation of operators !=, >, <=, >= from
+ /// just operators == and <. These are intentionally in the rel_ops namespace
+ /// so that they don't conflict with other similar operators. To use these
+ /// operators, add "using namespace std::rel_ops;" to an appropriate place in
+ /// your code, usually right in the function that you need them to work.
+ /// In fact, you will very likely have collision problems if you put such
+ /// using statements anywhere other than in the .cpp file like so and may
+ /// also have collisions when you do, as the using statement will affect all
+ /// code in the module. You need to be careful about use of rel_ops.
+ ///
+ namespace rel_ops
+ {
+ template <typename T>
+ inline bool operator!=(const T& x, const T& y)
+ { return !(x == y); }
+
+ template <typename T>
+ inline bool operator>(const T& x, const T& y)
+ { return (y < x); }
+
+ template <typename T>
+ inline bool operator<=(const T& x, const T& y)
+ { return !(y < x); }
+
+ template <typename T>
+ inline bool operator>=(const T& x, const T& y)
+ { return !(x < y); }
+ }
+
+
+ #if defined(EA_COMPILER_CPP20_ENABLED)
+ ///////////////////////////////////////////////////////////////////////
+ /// Safe Integral Comparisons
+ ///
+ template <typename T, typename U>
+ EA_CONSTEXPR bool cmp_equal(const T x, const U y) EA_NOEXCEPT
+ {
+ // Assert types are not chars, bools, etc.
+ static_assert(eastl::is_integral_v<T> && !eastl::is_same_v<eastl::remove_cv_t<T>, bool> && !eastl::is_same_v<eastl::remove_cv_t<T>, char>);
+ static_assert(eastl::is_integral_v<U> && !eastl::is_same_v<eastl::remove_cv_t<U>, bool> && !eastl::is_same_v<eastl::remove_cv_t<U>, char>);
+
+ using UT = eastl::make_unsigned_t<T>;
+ using UU = eastl::make_unsigned_t<U>;
+
+ if constexpr (eastl::is_signed_v<T> == eastl::is_signed_v<U>)
+ {
+ return x == y;
+ }
+ else if (eastl::is_signed_v<T>)
+ {
+ return (x < 0) ? false : UT(x) == y;
+ }
+ else
+ {
+ return (y < 0) ? false : x == UU(y);
+ }
+ }
+
+
+ template <typename T, typename U>
+ EA_CONSTEXPR bool cmp_not_equal(const T x, const U y) EA_NOEXCEPT
+ { return !eastl::cmp_equal(x, y); }
+
+
+ template <typename T, typename U>
+ EA_CONSTEXPR bool cmp_less(const T x, const U y) EA_NOEXCEPT
+ {
+ static_assert(eastl::is_integral_v<T> && !eastl::is_same_v<eastl::remove_cv_t<T>, bool> && !eastl::is_same_v<eastl::remove_cv_t<T>, char>);
+ static_assert(eastl::is_integral_v<U> && !eastl::is_same_v<eastl::remove_cv_t<U>, bool> && !eastl::is_same_v<eastl::remove_cv_t<U>, char>);
+
+ using UT = eastl::make_unsigned_t<T>;
+ using UU = eastl::make_unsigned_t<U>;
+
+ if constexpr (eastl::is_signed_v<T> == eastl::is_signed_v<U>)
+ {
+ return x < y;
+ }
+ else if (eastl::is_signed_v<T>)
+ {
+ return (x < 0) ? true : UT(x) < y;
+ }
+ else
+ {
+ return (y < 0) ? false : x < UU(y);
+ }
+ }
+
+
+ template <typename T, typename U>
+ EA_CONSTEXPR bool cmp_greater(const T x, const U y) EA_NOEXCEPT
+ { return eastl::cmp_less(y, x); }
+
+
+ template <typename T, typename U>
+ EA_CONSTEXPR bool cmp_less_equal(const T x, const U y) EA_NOEXCEPT
+ { return !eastl::cmp_greater(x, y); }
+
+
+ template <typename T, typename U>
+ EA_CONSTEXPR bool cmp_greater_equal(const T x, const U y) EA_NOEXCEPT
+ { return !eastl::cmp_less(x, y); }
+
+
+ template <typename T, typename U>
+ EA_CONSTEXPR bool in_range(const U x) EA_NOEXCEPT
+ {
+ static_assert(eastl::is_integral_v<T> && !eastl::is_same_v<eastl::remove_cv_t<T>, bool> && !eastl::is_same_v<eastl::remove_cv_t<T>, char>);
+ static_assert(eastl::is_integral_v<U> && !eastl::is_same_v<eastl::remove_cv_t<U>, bool> && !eastl::is_same_v<eastl::remove_cv_t<U>, char>);
+
+ return eastl::cmp_greater_equal(x, eastl::numeric_limits<T>::min()) && eastl::cmp_less_equal(x, eastl::numeric_limits<T>::max());
+ }
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ /// pair_first_construct
+ ///
+ /// Disambiguates when a user is requesting the 'single first element' pair constructor.
+ ///
+ struct pair_first_construct_t {};
+ EA_CONSTEXPR pair_first_construct_t pair_first_construct = pair_first_construct_t();
+
+
+ ///////////////////////////////////////////////////////////////////////
+ /// pair
+ ///
+ /// Implements a simple pair, just like the C++ std::pair.
+ ///
+ template <typename T1, typename T2>
+ struct pair
+ {
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef pair<T1, T2> this_type;
+
+ T1 first;
+ T2 second;
+
+ template <typename TT1 = T1,
+ typename TT2 = T2,
+ class = eastl::enable_if_t<eastl::is_default_constructible_v<TT1> &&
+ eastl::is_default_constructible_v<TT2>>>
+ EA_CONSTEXPR pair()
+ : first(), second()
+ {
+ }
+
+ #if EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR
+ template <typename TT1 = T1, typename TT2 = T2, typename = eastl::enable_if_t<eastl::is_default_constructible_v<TT2>>>
+ EA_CPP14_CONSTEXPR pair(const TT1& x)
+ : first(x), second()
+ {
+ }
+
+ // GCC has a bug with overloading rvalue and lvalue function templates.
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54425
+ //
+ // error: 'eastl::pair<T1, T2>::pair(T1&&) [with T1 = const int&; T2 = const int&]' cannot be overloaded
+ // error: with 'eastl::pair<T1, T2>::pair(const T1&) [with T1 = const int&; T2 = const int&]'
+ #if !defined(EA_COMPILER_GNUC)
+ template <typename TT2 = T2, typename = eastl::enable_if_t<eastl::is_default_constructible_v<TT2>>>
+ EA_CPP14_CONSTEXPR pair(T1&& x)
+ : first(eastl::move(x)), second()
+ {
+ }
+ #endif
+ #endif
+
+
+ // NOTE(rparolin):
+ // This is a workaround to a compiler intrinic bug which fails to correctly identify a nested class using
+ // non-static data member initialization as default constructible.
+ //
+ // See bug submitted to LLVM for more details.
+ // https://bugs.llvm.org/show_bug.cgi?id=38374
+ #if !defined(__clang__)
+ template<typename T>
+ using single_pair_ctor_sfinae = eastl::enable_if_t<eastl::is_default_constructible_v<T>>;
+ #else
+ template<typename>
+ using single_pair_ctor_sfinae = void;
+ #endif
+
+ template <typename TT1 = T1, typename TT2 = T2, typename = single_pair_ctor_sfinae<TT2>>
+ EA_CPP14_CONSTEXPR pair(pair_first_construct_t, const TT1& x)
+ : first(x), second()
+ {
+ }
+
+ // GCC has a bug with overloading rvalue and lvalue function templates.
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54425
+ //
+ // error: 'eastl::pair<T1, T2>::pair(T1&&) [with T1 = const int&; T2 = const int&]' cannot be overloaded
+ // error: with 'eastl::pair<T1, T2>::pair(const T1&) [with T1 = const int&; T2 = const int&]'
+ #if !defined(EA_COMPILER_GNUC)
+ template <typename TT2 = T2, typename = single_pair_ctor_sfinae<TT2>>
+ EA_CPP14_CONSTEXPR pair(pair_first_construct_t, T1&& x)
+ : first(eastl::move(x)), second()
+ {
+ }
+ #endif
+
+ template <
+ typename TT1 = T1,
+ typename TT2 = T2,
+ class = eastl::enable_if_t<eastl::is_copy_constructible_v<TT1> && eastl::is_copy_constructible_v<TT2>>>
+ EA_CPP14_CONSTEXPR pair(const T1& x, const T2& y)
+ : first(x), second(y)
+ {
+ }
+
+ EA_CPP14_CONSTEXPR pair(pair&& p) = default;
+ EA_CPP14_CONSTEXPR pair(const pair&) = default;
+
+ template <
+ typename U,
+ typename V,
+ class = eastl::enable_if_t<eastl::is_convertible_v<const U&, T1> && eastl::is_convertible_v<const V&, T2>>>
+ EA_CPP14_CONSTEXPR pair(const pair<U, V>& p)
+ : first(p.first), second(p.second)
+ {
+ }
+
+ template <typename U,
+ typename V,
+ typename = eastl::enable_if_t<eastl::is_convertible_v<U, T1> && eastl::is_convertible_v<V, T2>>>
+ EA_CPP14_CONSTEXPR pair(U&& u, V&& v)
+ : first(eastl::forward<U>(u)), second(eastl::forward<V>(v))
+ {
+ }
+
+ template <typename U, typename = eastl::enable_if_t<eastl::is_convertible_v<U, T1>>>
+ EA_CPP14_CONSTEXPR pair(U&& x, const T2& y)
+ : first(eastl::forward<U>(x)), second(y)
+ {
+ }
+
+ template <typename V, typename = eastl::enable_if_t<eastl::is_convertible_v<V, T2>>>
+ EA_CPP14_CONSTEXPR pair(const T1& x, V&& y)
+ : first(x), second(eastl::forward<V>(y))
+ {
+ }
+
+ template <typename U,
+ typename V,
+ typename = eastl::enable_if_t<eastl::is_convertible_v<U, T1> && eastl::is_convertible_v<V, T2>>>
+ EA_CPP14_CONSTEXPR pair(pair<U, V>&& p)
+ : first(eastl::forward<U>(p.first)), second(eastl::forward<V>(p.second))
+ {
+ }
+
+ // Initializes first with arguments of types Args1... obtained by forwarding the elements of first_args and
+ // initializes second with arguments of types Args2... obtained by forwarding the elements of second_args.
+ template <class... Args1,
+ class... Args2,
+ typename = eastl::enable_if_t<eastl::is_constructible_v<first_type, Args1&&...> &&
+ eastl::is_constructible_v<second_type, Args2&&...>>>
+ pair(eastl::piecewise_construct_t pwc, eastl::tuple<Args1...> first_args, eastl::tuple<Args2...> second_args)
+ : pair(pwc,
+ eastl::move(first_args),
+ eastl::move(second_args),
+ eastl::make_index_sequence<sizeof...(Args1)>(),
+ eastl::make_index_sequence<sizeof...(Args2)>())
+ {
+ }
+
+ private:
+ // NOTE(rparolin): Internal constructor used to expand the index_sequence required to expand the tuple elements.
+ template <class... Args1, class... Args2, size_t... I1, size_t... I2>
+ pair(eastl::piecewise_construct_t,
+ eastl::tuple<Args1...> first_args,
+ eastl::tuple<Args2...> second_args,
+ eastl::index_sequence<I1...>,
+ eastl::index_sequence<I2...>)
+ : first(eastl::forward<Args1>(eastl::get<I1>(first_args))...)
+ , second(eastl::forward<Args2>(eastl::get<I2>(second_args))...)
+ {
+ }
+
+ public:
+ pair& operator=(const pair& p)
+ EA_NOEXCEPT_IF(eastl::is_nothrow_copy_assignable_v<T1>&& eastl::is_nothrow_copy_assignable_v<T2>)
+ {
+ first = p.first;
+ second = p.second;
+ return *this;
+ }
+
+ template <typename U,
+ typename V,
+ typename = eastl::enable_if_t<eastl::is_convertible_v<U, T1> && eastl::is_convertible_v<V, T2>>>
+ pair& operator=(const pair<U, V>& p)
+ {
+ first = p.first;
+ second = p.second;
+ return *this;
+ }
+
+ pair& operator=(pair&& p)
+ EA_NOEXCEPT_IF(eastl::is_nothrow_move_assignable_v<T1>&& eastl::is_nothrow_move_assignable_v<T2>)
+ {
+ first = eastl::forward<T1>(p.first);
+ second = eastl::forward<T2>(p.second);
+ return *this;
+ }
+
+ template <typename U,
+ typename V,
+ typename = eastl::enable_if_t<eastl::is_convertible_v<U, T1> && eastl::is_convertible_v<V, T2>>>
+ pair& operator=(pair<U, V>&& p)
+ {
+ first = eastl::forward<U>(p.first);
+ second = eastl::forward<V>(p.second);
+ return *this;
+ }
+
+ void swap(pair& p) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable_v<T1>&& eastl::is_nothrow_swappable_v<T2>)
+ {
+ eastl::iter_swap(&first, &p.first);
+ eastl::iter_swap(&second, &p.second);
+ }
+ };
+
+ #define EASTL_PAIR_CONFORMANCE 1
+
+
+
+ /// use_self
+ ///
+ /// operator()(x) simply returns x. Used in sets, as opposed to maps.
+ /// This is a template policy implementation; it is an alternative to
+ /// the use_first template implementation.
+ ///
+ /// The existance of use_self may seem odd, given that it does nothing,
+ /// but these kinds of things are useful, virtually required, for optimal
+ /// generic programming.
+ ///
+ template <typename T>
+ struct use_self // : public unary_function<T, T> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef T result_type;
+
+ const T& operator()(const T& x) const
+ { return x; }
+ };
+
+ /// use_first
+ ///
+ /// operator()(x) simply returns x.first. Used in maps, as opposed to sets.
+ /// This is a template policy implementation; it is an alternative to
+ /// the use_self template implementation. This is the same thing as the
+ /// SGI SGL select1st utility.
+ ///
+ template <typename Pair>
+ struct use_first
+ {
+ typedef Pair argument_type;
+ typedef typename Pair::first_type result_type;
+
+ const result_type& operator()(const Pair& x) const
+ { return x.first; }
+ };
+
+ /// use_second
+ ///
+ /// operator()(x) simply returns x.second.
+ /// This is the same thing as the SGI SGL select2nd utility
+ ///
+ template <typename Pair>
+ struct use_second // : public unary_function<Pair, typename Pair::second_type> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef Pair argument_type;
+ typedef typename Pair::second_type result_type;
+
+ const result_type& operator()(const Pair& x) const
+ { return x.second; }
+ };
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline bool operator==(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return ((a.first == b.first) && (a.second == b.second));
+ }
+
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T1, typename T2>
+ EA_CONSTEXPR inline std::common_comparison_category_t<synth_three_way_result<T1>, synth_three_way_result<T2>> operator<=>(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ if (auto result = synth_three_way{}(a.first, b.first); result != 0)
+ {
+ return result;
+ }
+ return synth_three_way{}(a.second, b.second);
+ }
+ #else
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline bool operator<(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ // Note that we use only operator < in this expression. Otherwise we could
+ // use the simpler: return (a.m1 == b.m1) ? (a.m2 < b.m2) : (a.m1 < b.m1);
+ // The user can write a specialization for this operator to get around this
+ // in cases where the highest performance is required.
+ return ((a.first < b.first) || (!(b.first < a.first) && (a.second < b.second)));
+ }
+
+
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline bool operator!=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline bool operator>(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline bool operator>=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline bool operator<=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(b < a);
+ }
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ /// make_pair / make_pair_ref
+ ///
+ /// make_pair is the same as std::make_pair specified by the C++ standard.
+ /// If you look at the C++ standard, you'll see that it specifies T& instead of T.
+ /// However, it has been determined that the C++ standard is incorrect and has
+ /// flagged it as a defect (http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#181).
+ /// In case you feel that you want a more efficient version that uses references,
+ /// we provide the make_pair_ref function below, though C++11 move support
+ /// makes that no longer necessary.
+ ///
+ /// Note: You don't usually need to use make_pair in order to make a pair.
+ /// The following code is equivalent, and the latter avoids one more level of inlining:
+ /// return make_pair(charPtr, charPtr);
+ /// return pair<char*, char*>(charPtr, charPtr);
+ ///
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline pair<typename eastl::remove_reference_wrapper<typename eastl::decay<T1>::type>::type,
+ typename eastl::remove_reference_wrapper<typename eastl::decay<T2>::type>::type>
+ make_pair(T1&& a, T2&& b)
+ {
+ typedef typename eastl::remove_reference_wrapper<typename eastl::decay<T1>::type>::type T1Type;
+ typedef typename eastl::remove_reference_wrapper<typename eastl::decay<T2>::type>::type T2Type;
+
+ return eastl::pair<T1Type, T2Type>(eastl::forward<T1>(a), eastl::forward<T2>(b));
+ }
+
+
+ // Without the following, VC++ fails to compile code like this: pair<const char*, int> p = eastl::make_pair<const char*, int>("hello", 0);
+ // We define a const reference version alternative to the above. "hello" is of type char const(&)[6] (array of 6 const chars),
+ // but VC++ decays it to const char* and allows this make_pair to be called with that. VC++ fails below with make_pair("hello", "people")
+ // because you can't assign arrays and until we have a better solution we just disable this make_pair specialization for when T1 or T2
+ // are of type char const(&)[].
+ #if defined(_MSC_VER)
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline pair<T1, T2> make_pair(
+ const T1& a,
+ const T2& b,
+ typename eastl::enable_if<!eastl::is_array<T1>::value && !eastl::is_array<T2>::value>::type* = 0)
+ {
+ return eastl::pair<T1, T2>(a, b);
+ }
+ #endif
+
+ // For backwards compatibility
+ template <typename T1, typename T2>
+ EA_CPP14_CONSTEXPR inline pair<typename eastl::remove_reference_wrapper<typename eastl::decay<T1>::type>::type,
+ typename eastl::remove_reference_wrapper<typename eastl::decay<T2>::type>::type>
+ make_pair_ref(T1&& a, T2&& b)
+ {
+ typedef typename eastl::remove_reference_wrapper<typename eastl::decay<T1>::type>::type T1Type;
+ typedef typename eastl::remove_reference_wrapper<typename eastl::decay<T2>::type>::type T2Type;
+
+ return eastl::pair<T1Type, T2Type>(eastl::forward<T1>(a), eastl::forward<T2>(b));
+ }
+
+#if EASTL_TUPLE_ENABLED
+
+ template <typename T1, typename T2>
+ class tuple_size<pair<T1, T2>> : public integral_constant<size_t, 2>
+ {
+ };
+
+ template <typename T1, typename T2>
+ class tuple_size<const pair<T1, T2>> : public integral_constant<size_t, 2>
+ {
+ };
+
+ template <typename T1, typename T2>
+ class tuple_element<0, pair<T1, T2>>
+ {
+ public:
+ typedef T1 type;
+ };
+
+ template <typename T1, typename T2>
+ class tuple_element<1, pair<T1, T2>>
+ {
+ public:
+ typedef T2 type;
+ };
+
+ template <typename T1, typename T2>
+ class tuple_element<0, const pair<T1, T2>>
+ {
+ public:
+ typedef const T1 type;
+ };
+
+ template <typename T1, typename T2>
+ class tuple_element<1, const pair<T1, T2>>
+ {
+ public:
+ typedef const T2 type;
+ };
+
+ template <size_t I>
+ struct GetPair;
+
+ template <>
+ struct GetPair<0>
+ {
+ template <typename T1, typename T2>
+ static EA_CONSTEXPR T1& getInternal(pair<T1, T2>& p)
+ {
+ return p.first;
+ }
+
+ template <typename T1, typename T2>
+ static EA_CONSTEXPR const T1& getInternal(const pair<T1, T2>& p)
+ {
+ return p.first;
+ }
+
+ template <typename T1, typename T2>
+ static EA_CONSTEXPR T1&& getInternal(pair<T1, T2>&& p)
+ {
+ return eastl::forward<T1>(p.first);
+ }
+ };
+
+ template <>
+ struct GetPair<1>
+ {
+ template <typename T1, typename T2>
+ static EA_CONSTEXPR T2& getInternal(pair<T1, T2>& p)
+ {
+ return p.second;
+ }
+
+ template <typename T1, typename T2>
+ static EA_CONSTEXPR const T2& getInternal(const pair<T1, T2>& p)
+ {
+ return p.second;
+ }
+
+ template <typename T1, typename T2>
+ static EA_CONSTEXPR T2&& getInternal(pair<T1, T2>&& p)
+ {
+ return eastl::forward<T2>(p.second);
+ }
+ };
+
+ template <size_t I, typename T1, typename T2>
+ tuple_element_t<I, pair<T1, T2>>& get(pair<T1, T2>& p)
+ {
+ return GetPair<I>::getInternal(p);
+ }
+
+ template <size_t I, typename T1, typename T2>
+ const tuple_element_t<I, pair<T1, T2>>& get(const pair<T1, T2>& p)
+ {
+ return GetPair<I>::getInternal(p);
+ }
+
+ template <size_t I, typename T1, typename T2>
+ tuple_element_t<I, pair<T1, T2>>&& get(pair<T1, T2>&& p)
+ {
+ return GetPair<I>::getInternal(eastl::move(p));
+ }
+
+#endif // EASTL_TUPLE_ENABLED
+
+
+} // namespace eastl
+
+///////////////////////////////////////////////////////////////
+// C++17 structured bindings support for eastl::pair
+//
+#ifndef EA_COMPILER_NO_STRUCTURED_BINDING
+ #include <tuple>
+ namespace std
+ {
+ // NOTE(rparolin): Some platform implementations didn't check the standard specification and implemented the
+ // "tuple_size" and "tuple_element" primary template with as a struct. The standard specifies they are
+ // implemented with the class keyword so we provide the template specializations as a class and disable the
+ // generated warning.
+ EA_DISABLE_CLANG_WARNING(-Wmismatched-tags)
+
+ template <class... Ts>
+ class tuple_size<::eastl::pair<Ts...>> : public ::eastl::integral_constant<size_t, sizeof...(Ts)>
+ {
+ };
+
+ template <size_t I, class... Ts>
+ class tuple_element<I, ::eastl::pair<Ts...>> : public ::eastl::tuple_element<I, ::eastl::pair<Ts...>>
+ {
+ };
+
+ EA_RESTORE_CLANG_WARNING()
+ }
+#endif
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/variant.h b/EASTL/include/EASTL/variant.h
new file mode 100644
index 0000000..a7af97b
--- /dev/null
+++ b/EASTL/include/EASTL/variant.h
@@ -0,0 +1,1588 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////
+// Implements the class template variant represents a type-safe union. An
+// instance of variant at any given time either holds a value of one of its
+// alternative types, or it holds no value.
+//
+// As with unions, if a variant holds a value of some object type T, the object
+// representation of T is allocated directly within the object representation of
+// the variant itself.
+//
+// Variant is not allowed to allocate additional (dynamic) memory.
+//
+// A variant is not permitted to hold references, arrays, or the type void.
+// Empty variants are also ill-formed (variant<monostate> can be used instead).
+//
+// A variant is permitted to hold the same type more than once, and to hold
+// differently cv-qualified versions of the same type. As with unions, the
+// default-initialized variant holds a value of its first alternative, unless
+// that alternative is not default-constructible (in which case default
+// constructor won't compile: the helper class monostate can be used to make
+// such variants default-constructible)
+//
+// Given defect 2901, the eastl::variant implementation does not provide the
+// specified allocator-aware functions. This will be re-evaluated when the LWG
+// addresses this issue in future standardization updates.
+// LWG Defect 2901: https://cplusplus.github.io/LWG/issue2901
+//
+// Allocator-extended constructors
+// template <class Alloc> variant(allocator_arg_t, const Alloc&);
+// template <class Alloc> variant(allocator_arg_t, const Alloc&, const variant&);
+// template <class Alloc> variant(allocator_arg_t, const Alloc&, variant&&);
+// template <class Alloc, class T> variant(allocator_arg_t, const Alloc&, T&&);
+// template <class Alloc, class T, class... Args> variant(allocator_arg_t, const Alloc&, in_place_type_t<T>, Args&&...);
+// template <class Alloc, class T, class U, class... Args> variant(allocator_arg_t, const Alloc&, in_place_type_t<T>, initializer_list<U>, Args&&...);
+// template <class Alloc, size_t I, class... Args> variant(allocator_arg_t, const Alloc&, in_place_index_t<I>, Args&&...);
+// template <class Alloc, size_t I, class U, class... Args> variant(allocator_arg_t, const Alloc&, in_place_index_t<I>, initializer_list<U>, Args&&...);
+//
+// 20.7.12, allocator-related traits
+// template <class T, class Alloc> struct uses_allocator;
+// template <class... Types, class Alloc> struct uses_allocator<variant<Types...>, Alloc>;
+//
+// eastl::variant doesn't support:
+// * recursive variant support
+// * strong exception guarantees as specified (we punted on the assignment problem).
+// if an exception is thrown during assignment its undefined behaviour in our implementation.
+//
+// Reference:
+// * http://en.cppreference.com/w/cpp/utility/variant
+// * https://thenewcpp.wordpress.com/2012/02/15/variadic-templates-part-3-or-how-i-wrote-a-variant-class/
+///////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VARIANT_H
+#define EASTL_VARIANT_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/in_place_t.h>
+#include <EASTL/internal/integer_sequence.h>
+#include <EASTL/meta.h>
+#include <EASTL/utility.h>
+#include <EASTL/functional.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/tuple.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/array.h>
+
+#if EASTL_EXCEPTIONS_ENABLED
+ #include <stdexcept>
+ #include <exception>
+#endif
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+#ifndef EA_COMPILER_CPP14_ENABLED
+ static_assert(false, "eastl::variant requires a C++14 compatible compiler (at least) ");
+#endif
+
+EA_DISABLE_VC_WARNING(4625) // copy constructor was implicitly defined as deleted
+
+namespace eastl
+{
+ namespace internal
+ {
+ ///////////////////////////////////////////////////////////////////////////
+ // default_construct_if_supported<T>
+ //
+ // Utility class to remove default constructor calls for types that
+ // do not support default construction.
+ //
+ // We can remove these utilities when C++17 'constexpr if' is available.
+ //
+ template<typename T, bool = eastl::is_default_constructible_v<T>>
+ struct default_construct_if_supported
+ {
+ static void call(T* pThis)
+ {
+ new (pThis) T();
+ }
+ };
+
+ template<typename T>
+ struct default_construct_if_supported<T, false>
+ {
+ static void call(T*) {} // intentionally blank
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // destroy_if_supported<T>
+ //
+ // Utility class to remove default constructor calls for types that
+ // do not support default construction.
+ //
+ // We can remove these utilities when C++17 'constexpr if' is available.
+ //
+ template<typename T, bool = eastl::is_destructible_v<T>>
+ struct destroy_if_supported
+ {
+ static void call(T* pThis)
+ {
+ pThis->~T();
+ }
+ };
+
+ template<typename T>
+ struct destroy_if_supported<T, false>
+ {
+ static void call(T* pThis) {} // intentionally blank
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // copy_if_supported<T>
+ //
+ // Utility class to remove copy constructor calls for types that
+ // do not support copying.
+ //
+ // We can remove these utilities when C++17 'constexpr if' is available.
+ //
+ template<typename T, bool = eastl::is_copy_constructible_v<T>>
+ struct copy_if_supported
+ {
+ static void call(T* pThis, T* pOther)
+ {
+ new (pThis) T(*pOther);
+ }
+ };
+
+ template<typename T>
+ struct copy_if_supported<T, false>
+ {
+ static void call(T* pThis, T* pOther) {} // intentionally blank
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // move_if_supported<T>
+ //
+ // Utility class to remove move constructor calls for types that
+ // do not support moves.
+ //
+ // We can remove these utilities when C++17 'constexpr if' is available.
+ //
+ template<typename T, bool = eastl::is_move_constructible_v<T>>
+ struct move_if_supported
+ {
+ static void call(T* pThis, T* pOther)
+ {
+ new (pThis) T(eastl::move(*pOther));
+ }
+ };
+
+ template<typename T>
+ struct move_if_supported<T, false>
+ {
+ static void call(T* pThis, T* pOther) {} // intentionally blank
+ };
+ } // namespace internal
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.3, variant_npos
+ //
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR size_t variant_npos = size_t(-1);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.10, class bad_variant_access
+ //
+ #if EASTL_EXCEPTIONS_ENABLED
+ struct bad_variant_access : public std::logic_error
+ {
+ bad_variant_access() : std::logic_error("eastl::bad_variant_access exception") {}
+ virtual ~bad_variant_access() EA_NOEXCEPT {}
+ };
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // TODO(rparolin): JUST COPY/PASTE THIS CODE
+ //
+ inline void CheckVariantCondition(bool b)
+ {
+ EA_UNUSED(b);
+ #if EASTL_EXCEPTIONS_ENABLED
+ if (!b)
+ throw bad_variant_access();
+ #elif EASTL_ASSERT_ENABLED
+ EASTL_ASSERT_MSG(b, "eastl::bad_variant_access assert");
+ #endif
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.7, class monostate
+ //
+ // Unit type intended for use as a well-behaved empty alternative in
+ // variant. A variant of non-default-constructible types may list monostate
+ // as its first alternative: this makes the variant itself default-contructible.
+ //
+ struct monostate {};
+
+ // 20.7.8, monostate relational operators
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ EA_CONSTEXPR std::strong_ordering operator<=>(monostate, monostate) EA_NOEXCEPT { return std::strong_ordering::equal; }
+#else
+ EA_CONSTEXPR bool operator> (monostate, monostate) EA_NOEXCEPT { return false; }
+ EA_CONSTEXPR bool operator< (monostate, monostate) EA_NOEXCEPT { return false; }
+ EA_CONSTEXPR bool operator!=(monostate, monostate) EA_NOEXCEPT { return false; }
+ EA_CONSTEXPR bool operator<=(monostate, monostate) EA_NOEXCEPT { return true; }
+ EA_CONSTEXPR bool operator>=(monostate, monostate) EA_NOEXCEPT { return true; }
+#endif
+ EA_CONSTEXPR bool operator==(monostate, monostate) EA_NOEXCEPT { return true; }
+
+ // 20.7.11, hash support
+ template <class T> struct hash;
+ template <> struct hash<monostate>
+ { size_t operator()(monostate) const { return static_cast<size_t>(-0x42); } };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // variant_storage
+ //
+ // This is a utility class to simplify the implementation of a storage type
+ // for a distriminted union. This utility handles the alignment, size
+ // requirements, and data access required by the variant type.
+ //
+ template<bool IsTriviallyDestructible, class... Types>
+ struct variant_storage;
+
+
+ // variant_storage
+ //
+ // specialization for non-trivial types (must call constructors and destructors)
+ //
+ template<class... Types>
+ struct variant_storage<false, Types...>
+ {
+ enum class StorageOp
+ {
+ DEFAULT_CONSTRUCT,
+ DESTROY,
+ COPY,
+ MOVE
+ };
+
+ // handler function
+ using storage_handler_ptr = void(*)(StorageOp, void*, void*);
+ using aligned_storage_impl_t = aligned_union_t<16, Types...>;
+
+ aligned_storage_impl_t mBuffer;
+ storage_handler_ptr mpHandler = nullptr;
+
+ template<typename VariantStorageT>
+ inline void DoOp(StorageOp op, VariantStorageT&& other) // bind to both rvalue and lvalues
+ {
+ if(mpHandler)
+ DoOp(StorageOp::DESTROY);
+
+ if (other.mpHandler)
+ mpHandler = other.mpHandler;
+
+ if(mpHandler)
+ mpHandler(op, (void*)&mBuffer, (void*)&other.mBuffer);
+ }
+
+ inline void DoOp(StorageOp op)
+ {
+ if(mpHandler)
+ mpHandler(op, &mBuffer, nullptr);
+ }
+
+ template<typename T>
+ static void DoOpImpl(StorageOp op, T* pThis, T* pOther)
+ {
+ switch (op)
+ {
+ case StorageOp::DEFAULT_CONSTRUCT:
+ {
+ internal::default_construct_if_supported<T>::call(pThis);
+ }
+ break;
+
+ case StorageOp::DESTROY:
+ {
+ internal::destroy_if_supported<T>::call(pThis);
+ }
+ break;
+
+ case StorageOp::COPY:
+ {
+ internal::copy_if_supported<T>::call(pThis, pOther);
+ }
+ break;
+
+ case StorageOp::MOVE:
+ {
+ internal::move_if_supported<T>::call(pThis, pOther);
+ }
+ break;
+
+ default: {} break;
+ };
+ }
+
+ public:
+ variant_storage()
+ {
+ DoOp(StorageOp::DEFAULT_CONSTRUCT);
+ }
+
+ ~variant_storage()
+ {
+ DoOp(StorageOp::DESTROY);
+ }
+
+ variant_storage(const variant_storage& other)
+ {
+ DoOp(StorageOp::COPY, other);
+ }
+
+ variant_storage(variant_storage&& other)
+ {
+ DoOp(StorageOp::MOVE, other);
+ }
+
+ variant_storage& operator=(const variant_storage& other)
+ {
+ DoOp(StorageOp::COPY, other);
+ return *this;
+ }
+
+ variant_storage& operator=(variant_storage&& other)
+ {
+ DoOp(StorageOp::MOVE, eastl::move(other));
+ return *this;
+ }
+
+ template <typename T, typename... Args>
+ void set_as(Args&&... args)
+ {
+ // NOTE(rparolin): If this assert fires there is an EASTL problem picking the size of the local buffer which
+ // variant_storage used to store types. The size selected should be large enough to hold the largest type in
+ // the user provided variant type-list.
+ static_assert(sizeof(aligned_storage_impl_t) >= sizeof(T), "T is larger than local buffer size");
+
+ using RT = remove_reference_t<T>;
+
+ new (&mBuffer) RT(eastl::forward<Args>(args)...);
+
+ mpHandler = (storage_handler_ptr)&DoOpImpl<RT>;
+ }
+
+ template <typename T, typename U, typename... Args>
+ void set_as(std::initializer_list<U> il, Args&&... args)
+ {
+ // NOTE(rparolin): If this assert fires there is an EASTL problem picking the size of the local buffer which
+ // variant_storage used to store types. The size selected should be large enough to hold the largest type in
+ // the user provided variant type-list.
+ static_assert(sizeof(aligned_storage_impl_t) >= sizeof(T), "T is larger than local buffer size");
+
+ using RT = remove_reference_t<T>;
+
+ new (&mBuffer) RT(il, eastl::forward<Args>(args)...);
+
+ mpHandler = (storage_handler_ptr)&DoOpImpl<RT>;
+ }
+
+ template<typename T>
+ T get_as()
+ {
+ static_assert(eastl::is_pointer_v<T>, "T must be a pointer type");
+ return reinterpret_cast<T>(&mBuffer);
+ }
+
+ template<typename T>
+ const T get_as() const
+ {
+ static_assert(eastl::is_pointer_v<T>, "T must be a pointer type");
+ return reinterpret_cast<const T>(reinterpret_cast<uintptr_t>(&mBuffer));
+ }
+
+ void destroy()
+ {
+ DoOp(StorageOp::DESTROY);
+ }
+ };
+
+
+ // variant_storage
+ //
+ // specialization for trivial types
+ //
+ template<class... Types>
+ struct variant_storage<true, Types...>
+ {
+ using aligned_storage_impl_t = aligned_union_t<16, Types...>;
+ aligned_storage_impl_t mBuffer;
+
+ public:
+
+ // NOTE(rparolin): Since this is the specialization for trivial types can we potentially remove all the
+ // defaulted special constructors. Consider removing this.
+ //
+ // variant_storage() = default;
+ // ~variant_storage() = default;
+ // variant_storage(const variant_storage& other) = default;
+ // variant_storage(variant_storage&& other) = default;
+ // variant_storage& operator=(const variant_storage& other) = default;
+ // variant_storage& operator=(variant_storage&& other) = default;
+
+ template <typename T, typename... Args>
+ void set_as(Args&&... args)
+ {
+ // NOTE(rparolin): If this assert fires there is an EASTL problem picking the size of the local buffer which
+ // variant_storage used to store types. The size selected should be large enough to hold the largest type in
+ // the user provided variant type-list.
+ static_assert(sizeof(aligned_storage_impl_t) >= sizeof(T), "T is larger than local buffer size");
+ new (&mBuffer) remove_reference_t<T>(eastl::forward<Args>(args)...);
+
+ // mpHandler = ...; // member does not exist in this template specialization
+ }
+
+ template <typename T, typename U, typename... Args>
+ void set_as(std::initializer_list<U> il, Args&&... args)
+ {
+ // NOTE(rparolin): If this assert fires there is an EASTL problem picking the size of the local buffer which
+ // variant_storage used to store types. The size selected should be large enough to hold the largest type in
+ // the user provided variant type-list.
+ static_assert(sizeof(aligned_storage_impl_t) >= sizeof(T), "T is larger than local buffer size");
+ new (&mBuffer) remove_reference_t<T>(il, eastl::forward<Args>(args)...);
+
+ // mpHandler = ...; // member does not exist in this template specialization
+ }
+
+ template<typename T>
+ T get_as()
+ {
+ static_assert(eastl::is_pointer_v<T>, "T must be a pointer type");
+ return reinterpret_cast<T>(&mBuffer);
+ }
+
+ template<typename T>
+ const T get_as() const
+ {
+ static_assert(eastl::is_pointer_v<T>, "T must be a pointer type");
+ return reinterpret_cast<const T>(reinterpret_cast<uintptr_t>(&mBuffer));
+ }
+
+ void destroy() {}
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2, forward-declaration for types that depend on the variant
+ //
+ template <class... Types>
+ class variant;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.3, variant_size, variant_size_v helper classes
+ //
+ template <class T> struct variant_size;
+ template <class T> struct variant_size<const T> : integral_constant<size_t, variant_size<T>::value> {};
+ template <class T> struct variant_size<volatile T> : integral_constant<size_t, variant_size<T>::value> {};
+ template <class T> struct variant_size<const volatile T> : integral_constant<size_t, variant_size<T>::value> {};
+ template <class... Types> struct variant_size<variant<Types...>> : integral_constant<size_t, sizeof...(Types)> {};
+
+ // variant_size_v template alias
+ template <typename T>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR size_t variant_size_v = variant_size<T>::value;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // variant_alternative_helper
+ //
+ // This helper does the heavy lifting of traversing the variadic type list
+ // and retrieving the type at the user provided index.
+ //
+ template <size_t I, typename... Ts>
+ struct variant_alternative_helper;
+
+ template <size_t I, typename Head, typename... Tail>
+ struct variant_alternative_helper<I, Head, Tail...>
+ { typedef typename variant_alternative_helper<I - 1, Tail...>::type type; };
+
+ template <typename Head, typename... Tail>
+ struct variant_alternative_helper<0, Head, Tail...>
+ { typedef Head type; };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.4, variant_alternative
+ //
+ template <size_t I, class T> struct variant_alternative;
+ template <size_t I, class... Types> struct variant_alternative<I, variant<Types...>> : variant_alternative_helper<I, Types...> {};
+
+ // ISO required cv-qualifer specializations
+ template <size_t I, class T> struct variant_alternative<I, const T> : add_const<typename variant_alternative<I, T>::type> {};
+ template <size_t I, class T> struct variant_alternative<I, volatile T> : add_volatile<typename variant_alternative<I, T>::type> {};
+ template <size_t I, class T> struct variant_alternative<I, const volatile T> : add_cv<typename variant_alternative<I, T>::type> {};
+
+ // variant_alternative_t template alias
+ template <size_t I, class T> using variant_alternative_t = typename variant_alternative<I, T>::type;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.11, hash support
+ //
+ template <class... Types>
+ struct hash<variant<Types...> >
+ { size_t operator()(const variant<Types...>& val) const { return static_cast<size_t>(-0x42); } };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // get_if
+ //
+ template <size_t I, class... Types>
+ EA_CONSTEXPR add_pointer_t<variant_alternative_t<I, variant<Types...>>> get_if(variant<Types...>* pv) EA_NOEXCEPT
+ {
+ static_assert(I < sizeof...(Types), "get_if is ill-formed if I is not a valid index in the variant typelist");
+ using return_type = add_pointer_t<variant_alternative_t<I, variant<Types...>>>;
+
+ return (!pv || pv->index() != I) ? nullptr : pv->mStorage.template get_as<return_type>();
+ }
+
+ template <size_t I, class... Types>
+ EA_CONSTEXPR add_pointer_t<const variant_alternative_t<I, variant<Types...>>> get_if(const variant<Types...>* pv) EA_NOEXCEPT
+ {
+ static_assert(I < sizeof...(Types), "get_if is ill-formed if I is not a valid index in the variant typelist");
+ using return_type = add_pointer_t<variant_alternative_t<I, variant<Types...>>>;
+
+ return (!pv || pv->index() != I) ? nullptr : pv->mStorage.template get_as<return_type>();
+ }
+
+ template <class T, class... Types, size_t I = meta::get_type_index_v<T, Types...>>
+ EA_CONSTEXPR add_pointer_t<T> get_if(variant<Types...>* pv) EA_NOEXCEPT
+ {
+ return get_if<I>(pv);
+ }
+
+ template <class T, class... Types, size_t I = meta::get_type_index_v<T, Types...>>
+ EA_CONSTEXPR add_pointer_t<const T> get_if(const variant<Types...>* pv) EA_NOEXCEPT
+ {
+ return get_if<I>(pv);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // get
+ //
+ template <size_t I, class... Types>
+ EA_CONSTEXPR variant_alternative_t<I, variant<Types...>>& get(variant<Types...>& v)
+ {
+ static_assert(I < sizeof...(Types), "get is ill-formed if I is not a valid index in the variant typelist");
+ using return_type = add_pointer_t<variant_alternative_t<I, variant<Types...>>>;
+
+ return *v.mStorage.template get_as<return_type>();
+ }
+
+ template <size_t I, class... Types>
+ EA_CONSTEXPR variant_alternative_t<I, variant<Types...>>&& get(variant<Types...>&& v)
+ {
+ static_assert(I < sizeof...(Types), "get is ill-formed if I is not a valid index in the variant typelist");
+ using return_type = add_pointer_t<variant_alternative_t<I, variant<Types...>>>;
+
+ return eastl::move(*v.mStorage.template get_as<return_type>());
+ }
+
+ template <size_t I, class... Types>
+ EA_CONSTEXPR const variant_alternative_t<I, variant<Types...>>& get(const variant<Types...>& v)
+ {
+ static_assert(I < sizeof...(Types), "get is ill-formed if I is not a valid index in the variant typelist");
+ using return_type = add_pointer_t<variant_alternative_t<I, variant<Types...>>>;
+
+ return *v.mStorage.template get_as<return_type>();
+ }
+
+ template <size_t I, class... Types>
+ EA_CONSTEXPR const variant_alternative_t<I, variant<Types...>>&& get(const variant<Types...>&& v)
+ {
+ static_assert(I < sizeof...(Types), "get is ill-formed if I is not a valid index in the variant typelist");
+ using return_type = add_pointer_t<variant_alternative_t<I, variant<Types...>>>;
+
+ return eastl::move(*v.mStorage.template get_as<return_type>());
+ }
+
+ template <class T, class... Types, size_t I = meta::get_type_index_v<T, Types...>>
+ EA_CONSTEXPR T& get(variant<Types...>& v)
+ {
+ static_assert(I < sizeof...(Types), "get is ill-formed if I is not a valid index in the variant typelist");
+ return get<I>(v);
+ }
+
+ template <class T, class... Types, size_t I = meta::get_type_index_v<T, Types...>>
+ EA_CONSTEXPR T&& get(variant<Types...>&& v)
+ {
+ static_assert(I < sizeof...(Types), "get is ill-formed if I is not a valid index in the variant typelist");
+ return get<I>(eastl::move(v));
+ }
+
+ template <class T, class... Types, size_t I = meta::get_type_index_v<T, Types...>>
+ EA_CONSTEXPR const T& get(const variant<Types...>& v)
+ {
+ static_assert(I < sizeof...(Types), "get is ill-formed if I is not a valid index in the variant typelist");
+ return get<I>(v);
+ }
+
+ template <class T, class... Types, size_t I = meta::get_type_index_v<T, Types...>>
+ EA_CONSTEXPR const T&& get(const variant<Types...>&& v)
+ {
+ static_assert(I < sizeof...(Types), "get is ill-formed if I is not a valid index in the variant typelist");
+ return get<I>(v);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.4, value access
+ //
+ template <class T, class... Types, ssize_t I = meta::get_type_index_v<T, Types...>>
+ EA_CONSTEXPR bool holds_alternative(const variant<Types...>& v) EA_NOEXCEPT
+ {
+ // ssize_t template parameter because the value can be negative
+ return I == variant_npos ? false : (v.index() == I);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2, variant
+ //
+ template <class... Types>
+ class variant
+ {
+ static_assert(sizeof...(Types) > 0, "variant must have at least 1 type (empty variants are ill-formed)");
+ static_assert(disjunction_v<is_void<Types>...> == false, "variant does not allow void as an alternative type");
+ static_assert(disjunction_v<is_reference<Types>...> == false, "variant does not allow references as an alternative type");
+ static_assert(disjunction_v<is_array<Types>...> == false, "variant does not allow arrays as an alternative type");
+
+ using variant_index_t = size_t;
+ using variant_storage_t = variant_storage<conjunction_v<is_trivially_destructible<Types>...>, Types...>;
+ using T_0 = variant_alternative_t<0, variant<Types...>>; // alias for the 1st type in the variadic pack
+
+ ///////////////////////////////////////////////////////////////////////////
+ // variant data members
+ //
+ variant_index_t mIndex;
+ variant_storage_t mStorage;
+
+ public:
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2.1, constructors
+ //
+
+ // Only participates in overload resolution when the first alternative is default constructible
+ template <typename TT0 = T_0, typename = enable_if_t<is_default_constructible_v<TT0>>>
+ EA_CONSTEXPR variant() EA_NOEXCEPT : mIndex(variant_npos), mStorage()
+ {
+ mIndex = static_cast<variant_index_t>(0);
+ mStorage.template set_as<T_0>();
+ }
+
+ // Only participates in overload resolution if is_copy_constructible_v<T_i> is true for all T_i in Types....
+ template <bool enable = conjunction_v<is_copy_constructible<Types>...>,
+ typename = enable_if_t<enable>> // add a dependent type to enable sfinae
+ variant(const variant& other)
+ {
+ if (this != &other)
+ {
+ mIndex = other.mIndex;
+ mStorage = other.mStorage;
+ }
+ }
+
+ // Only participates in overload resolution if is_move_constructible_v<T_i> is true for all T_i in Types...
+ template <bool enable = conjunction_v<is_move_constructible<Types>...>, typename = enable_if_t<enable>> // add a dependent type to enable sfinae
+ EA_CONSTEXPR variant(variant&& other) EA_NOEXCEPT(conjunction_v<is_move_constructible<Types>...>)
+ : mIndex(variant_npos), mStorage()
+ {
+ if(this != &other)
+ {
+ mIndex = other.mIndex;
+ mStorage = eastl::move(other.mStorage);
+ }
+ }
+
+ // Conversion constructor
+ template <typename T,
+ typename T_j = meta::overload_resolution_t<T, meta::overload_set<Types...>>,
+ typename = enable_if_t<!is_same_v<decay_t<T>, variant>>,
+ size_t I = meta::get_type_index_v<decay_t<T_j>, Types...>>
+ EA_CONSTEXPR variant(T&& t) EA_NOEXCEPT(is_nothrow_constructible_v<T_j, T>)
+ : mIndex(variant_npos), mStorage()
+ {
+ static_assert(I >= 0, "T not found in type-list.");
+ static_assert((meta::type_count_v<T_j, Types...> == 1), "function overload is not unique - duplicate types in type list");
+
+ mIndex = static_cast<variant_index_t>(I);
+ mStorage.template set_as<T_j>(eastl::forward<T>(t));
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2.1, in_place_t constructors
+ //
+ template <
+ class T,
+ class... Args,
+ class = enable_if_t<conjunction_v<meta::duplicate_type_check<T, Types...>, is_constructible<T, Args...>>, T>>
+ EA_CPP14_CONSTEXPR explicit variant(in_place_type_t<T>, Args&&... args)
+ : variant(in_place<meta::get_type_index_v<T, Types...>>, eastl::forward<Args>(args)...)
+ {}
+
+ template <
+ class T,
+ class U,
+ class... Args,
+ class = enable_if_t<conjunction_v<meta::duplicate_type_check<T, Types...>, is_constructible<T, Args...>>, T>>
+ EA_CPP14_CONSTEXPR explicit variant(in_place_type_t<T>, std::initializer_list<U> il, Args&&... args)
+ : variant(in_place<meta::get_type_index_v<T, Types...>>, il, eastl::forward<Args>(args)...)
+ {}
+
+ template <size_t I,
+ class... Args,
+ class = enable_if_t<conjunction_v<integral_constant<bool, (I < sizeof...(Types))>,
+ is_constructible<meta::get_type_at_t<I, Types...>, Args...>>>>
+ EA_CPP14_CONSTEXPR explicit variant(in_place_index_t<I>, Args&&... args)
+ : mIndex(I)
+ {
+ mStorage.template set_as<meta::get_type_at_t<I, Types...>>(eastl::forward<Args>(args)...);
+ }
+
+ template <size_t I,
+ class U,
+ class... Args,
+ class = enable_if_t<conjunction_v<integral_constant<bool, (I < sizeof...(Types))>,
+ is_constructible<meta::get_type_at_t<I, Types...>, Args...>>>>
+ EA_CPP14_CONSTEXPR explicit variant(in_place_index_t<I>, std::initializer_list<U> il, Args&&... args)
+ : mIndex(I)
+ {
+ mStorage.template set_as<meta::get_type_at_t<I, Types...>>(il, eastl::forward<Args>(args)...);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2.2, destructor
+ //
+ ~variant() = default;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2.4, modifiers
+ //
+
+ // Equivalent to emplace<I>(std::forward<Args>(args)...), where I is the zero-based index of T in Types....
+ // This overload only participates in overload resolution if std::is_constructible_v<T, Args...> is true, and T
+ // occurs exactly once in Types...
+ template <
+ class T,
+ class... Args,
+ size_t I = meta::get_type_index_v<T, Types...>,
+ typename = enable_if_t<conjunction_v<is_constructible<T, Args...>, meta::duplicate_type_check<T, Types...>>>>
+ decltype(auto) emplace(Args&&... args)
+ {
+ return emplace<I>(eastl::forward<Args>(args)...);
+ }
+
+ // Equivalent to emplace<I>(il, std::forward<Args>(args)...), where I is the zero-based index of T in Types....
+ // This overload only participates in overload resolution if std::is_constructible_v<T,
+ // std::initializer_list<U>&, Args...> is true, and T occurs exactly once in Types...
+ template <class T,
+ class U,
+ class... Args,
+ size_t I = meta::get_type_index_v<T, Types...>,
+ typename = enable_if_t<conjunction_v<is_constructible<T, std::initializer_list<U>&, Args...>,
+ meta::duplicate_type_check<T, Types...>>>>
+ decltype(auto) emplace(std::initializer_list<U> il, Args&&... args)
+ {
+ return emplace<I>(il, eastl::forward<T>(args)...);
+ }
+
+ // First, destroys the currently contained value (if any). Then direct-initializes the contained value as if
+ // constructing a value of type T_I with the arguments std::forward<Args>(args).... If an exception is thrown,
+ // *this may become valueless_by_exception. This overload only participates in overload resolution if
+ // std::is_constructible_v<T_I, Args...> is true. The behavior is undefined if I is not less than
+ // sizeof...(Types).
+ //
+ template <size_t I,
+ class... Args,
+ typename T = meta::get_type_at_t<I, Types...>,
+ typename =
+ enable_if_t<conjunction_v<is_constructible<T, Args...>, meta::duplicate_type_check<T, Types...>>>>
+ variant_alternative_t<I, variant>& emplace(Args&&... args)
+ {
+ if (!valueless_by_exception())
+ {
+ mStorage.destroy();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ mIndex = static_cast<variant_index_t>(variant_npos);
+ #endif
+ }
+
+ mStorage.template set_as<T>(eastl::forward<Args>(args)...);
+ mIndex = static_cast<variant_index_t>(I);
+ return *reinterpret_cast<T*>(&mStorage.mBuffer);
+ }
+
+ // First, destroys the currently contained value (if any). Then direct-initializes the contained value as if
+ // constructing a value of type T_I with the arguments il, std::forward<Args>(args).... If an exception is
+ // thrown, *this may become valueless_by_exception. This overload only participates in overload resolution if
+ // std::is_constructible_v<T_I, initializer_list<U>&, Args...> is true. The behavior is undefined if I is not
+ // less than sizeof...(Types).
+ //
+ template <size_t I,
+ class U,
+ class... Args,
+ typename T = meta::get_type_at_t<I, Types...>,
+ typename = enable_if_t<conjunction_v<is_constructible<T, std::initializer_list<U>&, Args...>, meta::duplicate_type_check<T, Types...>>>>
+ variant_alternative_t<I, variant>& emplace(std::initializer_list<U> il, Args&&... args)
+ {
+ if (!valueless_by_exception())
+ {
+ mStorage.destroy();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ mIndex = static_cast<variant_index_t>(variant_npos);
+ #endif
+ }
+
+ mStorage.template set_as<T>(il, eastl::forward<Args>(args)...);
+ mIndex = static_cast<variant_index_t>(I);
+ return *reinterpret_cast<T*>(&mStorage.mBuffer);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2.3, assignment
+ //
+ template <class T,
+ typename T_j = meta::overload_resolution_t<T, meta::overload_set<Types...>>,
+ ssize_t I = meta::get_type_index_v<decay_t<T_j>, Types...>,
+ typename = enable_if_t<!eastl::is_same_v<decay_t<T>, variant> && eastl::is_assignable_v<T_j&, T> &&
+ eastl::is_constructible_v<T_j, T>>>
+ EA_CPP14_CONSTEXPR variant& operator=(T&& t)
+ EA_NOEXCEPT(conjunction_v<is_nothrow_assignable<T_j&, T>, is_nothrow_constructible<T_j, T>>)
+ {
+ static_assert(I >= 0, "T not found in type-list.");
+ static_assert((meta::type_count_v<T_j, Types...> == 1),
+ "function overload is not unique - duplicate types in type list");
+
+ if (!valueless_by_exception())
+ mStorage.destroy();
+
+ mIndex = static_cast<variant_index_t>(I);
+ mStorage.template set_as<T_j>(eastl::forward<T>(t));
+ return *this;
+ }
+
+
+ // Only participates in overload resolution if is_copy_constructible_v<T_i> && is_copy_assignable_v<T_i> is true
+ // for all T_i in Types....
+ template <bool enable = conjunction_v<conjunction<is_copy_constructible<Types>...>,
+ conjunction<is_copy_assignable<Types>...>>,
+ typename = enable_if_t<enable>> // add a dependent type to enable sfinae
+ variant& operator=(const variant& other)
+ {
+ if (this != &other)
+ {
+ mIndex = other.mIndex;
+ mStorage = other.mStorage;
+ }
+ return *this;
+ }
+
+ // Only participates in overload resolution if is_move_constructible_v<T_i> && is_move_assignable_v<T_i> is true for all T_i in Types....
+ template <bool enable = conjunction_v<conjunction<is_move_constructible<Types>...>,
+ conjunction<is_move_assignable<Types>...>>,
+ typename = enable_if_t<enable>> // add a dependent type to enable sfinae
+ variant& operator=(variant&& other)
+ EA_NOEXCEPT(conjunction_v<conjunction<is_nothrow_move_constructible<Types>...>,
+ conjunction<is_nothrow_move_assignable<Types>...>>)
+ {
+ if (this != &other)
+ {
+ mIndex = eastl::move(other.mIndex);
+ mStorage = eastl::move(other.mStorage);
+ }
+ return *this;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2.5, value status
+ //
+ EA_CONSTEXPR size_t index() const EA_NOEXCEPT
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ return valueless_by_exception() ? variant_npos : mIndex;
+ #else
+ return mIndex;
+ #endif
+ }
+
+ EA_CONSTEXPR bool valueless_by_exception() const EA_NOEXCEPT
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ return mIndex == variant_npos;
+ #else
+ return false;
+ #endif
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.2.6, swap
+ //
+ void swap(variant& other)
+ EA_NOEXCEPT(conjunction_v<is_nothrow_move_constructible<Types>..., is_nothrow_swappable<Types>...>)
+ {
+ eastl::swap(mIndex, other.mIndex);
+ eastl::swap(mStorage, other.mStorage);
+ }
+
+ private:
+ // NOTE(rparolin): get_if accessors require internal access to the variant storage class
+ template <size_t I, class... Types2> friend EA_CONSTEXPR add_pointer_t< variant_alternative_t<I, variant<Types2...>>> get_if( variant<Types2...>* pv) EA_NOEXCEPT;
+ template <size_t I, class... Types2> friend EA_CONSTEXPR add_pointer_t<const variant_alternative_t<I, variant<Types2...>>> get_if(const variant<Types2...>* pv) EA_NOEXCEPT;
+
+ // NOTE(rparolin): get accessors require internal access to the variant storage class
+ template <size_t I, class... Types2> friend EA_CONSTEXPR variant_alternative_t<I, variant<Types2...>>& get(variant<Types2...>& v);
+ template <size_t I, class... Types2> friend EA_CONSTEXPR variant_alternative_t<I, variant<Types2...>>&& get(variant<Types2...>&& v);
+ template <size_t I, class... Types2> friend EA_CONSTEXPR const variant_alternative_t<I, variant<Types2...>>& get(const variant<Types2...>& v);
+ template <size_t I, class... Types2> friend EA_CONSTEXPR const variant_alternative_t<I, variant<Types2...>>&& get(const variant<Types2...>&& v);
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.9, swap
+ //
+ template <class... Types>
+ void swap(variant<Types...>& lhs, variant<Types...>& rhs)
+ EA_NOEXCEPT(EA_NOEXCEPT(lhs.swap(rhs)))
+ {
+ lhs.swap(rhs);
+ }
+
+
+ // visit is a bit convoluted, in order to fulfill a few requirements:
+ // - It must support visiting multiple variants using a single visitor and a single function call. The
+ // visitor in this case should have one function for each possible combination of types:
+ //
+ // struct MyVisitor {
+ // void operator()(int, int);
+ // void operator()(string, string);
+ // void operator()(int, string);
+ // void operator()(string, int);
+ // };
+ //
+ // variant<int, string> a = 42;
+ // variant<int, string> b = "hello";
+ // visit(MyVisitor{}, a, b); // calls MyVisitor::operator()(int, string)
+ //
+ // - It must be declared constexpr
+ // - It must be constant-time for the case of visiting a single variant
+ //
+ // - 20.7.7 states that variant visitation requires all combinations of visitors to return the same type.
+ //
+ // NOTE(mwinkler):
+ // Visit creates an N-Dimensional matrix whereby each dimension is M wide.
+ // Where N == sizeof...(Variants) and M == variant_size_v<Variant>
+ //
+ // variant<int, bool, float> v;
+ // visit(Visitor{}, v, v);
+ //
+ // This creates a 3x3 matrix of potential visitors.
+ // The argument indices into the variants are as follows.
+ // [0, 0], [0, 1], [0, 2]
+ // [1, 0], [1, 1], [1, 2]
+ // [2, 0], [2, 1], [2, 2]
+ //
+ // These indices are compile-time constants but the variants have a runtime index.
+ // Therefore we must instantiate an NxNxN... matrix of function pointers who are
+ // templated on the indices based on their position in the matrix and then
+ // at runtime index into the array to call the correct function pointer that can
+ // get the correct alternatives in the variants.
+ //
+ // There are a couple of ways to do this. We can construct the matrix bottom up or top down.
+ //
+ // Constructing a matrix bottom up would look something as follows.
+ //
+ // make_visitor_matrix_recurse(eastl::index_sequence<>{}, eastl::make_index_sequence<eastl::variant_size_v<eastl::decay_t<Variants>>>{}...);
+ //
+ // make_visitor_matrix_recurse(eastl::index_sequence<Is...>) { return templated function pointer on Is... }
+ //
+ // make_visitor_matrix_recurse(eastl::index_sequence<Is...>, eastl::index_sequence<Js...>, RestIndex... rest)
+ // return make_array(make_visitor_matrix_recurse(eastl::index_sequence<Is..., Js>{}, rest...)...);
+ //
+ // Essentially we construct the matrix bottom up, row by row of indices and return an array of function pointers.
+ // The end result is a NxNxN... array on the stack which can be indexed by each variant in order as follows,
+ // array[v0.index()][v1.index()][vn.index()]();
+ //
+ // The downside with this approach is the massive NxNxN... array that is created on the stack.
+ //
+ // The other approach is to build the matrix top down and use tail recursion to ensure there is only one
+ // N sized array on the stack. The downside here is the extra function calls, but we feel this approach provides
+ // a good balance between performance and memory usage.
+ //
+ // We construct the matrix top down by first creating an N sized array that is indexed by the first variant.
+ // This calls a function that recursively creates another N sized array that is indexed by the second variant.
+ // The recursion continues until we reach the base case which is the last variant. At this point we know
+ // the compile-time value of the N indices needed to get each alternative from each variant to invoke the visitor upon.
+ // Essentially we create a tree of function pointers like so.
+ //
+ //
+ // +------------------------------------------------------------------+
+ // | |
+ // | 0 1 N |
+ // | |
+ // | |
+ // +----+---------------------------+---------------------------------+
+ // | |
+ // | |
+ // | |
+ // | |
+ // | |
+ // +--------------------------+-----------------+ +----+------------------------------------+
+ // | | | |
+ // |0,0 0,1 0,N| |1,0 1,1 1,N|
+ // | | | |
+ // | | | |
+ // +--------------------------------------------+ +-----------------------------------------+
+ //
+ // Essentially each call creates a N sized array of function pointers that is the concatention of the indices known so far
+ // and the index of itself in the array whereby the leaf function pointer does the final invoke of the visitor.
+ //
+
+ // Since decltype() is not one of the contexts where an overloaded function can be used without arguments;
+ // We use this function to deduce the function pointer types.
+ // We also return an eastl::array<> since we cannot return C-style arrays as value types.
+ template <typename T>
+ static EA_CONSTEXPR array<decay_t<T>, 1> make_visitor_array(T&& t)
+ {
+ return { { eastl::forward<T>(t) } };
+ }
+
+ template <typename T, typename... Ts>
+ static EA_CONSTEXPR array<decay_t<T>, sizeof...(Ts) + 1> make_visitor_array(T&& t, Ts&&... ts)
+ {
+ static_assert(conjunction_v<is_same<decay_t<T>, decay_t<Ts>>...>, "`visit` variant visitation requires that all visitors have the same return type!");
+
+ return { { eastl::forward<T>(t), eastl::forward<Ts>(ts)... } };
+ }
+
+
+ template <size_t N, typename Variant, typename... Variants, eastl::enable_if_t<N == 0, int> = 0>
+ static EA_CONSTEXPR decltype(auto) get_variant_n(Variant&& variant, Variants&&... variants)
+ {
+ return eastl::forward<Variant>(variant);
+ }
+
+ template <size_t N, typename Variant, typename... Variants, eastl::enable_if_t<N != 0, int> = 0>
+ static EA_CONSTEXPR decltype(auto) get_variant_n(Variant&& variant, Variants&&... variants)
+ {
+ return get_variant_n<N - 1>(eastl::forward<Variants>(variants)...);
+ }
+
+
+ template <typename Visitor, typename Index, typename Array, typename... Variants>
+ static EA_CONSTEXPR decltype(auto) call_visitor_at_index(Array&& array, Index index, Visitor&& visitor, Variants&&... variants)
+ {
+ return array[static_cast<typename Array::size_type>(index)](eastl::forward<Visitor>(visitor), eastl::forward<Variants>(variants)...);
+ }
+
+ template <size_t VariantsIndex, typename Visitor, typename Array, typename... Variants>
+ static EA_CONSTEXPR decltype(auto) call_visitor_at(Array&& array, Visitor&& visitor, Variants&&... variants)
+ {
+ return call_visitor_at_index(eastl::forward<Array>(array),
+ get_variant_n<VariantsIndex>(eastl::forward<Variants>(variants)...).index(),
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+ }
+
+
+ // abstracts calling visit on 2 or more variants
+ template <typename VariantIndexSequence, typename Visitor, typename... Variants>
+ struct visitor_caller_n;
+
+ template <typename Visitor, typename... Variants, size_t... VariantIndices>
+ struct visitor_caller_n<index_sequence<VariantIndices...>, Visitor, Variants...>
+ {
+ using return_type = invoke_result_t<Visitor, variant_alternative_t<0, remove_reference_t<Variants>>...>;
+
+ template <size_t... VariantArgIndices>
+ static EA_CONSTEXPR return_type invoke_visitor_leaf(Visitor&& visitor, Variants&&... variants)
+ {
+ return eastl::invoke(eastl::forward<Visitor>(visitor),
+ eastl::get<VariantArgIndices>(eastl::forward<Variants>(variants))...);
+ }
+
+ template <size_t... VariantArgIndices>
+ static EA_CONSTEXPR auto make_invoke_visitor_leaf(index_sequence<VariantArgIndices...>)
+ {
+ return &invoke_visitor_leaf<VariantArgIndices...>;
+ }
+
+
+ template <size_t... VariantArgIndices>
+ static EA_CONSTEXPR return_type invoke_visitor_recurse(Visitor&& visitor, Variants&&... variants)
+ {
+ return call(index_sequence<VariantArgIndices...>{},
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+ }
+
+ template <size_t... VariantArgIndices>
+ static EA_CONSTEXPR auto make_invoke_visitor_recurse(index_sequence<VariantArgIndices...>)
+ {
+ return &invoke_visitor_recurse<VariantArgIndices...>;
+ }
+
+
+ template <typename VariantArgIndexSequence, enable_if_t<internal::index_sequence_size_v<VariantArgIndexSequence> + 1 == sizeof...(Variants), int> = 0>
+ static EA_CPP14_CONSTEXPR decltype(auto) call(VariantArgIndexSequence, Visitor&& visitor, Variants&&... variants)
+ {
+ EA_CPP14_CONSTEXPR auto callers = make_visitor_array(make_invoke_visitor_leaf(meta::double_pack_expansion_t<VariantArgIndexSequence, VariantIndices>{})...);
+
+ return call_visitor_at<internal::index_sequence_size_v<VariantArgIndexSequence>>(eastl::move(callers),
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+ }
+
+ template <typename VariantArgIndexSequence, enable_if_t<internal::index_sequence_size_v<VariantArgIndexSequence> + 1 != sizeof...(Variants), int> = 0>
+ static EA_CPP14_CONSTEXPR decltype(auto) call(VariantArgIndexSequence, Visitor&& visitor, Variants&&... variants)
+ {
+ EA_CPP14_CONSTEXPR auto callers = make_visitor_array(make_invoke_visitor_recurse(meta::double_pack_expansion_t<VariantArgIndexSequence, VariantIndices>{})...);
+
+ return call_visitor_at<internal::index_sequence_size_v<VariantArgIndexSequence>>(eastl::move(callers),
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+ }
+
+ };
+
+ template <typename VariantIndexSequence, typename Visitor, typename... Variants>
+ static EA_CONSTEXPR decltype(auto) call_initial_n(VariantIndexSequence, Visitor&& visitor, Variants&&... variants)
+ {
+ return visitor_caller_n<VariantIndexSequence, Visitor, Variants...>::call(index_sequence<>{}, eastl::forward<Visitor>(visitor), eastl::forward<Variants>(variants)...);
+ }
+
+
+ // abstracts calling visit on 2 or more variants with return types convertible to R
+ template <typename R, typename VariantIndexSequence, typename Visitor, typename... Variants>
+ struct visitor_caller_n_r;
+
+ template <typename R, size_t... VariantIndices, typename Visitor, typename... Variants>
+ struct visitor_caller_n_r<R, index_sequence<VariantIndices...>, Visitor, Variants...>
+ {
+ template <typename R_, size_t... VariantArgIndices>
+ struct visitor_leaf_r
+ {
+ static EA_CONSTEXPR R_ invoke_visitor_leaf_r(Visitor&& visitor, Variants&&... variants)
+ {
+ return eastl::invoke(eastl::forward<Visitor>(visitor),
+ eastl::get<VariantArgIndices>(eastl::forward<Variants>(variants))...);
+ }
+ };
+
+ // void return type must discard the return values of the visitor even if the visitor returns a value.
+ template <size_t... VariantArgIndices>
+ struct visitor_leaf_r<void, VariantArgIndices...>
+ {
+ static EA_CONSTEXPR void invoke_visitor_leaf_r(Visitor&& visitor, Variants&&... variants)
+ {
+ eastl::invoke(eastl::forward<Visitor>(visitor),
+ eastl::get<VariantArgIndices>(eastl::forward<Variants>(variants))...);
+ }
+ };
+ template <size_t... VariantArgIndices> struct visitor_leaf_r<const void, VariantArgIndices...> : public visitor_leaf_r<void, VariantArgIndices...> {};
+ template <size_t... VariantArgIndices> struct visitor_leaf_r<volatile void, VariantArgIndices...> : public visitor_leaf_r<void, VariantArgIndices...> {};
+ template <size_t... VariantArgIndices> struct visitor_leaf_r<const volatile void, VariantArgIndices...> : public visitor_leaf_r<void, VariantArgIndices...> {};
+
+ template <typename R_, size_t... VariantArgIndices>
+ static EA_CONSTEXPR auto make_invoke_visitor_leaf_r(index_sequence<VariantArgIndices...>)
+ {
+ return &visitor_leaf_r<R_, VariantArgIndices...>::invoke_visitor_leaf_r;
+ }
+
+
+ template <typename R_, size_t... VariantArgIndices>
+ struct visitor_recurse_r
+ {
+ static EA_CONSTEXPR R_ invoke_visitor_recurse_r(Visitor&& visitor, Variants&&... variants)
+ {
+ return call_r(index_sequence<VariantArgIndices...>{},
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+ }
+ };
+
+ template <typename R_, size_t... VariantArgIndices>
+ static EA_CONSTEXPR auto make_invoke_visitor_recurse_r(index_sequence<VariantArgIndices...>)
+ {
+ return &visitor_recurse_r<R_, VariantArgIndices...>::invoke_visitor_recurse_r;
+ }
+
+
+ template <typename VariantArgIndexSequence, enable_if_t<internal::index_sequence_size_v<VariantArgIndexSequence> + 1 == sizeof...(Variants), int> = 0>
+ static EA_CPP14_CONSTEXPR decltype(auto) call_r(VariantArgIndexSequence, Visitor&& visitor, Variants&&... variants)
+ {
+ EA_CPP14_CONSTEXPR auto callers = make_visitor_array(make_invoke_visitor_leaf_r<R>(meta::double_pack_expansion_t<VariantArgIndexSequence, VariantIndices>{})...);
+
+ return call_visitor_at<internal::index_sequence_size_v<VariantArgIndexSequence>>(eastl::move(callers),
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+ }
+
+ template <typename VariantArgIndexSequence, enable_if_t<internal::index_sequence_size_v<VariantArgIndexSequence> + 1 != sizeof...(Variants), int> = 0>
+ static EA_CPP14_CONSTEXPR decltype(auto) call_r(VariantArgIndexSequence, Visitor&& visitor, Variants&&... variants)
+ {
+ EA_CPP14_CONSTEXPR auto callers = make_visitor_array(make_invoke_visitor_recurse_r<R>(meta::double_pack_expansion_t<VariantArgIndexSequence, VariantIndices>{})...);
+
+ return call_visitor_at<internal::index_sequence_size_v<VariantArgIndexSequence>>(eastl::move(callers),
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+ }
+
+ };
+
+ template <typename R, typename VariantIndexSequence, typename Visitor, typename... Variants>
+ static EA_CONSTEXPR decltype(auto) call_initial_n_r(VariantIndexSequence, Visitor&& visitor, Variants&&... variants)
+ {
+ return visitor_caller_n_r<R, VariantIndexSequence, Visitor, Variants...>::call_r(index_sequence<>{}, eastl::forward<Visitor>(visitor), eastl::forward<Variants>(variants)...);
+ }
+
+
+ // abstracts calling visit on a single variant
+ struct visitor_caller_one
+ {
+
+ template <typename Visitor, typename Variant, size_t I>
+ static EA_CONSTEXPR decltype(auto) invoke_visitor(Visitor&& visitor, Variant&& variant)
+ {
+ return eastl::invoke(eastl::forward<Visitor>(visitor),
+ eastl::get<I>(eastl::forward<Variant>(variant)));
+ }
+
+ template <typename Visitor, typename Variant, size_t... VariantArgIndices>
+ static EA_CPP14_CONSTEXPR decltype(auto) call_index(Visitor&& visitor, Variant&& variant, index_sequence<VariantArgIndices...>)
+ {
+ EA_CPP14_CONSTEXPR auto callers = make_visitor_array((&invoke_visitor<Visitor, Variant, VariantArgIndices>)...);
+
+ return call_visitor_at_index(eastl::move(callers), eastl::forward<Variant>(variant).index(),
+ eastl::forward<Visitor>(visitor), eastl::forward<Variant>(variant));
+ }
+
+ template <typename Visitor, typename Variant>
+ static EA_CONSTEXPR decltype(auto) call(Visitor&& visitor, Variant&& variant)
+ {
+ return call_index(eastl::forward<Visitor>(visitor),
+ eastl::forward<Variant>(variant),
+ make_index_sequence<variant_size_v<decay_t<Variant>>>{});
+ }
+
+ };
+
+ template <typename R>
+ struct visitor_r
+ {
+ template <typename Visitor, typename Variant, size_t I>
+ static EA_CONSTEXPR R invoke_visitor_r(Visitor&& visitor, Variant&& variant)
+ {
+ return eastl::invoke(eastl::forward<Visitor>(visitor),
+ eastl::get<I>(eastl::forward<Variant>(variant)));
+ }
+ };
+
+ // void return type must discard the return values of the visitor even if the visitor returns a value.
+ template <>
+ struct visitor_r<void>
+ {
+ template <typename Visitor, typename Variant, size_t I>
+ static EA_CONSTEXPR void invoke_visitor_r(Visitor&& visitor, Variant&& variant)
+ {
+ eastl::invoke(eastl::forward<Visitor>(visitor),
+ eastl::get<I>(eastl::forward<Variant>(variant)));
+ }
+ };
+
+ template<> struct visitor_r<const void> : public visitor_r<void> {};
+ template<> struct visitor_r<volatile void> : public visitor_r<void> {};
+ template<> struct visitor_r<const volatile void> : public visitor_r<void> {};
+
+ // abstracts calling visit on a single variant with return types convertible to R
+ struct visitor_caller_one_r
+ {
+ template <typename R, typename Visitor, typename Variant, size_t... VariantArgIndices>
+ static EA_CPP14_CONSTEXPR decltype(auto) call_index_r(Visitor&& visitor, Variant&& variant, eastl::index_sequence<VariantArgIndices...>)
+ {
+ EA_CPP14_CONSTEXPR auto callers = make_visitor_array(&visitor_r<R>::template invoke_visitor_r<Visitor, Variant, VariantArgIndices>...);
+
+ return callers[static_cast<typename decltype(callers)::size_type>(eastl::forward<Variant>(variant).index())](eastl::forward<Visitor>(visitor),
+ eastl::forward<Variant>(variant));
+ }
+
+ template <typename R, typename Visitor, typename Variant>
+ static EA_CONSTEXPR decltype(auto) call_r(Visitor&& visitor, Variant&& variant)
+ {
+ return call_index_r<R>(eastl::forward<Visitor>(visitor), eastl::forward<Variant>(variant), eastl::make_index_sequence<eastl::variant_size_v<eastl::decay_t<Variant>>>());
+ }
+
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.6, visitation
+ //
+ // Example:
+ // struct MyVisitor
+ // {
+ // auto operator()(int) {};
+ // auto operator()(long) {};
+ // auto operator()(string) {};
+ // };
+ //
+ // variant<int, long, string> v = "Hello, Variant";
+ // visit(MyVisitor{}, v); // calls MyVisitor::operator()(string) {}
+ //
+
+ template <typename... Variants>
+ static EA_CPP14_CONSTEXPR void visit_throw_bad_variant_access(Variants&&... variants)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ using bool_array_type = bool[];
+ bool badAccess = false;
+
+ (void)bool_array_type{ (badAccess |= eastl::forward<Variants>(variants).valueless_by_exception(), false)... };
+
+ if (badAccess)
+ {
+ throw bad_variant_access();
+ }
+ #endif
+ }
+
+ template <typename... Variants>
+ static EA_CONSTEXPR void visit_static_assert_check(Variants&&... variants)
+ {
+ static_assert(sizeof...(Variants) > 0, "`visit` at least one variant instance must be passed as an argument to the visit function");
+
+ using variant_type = decay_t<meta::get_type_at_t<0, Variants...>>;
+ static_assert(conjunction_v<is_same<variant_type, decay_t<Variants>>...>,
+ "`visit` all variants passed to eastl::visit() must have the same type");
+ }
+
+
+ // visit
+ //
+ template <typename Visitor, typename Variant>
+ EA_CPP14_CONSTEXPR decltype(auto) visit(Visitor&& visitor, Variant&& variant)
+ {
+ visit_static_assert_check(eastl::forward<Variant>(variant));
+
+ visit_throw_bad_variant_access(eastl::forward<Variant>(variant));
+
+ return visitor_caller_one::call(eastl::forward<Visitor>(visitor),
+ eastl::forward<Variant>(variant));
+ }
+
+ template <typename Visitor, typename... Variants>
+ EA_CPP14_CONSTEXPR decltype(auto) visit(Visitor&& visitor, Variants&&... variants)
+ {
+ visit_static_assert_check(eastl::forward<Variants>(variants)...);
+
+ visit_throw_bad_variant_access(eastl::forward<Variants>(variants)...);
+
+ return call_initial_n(make_index_sequence<variant_size_v<decay_t<meta::get_type_at_t<0, Variants...>>>>{},
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+
+ }
+
+ template <typename R, typename Visitor, typename Variant, eastl::enable_if_t<!eastl::is_same_v<R, Visitor>, int> = 0>
+ EA_CPP14_CONSTEXPR R visit(Visitor&& visitor, Variant&& variant)
+ {
+ visit_static_assert_check(eastl::forward<Variant>(variant));
+
+ visit_throw_bad_variant_access(eastl::forward<Variant>(variant));
+
+ return visitor_caller_one_r::call_r<R>(eastl::forward<Visitor>(visitor),
+ eastl::forward<Variant>(variant));
+ }
+
+ template <typename R, typename Visitor, typename... Variants, eastl::enable_if_t<!eastl::is_same_v<R, Visitor>, int> = 0>
+ EA_CPP14_CONSTEXPR R visit(Visitor&& visitor, Variants&&... variants)
+ {
+ visit_static_assert_check(eastl::forward<Variants>(variants)...);
+
+ visit_throw_bad_variant_access(eastl::forward<Variants>(variants)...);
+
+ return call_initial_n_r<R>(make_index_sequence<variant_size_v<decay_t<meta::get_type_at_t<0, Variants...>>>>{},
+ eastl::forward<Visitor>(visitor),
+ eastl::forward<Variants>(variants)...);
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.5, relational operators
+ //
+ namespace internal
+ {
+
+ // For relational operators we do not need to create the NxN matrix of comparisons since we know already
+ // that both the lhs and rhs variants have the same index. We just need to compare the value of the types at that
+ // index for equality. Therefore the visitation is simpler than visit() for relational operators.
+ //
+ struct variant_relational_comparison
+ {
+ template <typename Compare, size_t I, typename Variant>
+ static EA_CONSTEXPR bool invoke_relational_visitor(const Variant& lhs, const Variant& rhs)
+ {
+ return eastl::invoke(Compare{}, eastl::get<I>(lhs), eastl::get<I>(rhs));
+ }
+
+ template <typename Compare, typename Variant, size_t... VariantArgIndices>
+ static EA_CPP14_CONSTEXPR bool call_index(const Variant& lhs, const Variant& rhs, eastl::index_sequence<VariantArgIndices...>)
+ {
+ using invoke_relational_visitor_func_ptr = bool (*)(const Variant&, const Variant&);
+
+ EA_CPP14_CONSTEXPR invoke_relational_visitor_func_ptr visitors[] = { static_cast<invoke_relational_visitor_func_ptr>(&invoke_relational_visitor<Compare, VariantArgIndices, Variant>)... };
+
+ return visitors[lhs.index()](lhs, rhs);
+ }
+
+ template <typename Compare, typename Variant>
+ static EA_CONSTEXPR bool call(const Variant& lhs, const Variant& rhs)
+ {
+ return call_index<Compare>(lhs, rhs, eastl::make_index_sequence<eastl::variant_size_v<eastl::decay_t<Variant>>>());
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Compare, size_t I, typename Variant>
+ static EA_CONSTEXPR std::compare_three_way_result_t<Variant> invoke_relational_visitor_three_way(const Variant& lhs, const Variant& rhs)
+ {
+ return eastl::invoke(Compare{}, eastl::get<I>(lhs), eastl::get<I>(rhs));
+ }
+
+ template <typename Compare, typename Variant, size_t... VariantArgIndices>
+ static EA_CONSTEXPR std::compare_three_way_result_t<Variant> call_index_three_way(const Variant& lhs, const Variant& rhs, eastl::index_sequence<VariantArgIndices...>)
+ {
+ using invoke_relational_visitor_func_ptr = std::compare_three_way_result_t<Variant> (*)(const Variant&, const Variant&);
+
+ EA_CONSTEXPR invoke_relational_visitor_func_ptr visitors[] = {static_cast<invoke_relational_visitor_func_ptr>(&invoke_relational_visitor_three_way<Compare, VariantArgIndices, Variant>)...};
+
+ return visitors[lhs.index()](lhs, rhs);
+ }
+
+ template <typename Compare, typename Variant>
+ static EA_CONSTEXPR std::compare_three_way_result_t<Variant> call_three_way(const Variant& lhs, const Variant& rhs)
+ {
+ return call_index_three_way<Compare>(lhs, rhs, eastl::make_index_sequence<eastl::variant_size_v<eastl::decay_t<Variant>>>());
+ }
+#endif
+ };
+
+ template <typename Compare, typename Variant>
+ static EA_CONSTEXPR bool CompareVariantRelational(const Variant& lhs, const Variant& rhs)
+ {
+ return variant_relational_comparison::call<Compare>(lhs, rhs);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename Compare, typename Variant>
+ static EA_CONSTEXPR std::compare_three_way_result_t<Variant> CompareVariantRelationalThreeWay(const Variant& lhs, const Variant& rhs)
+ {
+ return variant_relational_comparison::call_three_way<Compare>(lhs, rhs);
+ }
+#endif
+
+ } // namespace internal
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // 20.7.5, relational operators
+ //
+ template <class... Types>
+ EA_CPP14_CONSTEXPR bool operator==(const variant<Types...>& lhs, const variant<Types...>& rhs)
+ {
+ if (lhs.index() != rhs.index()) return false;
+ if (lhs.valueless_by_exception()) return true;
+
+ return internal::CompareVariantRelational<eastl::equal_to<>>(lhs, rhs);
+ }
+
+ template <class... Types>
+ EA_CPP14_CONSTEXPR bool operator!=(const variant<Types...>& lhs, const variant<Types...>& rhs)
+ {
+ if (lhs.index() != rhs.index()) return true;
+ if (lhs.valueless_by_exception()) return false;
+
+ return internal::CompareVariantRelational<eastl::not_equal_to<>>(lhs, rhs);
+ }
+
+ template <class... Types>
+ EA_CPP14_CONSTEXPR bool operator<(const variant<Types...>& lhs, const variant<Types...>& rhs)
+ {
+ if (rhs.valueless_by_exception()) return false;
+ if (lhs.valueless_by_exception()) return true;
+ if (lhs.index() < rhs.index()) return true;
+ if (lhs.index() > rhs.index()) return false;
+
+ return internal::CompareVariantRelational<eastl::less<>>(lhs, rhs);
+ }
+
+ template <class... Types>
+ EA_CPP14_CONSTEXPR bool operator>(const variant<Types...>& lhs, const variant<Types...>& rhs)
+ {
+ if (lhs.valueless_by_exception()) return false;
+ if (rhs.valueless_by_exception()) return true;
+ if (lhs.index() > rhs.index()) return true;
+ if (lhs.index() < rhs.index()) return false;
+
+ return internal::CompareVariantRelational<eastl::greater<>>(lhs, rhs);
+ }
+
+ template <class... Types>
+ EA_CPP14_CONSTEXPR bool operator<=(const variant<Types...>& lhs, const variant<Types...>& rhs)
+ {
+ if (lhs.valueless_by_exception()) return true;
+ if (rhs.valueless_by_exception()) return false;
+ if (lhs.index() < rhs.index()) return true;
+ if (lhs.index() > rhs.index()) return false;
+
+ return internal::CompareVariantRelational<eastl::less_equal<>>(lhs, rhs);
+ }
+
+ template <class... Types>
+ EA_CPP14_CONSTEXPR bool operator>=(const variant<Types...>& lhs, const variant<Types...>& rhs)
+ {
+ if (rhs.valueless_by_exception()) return true;
+ if (lhs.valueless_by_exception()) return false;
+ if (lhs.index() > rhs.index()) return true;
+ if (lhs.index() < rhs.index()) return false;
+
+ return internal::CompareVariantRelational<eastl::greater_equal<>>(lhs, rhs);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <class... Types> requires (std::three_way_comparable<Types> && ...)
+ EA_CONSTEXPR std::common_comparison_category_t<std::compare_three_way_result_t<Types>...> operator<=>(const variant<Types...>& lhs, const variant<Types...>& rhs)
+ {
+ if (lhs.valueless_by_exception() && rhs.valueless_by_exception()) return std::strong_ordering::equal;
+ if (lhs.valueless_by_exception()) return std::strong_ordering::less;
+ if (rhs.valueless_by_exception()) return std::strong_ordering::greater;
+ if (auto result = (lhs.index() <=> rhs.index()); result != 0) return result;
+
+ return internal::CompareVariantRelationalThreeWay<std::compare_three_way>(lhs, rhs);
+
+ }
+#endif
+
+} // namespace eastl
+
+EA_RESTORE_VC_WARNING()
+
+#endif // EASTL_VARIANT_H
diff --git a/EASTL/include/EASTL/vector.h b/EASTL/include/EASTL/vector.h
new file mode 100644
index 0000000..b6ca8dc
--- /dev/null
+++ b/EASTL/include/EASTL/vector.h
@@ -0,0 +1,2084 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a vector (array-like container), much like the C++
+// std::vector class.
+// The primary distinctions between this vector and std::vector are:
+// - vector has a couple extension functions that increase performance.
+// - vector can contain objects with alignment requirements. std::vector
+// cannot do so without a bit of tedious non-portable effort.
+// - vector supports debug memory naming natively.
+// - vector is easier to read, debug, and visualize.
+// - vector is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - vector has less deeply nested function calls and allows the user to
+// enable forced inlining in debug builds in order to reduce bloat.
+// - vector<bool> is a vector of boolean values and not a bit vector.
+// - vector guarantees that memory is contiguous and that vector::iterator
+// is nothing more than a pointer to T.
+// - vector has an explicit data() method for obtaining a pointer to storage
+// which is safe to call even if the block is empty. This avoids the
+// common &v[0], &v.front(), and &*v.begin() constructs that trigger false
+// asserts in STL debugging modes.
+// - vector data is guaranteed to be contiguous.
+// - vector has a set_capacity() function which frees excess capacity.
+// The only way to do this with std::vector is via the cryptic non-obvious
+// trick of using: vector<SomeClass>(x).swap(x);
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_VECTOR_H
+#define EASTL_VECTOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/memory.h>
+#include <EASTL/bonus/compressed_pair.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <new>
+#include <stddef.h>
+#if EASTL_EXCEPTIONS_ENABLED
+ #include <stdexcept> // std::out_of_range, std::length_error.
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4480 - nonstandard extension used: specifying underlying type for enum
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4530 4480 4571);
+
+// 4345 - Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized
+// 4244 - Argument: conversion from 'int' to 'const eastl::vector<T>::value_type', possible loss of data
+// 4127 - Conditional expression is constant
+EA_DISABLE_VC_WARNING(4345 4244 4127);
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+#if EASTL_NOMINMAX
+ #ifdef min
+ #undef min
+ #endif
+ #ifdef max
+ #undef max
+ #endif
+#endif
+
+namespace eastl
+{
+
+ /// EASTL_VECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_VECTOR_DEFAULT_NAME
+ #define EASTL_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " vector" // Unless the user overrides something, this is "EASTL vector".
+ #endif
+
+
+ /// EASTL_VECTOR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_VECTOR_DEFAULT_ALLOCATOR
+ #define EASTL_VECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_VECTOR_DEFAULT_NAME)
+ #endif
+
+
+
+ /// VectorBase
+ ///
+ /// The reason we have a VectorBase class is that it makes exception handling
+ /// simpler to implement because memory allocation is implemented entirely
+ /// in this class. If a user creates a vector which needs to allocate
+ /// memory in the constructor, VectorBase handles it. If an exception is thrown
+ /// by the allocator then the exception throw jumps back to the user code and
+ /// no try/catch code need be written in the vector or VectorBase constructor.
+ /// If an exception is thrown in the vector (not VectorBase) constructor, the
+ /// destructor for VectorBase will be called automatically (and free the allocated
+ /// memory) before the execution jumps back to the user code.
+ /// However, if the vector class were to handle both allocation and initialization
+ /// then it would have no choice but to implement an explicit try/catch statement
+ /// for all pathways that allocate memory. This increases code size and decreases
+ /// performance and makes the code a little harder read and maintain.
+ ///
+ /// The C++ standard (15.2 paragraph 2) states:
+ /// "An object that is partially constructed or partially destroyed will
+ /// have destructors executed for all its fully constructed subobjects,
+ /// that is, for subobjects for which the constructor has been completed
+ /// execution and the destructor has not yet begun execution."
+ ///
+ /// The C++ standard (15.3 paragraph 11) states:
+ /// "The fully constructed base classes and members of an object shall
+ /// be destroyed before entering the handler of a function-try-block
+ /// of a constructor or destructor for that block."
+ ///
+ template <typename T, typename Allocator>
+ struct VectorBase
+ {
+ typedef Allocator allocator_type;
+ typedef eastl_size_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1400) && (_MSC_VER <= 1600) && !EASTL_STD_CPP_ONLY // _MSC_VER of 1400 means VS2005, 1600 means VS2010. VS2012 generates errors with usage of enum:size_type.
+ enum : size_type { // Use Microsoft enum language extension, allowing for smaller debug symbols than using a static const. Users have been affected by this.
+ npos = (size_type)-1,
+ kMaxSize = (size_type)-2
+ };
+ #else
+ static const size_type npos = (size_type)-1; /// 'npos' means non-valid position or simply non-position.
+ static const size_type kMaxSize = (size_type)-2; /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues.
+ #endif
+
+ protected:
+ T* mpBegin;
+ T* mpEnd;
+ eastl::compressed_pair<T*, allocator_type> mCapacityAllocator;
+
+ T*& internalCapacityPtr() EA_NOEXCEPT { return mCapacityAllocator.first(); }
+ T* const& internalCapacityPtr() const EA_NOEXCEPT { return mCapacityAllocator.first(); }
+ allocator_type& internalAllocator() EA_NOEXCEPT { return mCapacityAllocator.second(); }
+ const allocator_type& internalAllocator() const EA_NOEXCEPT { return mCapacityAllocator.second(); }
+
+ public:
+ VectorBase();
+ VectorBase(const allocator_type& allocator);
+ VectorBase(size_type n, const allocator_type& allocator);
+
+ ~VectorBase();
+
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ protected:
+ T* DoAllocate(size_type n);
+ void DoFree(T* p, size_type n);
+ size_type GetNewCapacity(size_type currentCapacity);
+
+ }; // VectorBase
+
+
+
+
+ /// vector
+ ///
+ /// Implements a dynamic array.
+ ///
+ template <typename T, typename Allocator = EASTLAllocatorType>
+ class vector : public VectorBase<T, Allocator>
+ {
+ typedef VectorBase<T, Allocator> base_type;
+ typedef vector<T, Allocator> this_type;
+
+ public:
+ typedef T value_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference; // Maintainer note: We want to leave iterator defined as T* -- at least in release builds -- as this gives some algorithms an advantage that optimizers cannot get around.
+ typedef T* iterator; // Note: iterator is simply T* right now, but this will likely change in the future, at least for debug builds.
+ typedef const T* const_iterator; // Do not write code that relies on iterator being T*. The reason it will
+ typedef eastl::reverse_iterator<iterator> reverse_iterator; // change in the future is that a debugging iterator system will be created.
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef typename base_type::allocator_type allocator_type;
+
+ using base_type::mpBegin;
+ using base_type::mpEnd;
+ using base_type::mCapacityAllocator;
+ using base_type::npos;
+ using base_type::GetNewCapacity;
+ using base_type::DoAllocate;
+ using base_type::DoFree;
+ using base_type::internalCapacityPtr;
+ using base_type::internalAllocator;
+
+ public:
+ vector() EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(EASTL_VECTOR_DEFAULT_ALLOCATOR));
+ explicit vector(const allocator_type& allocator) EA_NOEXCEPT;
+ explicit vector(size_type n, const allocator_type& allocator = EASTL_VECTOR_DEFAULT_ALLOCATOR);
+ vector(size_type n, const value_type& value, const allocator_type& allocator = EASTL_VECTOR_DEFAULT_ALLOCATOR);
+ vector(const this_type& x);
+ vector(const this_type& x, const allocator_type& allocator);
+ vector(this_type&& x) EA_NOEXCEPT;
+ vector(this_type&& x, const allocator_type& allocator);
+ vector(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_VECTOR_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ vector(InputIterator first, InputIterator last, const allocator_type& allocator = EASTL_VECTOR_DEFAULT_ALLOCATOR);
+
+ ~vector();
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x); // TODO(c++17): noexcept(allocator_traits<Allocator>::propagate_on_container_move_assignment::value || allocator_traits<Allocator>::is_always_equal::value)
+
+ void swap(this_type& x); // TODO(c++17): noexcept(allocator_traits<Allocator>::propagate_on_container_move_assignment::value || allocator_traits<Allocator>::is_always_equal::value)
+
+ void assign(size_type n, const value_type& value);
+
+ template <typename InputIterator>
+ void assign(InputIterator first, InputIterator last);
+
+ void assign(std::initializer_list<value_type> ilist);
+
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ bool empty() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+ size_type capacity() const EA_NOEXCEPT;
+
+ void resize(size_type n, const value_type& value);
+ void resize(size_type n);
+ void reserve(size_type n);
+ void set_capacity(size_type n = base_type::npos); // Revises the capacity to the user-specified value. Resizes the container to match the capacity if the requested capacity n is less than the current size. If n == npos then the capacity is reallocated (if necessary) such that capacity == size.
+ void shrink_to_fit(); // C++11 function which is the same as set_capacity().
+
+ pointer data() EA_NOEXCEPT;
+ const_pointer data() const EA_NOEXCEPT;
+
+ reference operator[](size_type n);
+ const_reference operator[](size_type n) const;
+
+ reference at(size_type n);
+ const_reference at(size_type n) const;
+
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ void push_back(const value_type& value);
+ reference push_back();
+ void* push_back_uninitialized();
+ void push_back(value_type&& value);
+ void pop_back();
+
+ template<class... Args>
+ iterator emplace(const_iterator position, Args&&... args);
+
+ template<class... Args>
+ reference emplace_back(Args&&... args);
+
+ iterator insert(const_iterator position, const value_type& value);
+ iterator insert(const_iterator position, size_type n, const value_type& value);
+ iterator insert(const_iterator position, value_type&& value);
+ iterator insert(const_iterator position, std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ iterator insert(const_iterator position, InputIterator first, InputIterator last);
+
+ iterator erase_first(const T& value);
+ iterator erase_first_unsorted(const T& value); // Same as erase, except it doesn't preserve order, but is faster because it simply copies the last item in the vector over the erased position.
+ reverse_iterator erase_last(const T& value);
+ reverse_iterator erase_last_unsorted(const T& value); // Same as erase, except it doesn't preserve order, but is faster because it simply copies the last item in the vector over the erased position.
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ iterator erase_unsorted(const_iterator position); // Same as erase, except it doesn't preserve order, but is faster because it simply copies the last item in the vector over the erased position.
+
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+ reverse_iterator erase_unsorted(const_reverse_iterator position);
+
+ void clear() EA_NOEXCEPT;
+ void reset_lose_memory() EA_NOEXCEPT; // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ bool validate() const EA_NOEXCEPT;
+ int validate_iterator(const_iterator i) const EA_NOEXCEPT;
+
+ protected:
+ // These functions do the real work of maintaining the vector. You will notice
+ // that many of them have the same name but are specialized on iterator_tag
+ // (iterator categories). This is because in these cases there is an optimized
+ // implementation that can be had for some cases relative to others. Functions
+ // which aren't referenced are neither compiled nor linked into the application.
+ template <bool bMove> struct should_move_or_copy_tag{};
+ using should_copy_tag = should_move_or_copy_tag<false>;
+ using should_move_tag = should_move_or_copy_tag<true>;
+
+ template <typename ForwardIterator> // Allocates a pointer of array count n and copy-constructs it with [first,last).
+ pointer DoRealloc(size_type n, ForwardIterator first, ForwardIterator last, should_copy_tag);
+
+ template <typename ForwardIterator> // Allocates a pointer of array count n and copy-constructs it with [first,last).
+ pointer DoRealloc(size_type n, ForwardIterator first, ForwardIterator last, should_move_tag);
+
+ template <typename Integer>
+ void DoInit(Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ void DoInit(InputIterator first, InputIterator last, false_type);
+
+ template <typename InputIterator>
+ void DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag);
+
+ template <typename ForwardIterator>
+ void DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag);
+
+ template <typename Integer, bool bMove>
+ void DoAssign(Integer n, Integer value, true_type);
+
+ template <typename InputIterator, bool bMove>
+ void DoAssign(InputIterator first, InputIterator last, false_type);
+
+ void DoAssignValues(size_type n, const value_type& value);
+
+ template <typename InputIterator, bool bMove>
+ void DoAssignFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag);
+
+ template <typename RandomAccessIterator, bool bMove>
+ void DoAssignFromIterator(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag);
+
+ template <typename Integer>
+ void DoInsert(const_iterator position, Integer n, Integer value, true_type);
+
+ template <typename InputIterator>
+ void DoInsert(const_iterator position, InputIterator first, InputIterator last, false_type);
+
+ template <typename InputIterator>
+ void DoInsertFromIterator(const_iterator position, InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag);
+
+ template <typename BidirectionalIterator>
+ void DoInsertFromIterator(const_iterator position, BidirectionalIterator first, BidirectionalIterator last, EASTL_ITC_NS::bidirectional_iterator_tag);
+
+ void DoInsertValues(const_iterator position, size_type n, const value_type& value);
+
+ void DoInsertValuesEnd(size_type n); // Default constructs n values
+ void DoInsertValuesEnd(size_type n, const value_type& value);
+
+ template<typename... Args>
+ void DoInsertValue(const_iterator position, Args&&... args);
+
+ template<typename... Args>
+ void DoInsertValueEnd(Args&&... args);
+
+ void DoClearCapacity();
+
+ void DoGrow(size_type n);
+
+ void DoSwap(this_type& x);
+
+ }; // class vector
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // VectorBase
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline VectorBase<T, Allocator>::VectorBase()
+ : mpBegin(NULL),
+ mpEnd(NULL),
+ mCapacityAllocator(NULL, allocator_type(EASTL_VECTOR_DEFAULT_NAME))
+ {
+ }
+
+
+ template <typename T, typename Allocator>
+ inline VectorBase<T, Allocator>::VectorBase(const allocator_type& allocator)
+ : mpBegin(NULL),
+ mpEnd(NULL),
+ mCapacityAllocator(NULL, allocator)
+ {
+ }
+
+
+ template <typename T, typename Allocator>
+ inline VectorBase<T, Allocator>::VectorBase(size_type n, const allocator_type& allocator)
+ : mCapacityAllocator(allocator)
+ {
+ mpBegin = DoAllocate(n);
+ mpEnd = mpBegin;
+ internalCapacityPtr() = mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline VectorBase<T, Allocator>::~VectorBase()
+ {
+ if(mpBegin)
+ EASTLFree(internalAllocator(), mpBegin, (internalCapacityPtr() - mpBegin) * sizeof(T));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline const typename VectorBase<T, Allocator>::allocator_type&
+ VectorBase<T, Allocator>::get_allocator() const EA_NOEXCEPT
+ {
+ return internalAllocator();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename VectorBase<T, Allocator>::allocator_type&
+ VectorBase<T, Allocator>::get_allocator() EA_NOEXCEPT
+ {
+ return internalAllocator();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void VectorBase<T, Allocator>::set_allocator(const allocator_type& allocator)
+ {
+ internalAllocator() = allocator;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline T* VectorBase<T, Allocator>::DoAllocate(size_type n)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n >= 0x80000000))
+ EASTL_FAIL_MSG("vector::DoAllocate -- improbably large request.");
+ #endif
+
+ // If n is zero, then we allocate no memory and just return nullptr.
+ // This is fine, as our default ctor initializes with NULL pointers.
+ if(EASTL_LIKELY(n))
+ {
+ auto* p = (T*)allocate_memory(internalAllocator(), n * sizeof(T), EASTL_ALIGN_OF(T), 0);
+ EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+ return p;
+ }
+ else
+ {
+ return nullptr;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void VectorBase<T, Allocator>::DoFree(T* p, size_type n)
+ {
+ if(p)
+ EASTLFree(internalAllocator(), p, n * sizeof(T));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename VectorBase<T, Allocator>::size_type
+ VectorBase<T, Allocator>::GetNewCapacity(size_type currentCapacity)
+ {
+ // This needs to return a value of at least currentCapacity and at least 1.
+ return (currentCapacity > 0) ? (2 * currentCapacity) : 1;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // vector
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector() EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(EASTL_VECTOR_DEFAULT_ALLOCATOR))
+ : base_type()
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(const allocator_type& allocator) EA_NOEXCEPT
+ : base_type(allocator)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(size_type n, const allocator_type& allocator)
+ : base_type(n, allocator)
+ {
+ eastl::uninitialized_default_fill_n(mpBegin, n);
+ mpEnd = mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(size_type n, const value_type& value, const allocator_type& allocator)
+ : base_type(n, allocator)
+ {
+ eastl::uninitialized_fill_n_ptr(mpBegin, n, value);
+ mpEnd = mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(const this_type& x)
+ : base_type(x.size(), x.internalAllocator())
+ {
+ mpEnd = eastl::uninitialized_copy_ptr(x.mpBegin, x.mpEnd, mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(const this_type& x, const allocator_type& allocator)
+ : base_type(x.size(), allocator)
+ {
+ mpEnd = eastl::uninitialized_copy_ptr(x.mpBegin, x.mpEnd, mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(this_type&& x) EA_NOEXCEPT
+ : base_type(eastl::move(x.internalAllocator())) // vector requires move-construction of allocator in this case.
+ {
+ DoSwap(x);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(this_type&& x, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ if (internalAllocator() == x.internalAllocator()) // If allocators are equivalent...
+ DoSwap(x);
+ else
+ {
+ this_type temp(eastl::move(*this)); // move construct so we don't require the use of copy-ctors that prevent the use of move-only types.
+ temp.swap(x);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(std::initializer_list<value_type> ilist, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInit(ilist.begin(), ilist.end(), false_type());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline vector<T, Allocator>::vector(InputIterator first, InputIterator last, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInit(first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::~vector()
+ {
+ // Call destructor for the values. Parent class will free the memory.
+ eastl::destruct(mpBegin, mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ typename vector<T, Allocator>::this_type&
+ vector<T, Allocator>::operator=(const this_type& x)
+ {
+ if(this != &x) // If not assigning to self...
+ {
+ // If (EASTL_ALLOCATOR_COPY_ENABLED == 1) and the current contents are allocated by an
+ // allocator that's unequal to x's allocator, we need to reallocate our elements with
+ // our current allocator and reallocate it with x's allocator. If the allocators are
+ // equal then we can use a more optimal algorithm that doesn't reallocate our elements
+ // but instead can copy them in place.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ bool bSlowerPathwayRequired = (internalAllocator() != x.internalAllocator());
+ #else
+ bool bSlowerPathwayRequired = false;
+ #endif
+
+ if(bSlowerPathwayRequired)
+ {
+ DoClearCapacity(); // Must clear the capacity instead of clear because set_capacity frees our memory, unlike clear.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ internalAllocator() = x.internalAllocator();
+ #endif
+ }
+
+ DoAssign<const_iterator, false>(x.begin(), x.end(), eastl::false_type());
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename vector<T, Allocator>::this_type&
+ vector<T, Allocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ typedef typename std::initializer_list<value_type>::iterator InputIterator;
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ DoAssignFromIterator<InputIterator, false>(ilist.begin(), ilist.end(), IC()); // initializer_list has const elements and so we can't move from them.
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename vector<T, Allocator>::this_type&
+ vector<T, Allocator>::operator=(this_type&& x)
+ {
+ if(this != &x)
+ {
+ DoClearCapacity(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor.
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::assign(size_type n, const value_type& value)
+ {
+ DoAssignValues(n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void vector<T, Allocator>::assign(InputIterator first, InputIterator last)
+ {
+ // It turns out that the C++ std::vector<int, int> specifies a two argument
+ // version of assign that takes (int size, int value). These are not iterators,
+ // so we need to do a template compiler trick to do the right thing.
+ DoAssign<InputIterator, false>(first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::assign(std::initializer_list<value_type> ilist)
+ {
+ typedef typename std::initializer_list<value_type>::iterator InputIterator;
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ DoAssignFromIterator<InputIterator, false>(ilist.begin(), ilist.end(), IC()); // initializer_list has const elements and so we can't move from them.
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::begin() EA_NOEXCEPT
+ {
+ return mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_iterator
+ vector<T, Allocator>::begin() const EA_NOEXCEPT
+ {
+ return mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_iterator
+ vector<T, Allocator>::cbegin() const EA_NOEXCEPT
+ {
+ return mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::end() EA_NOEXCEPT
+ {
+ return mpEnd;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_iterator
+ vector<T, Allocator>::end() const EA_NOEXCEPT
+ {
+ return mpEnd;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_iterator
+ vector<T, Allocator>::cend() const EA_NOEXCEPT
+ {
+ return mpEnd;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reverse_iterator
+ vector<T, Allocator>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reverse_iterator
+ vector<T, Allocator>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reverse_iterator
+ vector<T, Allocator>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reverse_iterator
+ vector<T, Allocator>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ bool vector<T, Allocator>::empty() const EA_NOEXCEPT
+ {
+ return (mpBegin == mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::size_type
+ vector<T, Allocator>::size() const EA_NOEXCEPT
+ {
+ return (size_type)(mpEnd - mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::size_type
+ vector<T, Allocator>::capacity() const EA_NOEXCEPT
+ {
+ return (size_type)(internalCapacityPtr() - mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::resize(size_type n, const value_type& value)
+ {
+ if(n > (size_type)(mpEnd - mpBegin)) // We expect that more often than not, resizes will be upsizes.
+ DoInsertValuesEnd(n - ((size_type)(mpEnd - mpBegin)), value);
+ else
+ {
+ eastl::destruct(mpBegin + n, mpEnd);
+ mpEnd = mpBegin + n;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::resize(size_type n)
+ {
+ // Alternative implementation:
+ // resize(n, value_type());
+
+ if(n > (size_type)(mpEnd - mpBegin)) // We expect that more often than not, resizes will be upsizes.
+ DoInsertValuesEnd(n - ((size_type)(mpEnd - mpBegin)));
+ else
+ {
+ eastl::destruct(mpBegin + n, mpEnd);
+ mpEnd = mpBegin + n;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::reserve(size_type n)
+ {
+ // If the user wants to reduce the reserved memory, there is the set_capacity function.
+ if(n > size_type(internalCapacityPtr() - mpBegin)) // If n > capacity ...
+ DoGrow(n);
+ }
+
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::set_capacity(size_type n)
+ {
+ if((n == npos) || (n <= (size_type)(mpEnd - mpBegin))) // If new capacity <= size...
+ {
+ if(n == 0) // Very often n will be 0, and clear will be faster than resize and use less stack space.
+ clear();
+ else if(n < (size_type)(mpEnd - mpBegin))
+ resize(n);
+
+ shrink_to_fit();
+ }
+ else // Else new capacity > size.
+ {
+ pointer const pNewData = DoRealloc(n, mpBegin, mpEnd, should_move_tag());
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ const ptrdiff_t nPrevSize = mpEnd - mpBegin;
+ mpBegin = pNewData;
+ mpEnd = pNewData + nPrevSize;
+ internalCapacityPtr() = mpBegin + n;
+ }
+ }
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::shrink_to_fit()
+ {
+ // This is the simplest way to accomplish this, and it is as efficient as any other.
+ this_type temp = this_type(move_iterator<iterator>(begin()), move_iterator<iterator>(end()), internalAllocator());
+
+ // Call DoSwap() rather than swap() as we know our allocators match and we don't want to invoke the code path
+ // handling non matching allocators as it imposes additional restrictions on the type of T to be copyable
+ DoSwap(temp);
+ }
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::pointer
+ vector<T, Allocator>::data() EA_NOEXCEPT
+ {
+ return mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_pointer
+ vector<T, Allocator>::data() const EA_NOEXCEPT
+ {
+ return mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reference
+ vector<T, Allocator>::operator[](size_type n)
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
+ EASTL_FAIL_MSG("vector::operator[] -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ // We allow the user to use a reference to v[0] of an empty container. But this was merely grandfathered in and ideally we shouldn't allow such access to [0].
+ if (EASTL_UNLIKELY((n != 0) && (n >= (static_cast<size_type>(mpEnd - mpBegin)))))
+ EASTL_FAIL_MSG("vector::operator[] -- out of range");
+ #endif
+
+ return *(mpBegin + n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reference
+ vector<T, Allocator>::operator[](size_type n) const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
+ EASTL_FAIL_MSG("vector::operator[] -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ // We allow the user to use a reference to v[0] of an empty container. But this was merely grandfathered in and ideally we shouldn't allow such access to [0].
+ if (EASTL_UNLIKELY((n != 0) && (n >= (static_cast<size_type>(mpEnd - mpBegin)))))
+ EASTL_FAIL_MSG("vector::operator[] -- out of range");
+ #endif
+
+ return *(mpBegin + n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reference
+ vector<T, Allocator>::at(size_type n)
+ {
+ // The difference between at() and operator[] is it signals
+ // the requested position is out of range by throwing an
+ // out_of_range exception.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
+ throw std::out_of_range("vector::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
+ EASTL_FAIL_MSG("vector::at -- out of range");
+ #endif
+
+ return *(mpBegin + n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reference
+ vector<T, Allocator>::at(size_type n) const
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
+ throw std::out_of_range("vector::at -- out of range");
+ #elif EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
+ EASTL_FAIL_MSG("vector::at -- out of range");
+ #endif
+
+ return *(mpBegin + n);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reference
+ vector<T, Allocator>::front()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((mpBegin == nullptr) || (mpEnd <= mpBegin))) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("vector::front -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return *mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reference
+ vector<T, Allocator>::front() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((mpBegin == nullptr) || (mpEnd <= mpBegin))) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("vector::front -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return *mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reference
+ vector<T, Allocator>::back()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((mpBegin == nullptr) || (mpEnd <= mpBegin))) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("vector::back -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return *(mpEnd - 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reference
+ vector<T, Allocator>::back() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY((mpBegin == nullptr) || (mpEnd <= mpBegin))) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("vector::back -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return *(mpEnd - 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::push_back(const value_type& value)
+ {
+ if(mpEnd < internalCapacityPtr())
+ ::new((void*)mpEnd++) value_type(value);
+ else
+ DoInsertValueEnd(value);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::push_back(value_type&& value)
+ {
+ if (mpEnd < internalCapacityPtr())
+ ::new((void*)mpEnd++) value_type(eastl::move(value));
+ else
+ DoInsertValueEnd(eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reference
+ vector<T, Allocator>::push_back()
+ {
+ if(mpEnd < internalCapacityPtr())
+ ::new((void*)mpEnd++) value_type();
+ else // Note that in this case we create a temporary, which is less desirable.
+ DoInsertValueEnd(value_type());
+
+ return *(mpEnd - 1); // Same as return back();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void* vector<T, Allocator>::push_back_uninitialized()
+ {
+ if(mpEnd == internalCapacityPtr())
+ {
+ const size_type nPrevSize = size_type(mpEnd - mpBegin);
+ const size_type nNewSize = GetNewCapacity(nPrevSize);
+ DoGrow(nNewSize);
+ }
+
+ return mpEnd++;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::pop_back()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(mpEnd <= mpBegin))
+ EASTL_FAIL_MSG("vector::pop_back -- empty vector");
+ #endif
+
+ --mpEnd;
+ mpEnd->~value_type();
+ }
+
+
+ template <typename T, typename Allocator>
+ template<class... Args>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::emplace(const_iterator position, Args&&... args)
+ {
+ const ptrdiff_t n = position - mpBegin; // Save this because we might reallocate.
+
+ if((mpEnd == internalCapacityPtr()) || (position != mpEnd))
+ DoInsertValue(position, eastl::forward<Args>(args)...);
+ else
+ {
+ ::new((void*)mpEnd) value_type(eastl::forward<Args>(args)...);
+ ++mpEnd; // Increment this after the construction above in case the construction throws an exception.
+ }
+
+ return mpBegin + n;
+ }
+
+ template <typename T, typename Allocator>
+ template<class... Args>
+ inline typename vector<T, Allocator>::reference
+ vector<T, Allocator>::emplace_back(Args&&... args)
+ {
+ if(mpEnd < internalCapacityPtr())
+ {
+ ::new((void*)mpEnd) value_type(eastl::forward<Args>(args)...); // If value_type has a move constructor, it will use it and this operation may be faster than otherwise.
+ ++mpEnd; // Increment this after the construction above in case the construction throws an exception.
+ }
+ else
+ DoInsertValueEnd(eastl::forward<Args>(args)...);
+
+ return back();
+ }
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::insert(const_iterator position, const value_type& value)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
+ EASTL_FAIL_MSG("vector::insert -- invalid position");
+ #endif
+
+ // We implment a quick pathway for the case that the insertion position is at the end and we have free capacity for it.
+ const ptrdiff_t n = position - mpBegin; // Save this because we might reallocate.
+
+ if((mpEnd == internalCapacityPtr()) || (position != mpEnd))
+ DoInsertValue(position, value);
+ else
+ {
+ ::new((void*)mpEnd) value_type(value);
+ ++mpEnd; // Increment this after the construction above in case the construction throws an exception.
+ }
+
+ return mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::insert(const_iterator position, value_type&& value)
+ {
+ return emplace(position, eastl::move(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::insert(const_iterator position, size_type n, const value_type& value)
+ {
+ const ptrdiff_t p = position - mpBegin; // Save this because we might reallocate.
+ DoInsertValues(position, n, value);
+ return mpBegin + p;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::insert(const_iterator position, InputIterator first, InputIterator last)
+ {
+ const ptrdiff_t n = position - mpBegin; // Save this because we might reallocate.
+ DoInsert(position, first, last, is_integral<InputIterator>());
+ return mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::insert(const_iterator position, std::initializer_list<value_type> ilist)
+ {
+ const ptrdiff_t n = position - mpBegin; // Save this because we might reallocate.
+ DoInsert(position, ilist.begin(), ilist.end(), false_type());
+ return mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::erase(const_iterator position)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((position < mpBegin) || (position >= mpEnd)))
+ EASTL_FAIL_MSG("vector::erase -- invalid position");
+ #endif
+
+ // C++11 stipulates that position is const_iterator, but the return value is iterator.
+ iterator destPosition = const_cast<value_type*>(position);
+
+ if((position + 1) < mpEnd)
+ eastl::move(destPosition + 1, mpEnd, destPosition);
+ --mpEnd;
+ mpEnd->~value_type();
+ return destPosition;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::erase(const_iterator first, const_iterator last)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((first < mpBegin) || (first > mpEnd) || (last < mpBegin) || (last > mpEnd) || (last < first)))
+ EASTL_FAIL_MSG("vector::erase -- invalid position");
+ #endif
+
+ if (first != last)
+ {
+ iterator const position = const_cast<value_type*>(eastl::move(const_cast<value_type*>(last), const_cast<value_type*>(mpEnd), const_cast<value_type*>(first)));
+ eastl::destruct(position, mpEnd);
+ mpEnd -= (last - first);
+ }
+
+ return const_cast<value_type*>(first);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::erase_unsorted(const_iterator position)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((position < mpBegin) || (position >= mpEnd)))
+ EASTL_FAIL_MSG("vector::erase -- invalid position");
+ #endif
+
+ // C++11 stipulates that position is const_iterator, but the return value is iterator.
+ iterator destPosition = const_cast<value_type*>(position);
+ *destPosition = eastl::move(*(mpEnd - 1));
+
+ // pop_back();
+ --mpEnd;
+ mpEnd->~value_type();
+
+ return destPosition;
+ }
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator vector<T, Allocator>::erase_first(const T& value)
+ {
+ static_assert(eastl::has_equality_v<T>, "T must be comparable");
+
+ iterator it = eastl::find(begin(), end(), value);
+
+ if (it != end())
+ return erase(it);
+ else
+ return it;
+ }
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::erase_first_unsorted(const T& value)
+ {
+ static_assert(eastl::has_equality_v<T>, "T must be comparable");
+
+ iterator it = eastl::find(begin(), end(), value);
+
+ if (it != end())
+ return erase_unsorted(it);
+ else
+ return it;
+ }
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::erase_last(const T& value)
+ {
+ static_assert(eastl::has_equality_v<T>, "T must be comparable");
+
+ reverse_iterator it = eastl::find(rbegin(), rend(), value);
+
+ if (it != rend())
+ return erase(it);
+ else
+ return it;
+ }
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::erase_last_unsorted(const T& value)
+ {
+ static_assert(eastl::has_equality_v<T>, "T must be comparable");
+
+ reverse_iterator it = eastl::find(rbegin(), rend(), value);
+
+ if (it != rend())
+ return erase_unsorted(it);
+ else
+ return it;
+ }
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase(last.base(), first.base()));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::erase_unsorted(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase_unsorted((++position).base()));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::clear() EA_NOEXCEPT
+ {
+ eastl::destruct(mpBegin, mpEnd);
+ mpEnd = mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::reset_lose_memory() EA_NOEXCEPT
+ {
+ // The reset function is a special extension function which unilaterally
+ // resets the container to an empty state without freeing the memory of
+ // the contained objects. This is useful for very quickly tearing down a
+ // container built into scratch memory.
+ mpBegin = mpEnd = internalCapacityPtr() = NULL;
+ }
+
+
+ // swap exchanges the contents of two containers. With respect to the containers allocators,
+ // the C11++ Standard (23.2.1/7) states that the behavior of a call to a container's swap function
+ // is undefined unless the objects being swapped have allocators that compare equal or
+ // allocator_traits<allocator_type>::propagate_on_container_swap::value is true (propagate_on_container_swap
+ // is false by default). EASTL doesn't have allocator_traits and so this doesn't directly apply,
+ // but EASTL has the effective behavior of propagate_on_container_swap = false for all allocators.
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::swap(this_type& x)
+ {
+ #if defined(EASTL_VECTOR_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR) && EASTL_VECTOR_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ if(internalAllocator() == x.internalAllocator()) // If allocators are equivalent...
+ DoSwap(x);
+ else // else swap the contents.
+ {
+ const this_type temp(*this); // Can't call eastl::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+ #else
+ // NOTE(rparolin): The previous implementation required T to be copy-constructible in the fall-back case where
+ // allocators with unique instances copied elements. This was an unnecessary restriction and prevented the common
+ // usage of vector with non-copyable types (eg. eastl::vector<non_copyable> or eastl::vector<unique_ptr>).
+ //
+ // The previous implementation violated the following requirements of vector::swap so the fall-back code has
+ // been removed. EASTL implicitly defines 'propagate_on_container_swap = false' therefore the fall-back case is
+ // undefined behaviour. We simply swap the contents and the allocator as that is the common expectation of
+ // users and does not put the container into an invalid state since it can not free its memory via its current
+ // allocator instance.
+ //
+ // http://en.cppreference.com/w/cpp/container/vector/swap
+ // "Exchanges the contents of the container with those of other. Does not invoke any move, copy, or swap
+ // operations on individual elements."
+ //
+ // http://en.cppreference.com/w/cpp/concept/AllocatorAwareContainer
+ // "Swapping two containers with unequal allocators if propagate_on_container_swap is false is undefined
+ // behavior."
+
+ DoSwap(x);
+ #endif
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename ForwardIterator>
+ inline typename vector<T, Allocator>::pointer
+ vector<T, Allocator>::DoRealloc(size_type n, ForwardIterator first, ForwardIterator last, should_copy_tag)
+ {
+ T* const p = DoAllocate(n); // p is of type T* but is not constructed.
+ eastl::uninitialized_copy_ptr(first, last, p); // copy-constructs p from [first,last).
+ return p;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename ForwardIterator>
+ inline typename vector<T, Allocator>::pointer
+ vector<T, Allocator>::DoRealloc(size_type n, ForwardIterator first, ForwardIterator last, should_move_tag)
+ {
+ T* const p = DoAllocate(n); // p is of type T* but is not constructed.
+ eastl::uninitialized_move_ptr_if_noexcept(first, last, p); // move-constructs p from [first,last).
+ return p;
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Integer>
+ inline void vector<T, Allocator>::DoInit(Integer n, Integer value, true_type)
+ {
+ mpBegin = DoAllocate((size_type)n);
+ internalCapacityPtr() = mpBegin + n;
+ mpEnd = internalCapacityPtr();
+
+ typedef typename eastl::remove_const<T>::type non_const_value_type; // If T is a const type (e.g. const int) then we need to initialize it as if it were non-const.
+ eastl::uninitialized_fill_n_ptr<value_type, Integer>((non_const_value_type*)mpBegin, n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void vector<T, Allocator>::DoInit(InputIterator first, InputIterator last, false_type)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>:: iterator_category IC;
+ DoInitFromIterator(first, last, IC());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void vector<T, Allocator>::DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
+ {
+ // To do: Use emplace_back instead of push_back(). Our emplace_back will work below without any ifdefs.
+ for(; first != last; ++first) // InputIterators by definition actually only allow you to iterate through them once.
+ push_back(*first); // Thus the standard *requires* that we do this (inefficient) implementation.
+ } // Luckily, InputIterators are in practice almost never used, so this code will likely never get executed.
+
+
+ template <typename T, typename Allocator>
+ template <typename ForwardIterator>
+ inline void vector<T, Allocator>::DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ const size_type n = (size_type)eastl::distance(first, last);
+ mpBegin = DoAllocate(n);
+ internalCapacityPtr() = mpBegin + n;
+ mpEnd = internalCapacityPtr();
+
+ typedef typename eastl::remove_const<T>::type non_const_value_type; // If T is a const type (e.g. const int) then we need to initialize it as if it were non-const.
+ eastl::uninitialized_copy_ptr(first, last, (non_const_value_type*)mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Integer, bool bMove>
+ inline void vector<T, Allocator>::DoAssign(Integer n, Integer value, true_type)
+ {
+ DoAssignValues(static_cast<size_type>(n), static_cast<value_type>(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator, bool bMove>
+ inline void vector<T, Allocator>::DoAssign(InputIterator first, InputIterator last, false_type)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ DoAssignFromIterator<InputIterator, bMove>(first, last, IC());
+ }
+
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::DoAssignValues(size_type n, const value_type& value)
+ {
+ if(n > size_type(internalCapacityPtr() - mpBegin)) // If n > capacity ...
+ {
+ this_type temp(n, value, internalAllocator()); // We have little choice but to reallocate with new memory.
+ swap(temp);
+ }
+ else if(n > size_type(mpEnd - mpBegin)) // If n > size ...
+ {
+ eastl::fill(mpBegin, mpEnd, value);
+ eastl::uninitialized_fill_n_ptr(mpEnd, n - size_type(mpEnd - mpBegin), value);
+ mpEnd += n - size_type(mpEnd - mpBegin);
+ }
+ else // else 0 <= n <= size
+ {
+ eastl::fill_n(mpBegin, n, value);
+ erase(mpBegin + n, mpEnd);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator, bool bMove>
+ void vector<T, Allocator>::DoAssignFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
+ {
+ iterator position(mpBegin);
+
+ while((position != mpEnd) && (first != last))
+ {
+ *position = *first;
+ ++first;
+ ++position;
+ }
+ if(first == last)
+ erase(position, mpEnd);
+ else
+ insert(mpEnd, first, last);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename RandomAccessIterator, bool bMove>
+ void vector<T, Allocator>::DoAssignFromIterator(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ const size_type n = (size_type)eastl::distance(first, last);
+
+ if(n > size_type(internalCapacityPtr() - mpBegin)) // If n > capacity ...
+ {
+ pointer const pNewData = DoRealloc(n, first, last, should_move_or_copy_tag<bMove>());
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpBegin = pNewData;
+ mpEnd = mpBegin + n;
+ internalCapacityPtr() = mpEnd;
+ }
+ else if(n <= size_type(mpEnd - mpBegin)) // If n <= size ...
+ {
+ pointer const pNewEnd = eastl::copy(first, last, mpBegin); // Since we are copying to mpBegin, we don't have to worry about needing copy_backward or a memmove-like copy (as opposed to memcpy-like copy).
+ eastl::destruct(pNewEnd, mpEnd);
+ mpEnd = pNewEnd;
+ }
+ else // else size < n <= capacity
+ {
+ RandomAccessIterator position = first + (mpEnd - mpBegin);
+ eastl::copy(first, position, mpBegin); // Since we are copying to mpBegin, we don't have to worry about needing copy_backward or a memmove-like copy (as opposed to memcpy-like copy).
+ mpEnd = eastl::uninitialized_copy_ptr(position, last, mpEnd);
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Integer>
+ inline void vector<T, Allocator>::DoInsert(const_iterator position, Integer n, Integer value, true_type)
+ {
+ DoInsertValues(position, static_cast<size_type>(n), static_cast<value_type>(value));
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void vector<T, Allocator>::DoInsert(const_iterator position, InputIterator first, InputIterator last, false_type)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ DoInsertFromIterator(position, first, last, IC());
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void vector<T, Allocator>::DoInsertFromIterator(const_iterator position, InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
+ {
+ for(; first != last; ++first, ++position)
+ position = insert(position, *first);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename BidirectionalIterator>
+ void vector<T, Allocator>::DoInsertFromIterator(const_iterator position, BidirectionalIterator first, BidirectionalIterator last, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
+ EASTL_FAIL_MSG("vector::insert -- invalid position");
+ #endif
+
+ // C++11 stipulates that position is const_iterator, but the return value is iterator.
+ iterator destPosition = const_cast<value_type*>(position);
+
+ if(first != last)
+ {
+ const size_type n = (size_type)eastl::distance(first, last); // n is the number of elements we are inserting.
+
+ if(n <= size_type(internalCapacityPtr() - mpEnd)) // If n fits within the existing capacity...
+ {
+ const size_type nExtra = static_cast<size_type>(mpEnd - destPosition);
+
+ if(n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)...
+ {
+ eastl::uninitialized_move_ptr(mpEnd - n, mpEnd, mpEnd);
+ eastl::move_backward(destPosition, mpEnd - n, mpEnd); // We need move_backward because of potential overlap issues.
+ eastl::copy(first, last, destPosition);
+ }
+ else
+ {
+ BidirectionalIterator iTemp = first;
+ eastl::advance(iTemp, nExtra);
+ eastl::uninitialized_copy_ptr(iTemp, last, mpEnd);
+ eastl::uninitialized_move_ptr(destPosition, mpEnd, mpEnd + n - nExtra);
+ eastl::copy_backward(first, iTemp, destPosition + nExtra);
+ }
+
+ mpEnd += n;
+ }
+ else // else we need to expand our capacity.
+ {
+ const size_type nPrevSize = size_type(mpEnd - mpBegin);
+ const size_type nGrowSize = GetNewCapacity(nPrevSize);
+ const size_type nNewSize = nGrowSize > (nPrevSize + n) ? nGrowSize : (nPrevSize + n);
+ pointer const pNewData = DoAllocate(nNewSize);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ pointer pNewEnd = pNewData;
+ try
+ {
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, destPosition, pNewData);
+ pNewEnd = eastl::uninitialized_copy_ptr(first, last, pNewEnd);
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(destPosition, mpEnd, pNewEnd);
+ }
+ catch(...)
+ {
+ eastl::destruct(pNewData, pNewEnd);
+ DoFree(pNewData, nNewSize);
+ throw;
+ }
+ #else
+ pointer pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, destPosition, pNewData);
+ pNewEnd = eastl::uninitialized_copy_ptr(first, last, pNewEnd);
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(destPosition, mpEnd, pNewEnd);
+ #endif
+
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpBegin = pNewData;
+ mpEnd = pNewEnd;
+ internalCapacityPtr() = pNewData + nNewSize;
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::DoInsertValues(const_iterator position, size_type n, const value_type& value)
+ {
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
+ EASTL_FAIL_MSG("vector::insert -- invalid position");
+ #endif
+
+ // C++11 stipulates that position is const_iterator, but the return value is iterator.
+ iterator destPosition = const_cast<value_type*>(position);
+
+ if(n <= size_type(internalCapacityPtr() - mpEnd)) // If n is <= capacity...
+ {
+ if(n > 0) // To do: See if there is a way we can eliminate this 'if' statement.
+ {
+ // To consider: Make this algorithm work more like DoInsertValue whereby a pointer to value is used.
+ const value_type temp = value;
+ const size_type nExtra = static_cast<size_type>(mpEnd - destPosition);
+
+ if(n < nExtra)
+ {
+ eastl::uninitialized_move_ptr(mpEnd - n, mpEnd, mpEnd);
+ eastl::move_backward(destPosition, mpEnd - n, mpEnd); // We need move_backward because of potential overlap issues.
+ eastl::fill(destPosition, destPosition + n, temp);
+ }
+ else
+ {
+ eastl::uninitialized_fill_n_ptr(mpEnd, n - nExtra, temp);
+ eastl::uninitialized_move_ptr(destPosition, mpEnd, mpEnd + n - nExtra);
+ eastl::fill(destPosition, mpEnd, temp);
+ }
+
+ mpEnd += n;
+ }
+ }
+ else // else n > capacity
+ {
+ const size_type nPrevSize = size_type(mpEnd - mpBegin);
+ const size_type nGrowSize = GetNewCapacity(nPrevSize);
+ const size_type nNewSize = nGrowSize > (nPrevSize + n) ? nGrowSize : (nPrevSize + n);
+ pointer const pNewData = DoAllocate(nNewSize);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ pointer pNewEnd = pNewData;
+ try
+ {
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, destPosition, pNewData);
+ eastl::uninitialized_fill_n_ptr(pNewEnd, n, value);
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(destPosition, mpEnd, pNewEnd + n);
+ }
+ catch(...)
+ {
+ eastl::destruct(pNewData, pNewEnd);
+ DoFree(pNewData, nNewSize);
+ throw;
+ }
+ #else
+ pointer pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, destPosition, pNewData);
+ eastl::uninitialized_fill_n_ptr(pNewEnd, n, value);
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(destPosition, mpEnd, pNewEnd + n);
+ #endif
+
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpBegin = pNewData;
+ mpEnd = pNewEnd;
+ internalCapacityPtr() = pNewData + nNewSize;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::DoClearCapacity() // This function exists because set_capacity() currently indirectly requires value_type to be default-constructible,
+ { // and some functions that need to clear our capacity (e.g. operator=) aren't supposed to require default-constructibility.
+ clear();
+ this_type temp(eastl::move(*this)); // This is the simplest way to accomplish this,
+ swap(temp); // and it is as efficient as any other.
+ }
+
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::DoGrow(size_type n)
+ {
+ pointer const pNewData = DoAllocate(n);
+
+ pointer pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, mpEnd, pNewData);
+
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpBegin = pNewData;
+ mpEnd = pNewEnd;
+ internalCapacityPtr() = pNewData + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::DoSwap(this_type& x)
+ {
+ eastl::swap(mpBegin, x.mpBegin);
+ eastl::swap(mpEnd, x.mpEnd);
+ eastl::swap(mCapacityAllocator, x.mCapacityAllocator); // We do this even if EASTL_ALLOCATOR_COPY_ENABLED is 0.
+ }
+
+ // The code duplication between this and the version that takes no value argument and default constructs the values
+ // is unfortunate but not easily resolved without relying on C++11 perfect forwarding.
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::DoInsertValuesEnd(size_type n, const value_type& value)
+ {
+ if(n > size_type(internalCapacityPtr() - mpEnd))
+ {
+ const size_type nPrevSize = size_type(mpEnd - mpBegin);
+ const size_type nGrowSize = GetNewCapacity(nPrevSize);
+ const size_type nNewSize = eastl::max(nGrowSize, nPrevSize + n);
+ pointer const pNewData = DoAllocate(nNewSize);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ pointer pNewEnd = pNewData; // Assign pNewEnd a value here in case the copy throws.
+ try
+ {
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, mpEnd, pNewData);
+ }
+ catch(...)
+ {
+ eastl::destruct(pNewData, pNewEnd);
+ DoFree(pNewData, nNewSize);
+ throw;
+ }
+ #else
+ pointer pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, mpEnd, pNewData);
+ #endif
+
+ eastl::uninitialized_fill_n_ptr(pNewEnd, n, value);
+ pNewEnd += n;
+
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpBegin = pNewData;
+ mpEnd = pNewEnd;
+ internalCapacityPtr() = pNewData + nNewSize;
+ }
+ else
+ {
+ eastl::uninitialized_fill_n_ptr(mpEnd, n, value);
+ mpEnd += n;
+ }
+ }
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::DoInsertValuesEnd(size_type n)
+ {
+ if (n > size_type(internalCapacityPtr() - mpEnd))
+ {
+ const size_type nPrevSize = size_type(mpEnd - mpBegin);
+ const size_type nGrowSize = GetNewCapacity(nPrevSize);
+ const size_type nNewSize = eastl::max(nGrowSize, nPrevSize + n);
+ pointer const pNewData = DoAllocate(nNewSize);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ pointer pNewEnd = pNewData; // Assign pNewEnd a value here in case the copy throws.
+ try { pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, mpEnd, pNewData); }
+ catch (...)
+ {
+ eastl::destruct(pNewData, pNewEnd);
+ DoFree(pNewData, nNewSize);
+ throw;
+ }
+ #else
+ pointer pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, mpEnd, pNewData);
+ #endif
+
+ eastl::uninitialized_default_fill_n(pNewEnd, n);
+ pNewEnd += n;
+
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpBegin = pNewData;
+ mpEnd = pNewEnd;
+ internalCapacityPtr() = pNewData + nNewSize;
+ }
+ else
+ {
+ eastl::uninitialized_default_fill_n(mpEnd, n);
+ mpEnd += n;
+ }
+ }
+
+ template <typename T, typename Allocator>
+ template<typename... Args>
+ void vector<T, Allocator>::DoInsertValue(const_iterator position, Args&&... args)
+ {
+ // To consider: It's feasible that the args is from a value_type comes from within the current sequence itself and
+ // so we need to be sure to handle that case. This is different from insert(position, const value_type&) because in
+ // this case value is potentially being modified.
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
+ EASTL_FAIL_MSG("vector::insert/emplace -- invalid position");
+ #endif
+
+ // C++11 stipulates that position is const_iterator, but the return value is iterator.
+ iterator destPosition = const_cast<value_type*>(position);
+
+ if(mpEnd != internalCapacityPtr()) // If size < capacity ...
+ {
+ // We need to take into account the possibility that args is a value_type that comes from within the vector itself.
+ // creating a temporary value on the stack here is not an optimal way to solve this because sizeof(value_type) may be
+ // too much for the given platform. An alternative solution may be to specialize this function for the case of the
+ // argument being const value_type& or value_type&&.
+ EASTL_ASSERT(position < mpEnd); // While insert at end() is valid, our design is such that calling code should handle that case before getting here, as our streamlined logic directly doesn't handle this particular case due to resulting negative ranges.
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...); // Need to do this before the move_backward below because maybe args refers to something within the moving range.
+ #endif
+ ::new(static_cast<void*>(mpEnd)) value_type(eastl::move(*(mpEnd - 1))); // mpEnd is uninitialized memory, so we must construct into it instead of move into it like we do with the other elements below.
+ eastl::move_backward(destPosition, mpEnd - 1, mpEnd); // We need to go backward because of potential overlap issues.
+ eastl::destruct(destPosition);
+ ::new(static_cast<void*>(destPosition)) value_type(eastl::move(value)); // Move the value argument to the given position.
+ ++mpEnd;
+ }
+ else // else (size == capacity)
+ {
+ const size_type nPosSize = size_type(destPosition - mpBegin); // Index of the insertion position.
+ const size_type nPrevSize = size_type(mpEnd - mpBegin);
+ const size_type nNewSize = GetNewCapacity(nPrevSize);
+ pointer const pNewData = DoAllocate(nNewSize);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ pointer pNewEnd = pNewData;
+ try
+ { // To do: We are not handling exceptions properly below. In particular we don't want to
+ // call eastl::destruct on the entire range if only the first part of the range was constructed.
+ ::new((void*)(pNewData + nPosSize)) value_type(eastl::forward<Args>(args)...); // Because the old data is potentially being moved rather than copied, we need to move.
+ pNewEnd = NULL; // Set to NULL so that in catch we can tell the exception occurred during the next call.
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, destPosition, pNewData); // the value first, because it might possibly be a reference to the old data being moved.
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(destPosition, mpEnd, ++pNewEnd);
+ }
+ catch(...)
+ {
+ if(pNewEnd)
+ eastl::destruct(pNewData, pNewEnd); // Destroy what has been constructed so far.
+ else
+ eastl::destruct(pNewData + nPosSize); // The exception occurred during the first uninitialized move, so destroy only the value at nPosSize.
+ DoFree(pNewData, nNewSize);
+ throw;
+ }
+ #else
+ ::new((void*)(pNewData + nPosSize)) value_type(eastl::forward<Args>(args)...); // Because the old data is potentially being moved rather than copied, we need to move
+ pointer pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, destPosition, pNewData); // the value first, because it might possibly be a reference to the old data being moved.
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(destPosition, mpEnd, ++pNewEnd); // Question: with exceptions disabled, do we assume all operations are noexcept and thus there's no need for uninitialized_move_ptr_if_noexcept?
+ #endif
+
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpBegin = pNewData;
+ mpEnd = pNewEnd;
+ internalCapacityPtr() = pNewData + nNewSize;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ template<typename... Args>
+ void vector<T, Allocator>::DoInsertValueEnd(Args&&... args)
+ {
+ const size_type nPrevSize = size_type(mpEnd - mpBegin);
+ const size_type nNewSize = GetNewCapacity(nPrevSize);
+ pointer const pNewData = DoAllocate(nNewSize);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ pointer pNewEnd = pNewData; // Assign pNewEnd a value here in case the copy throws.
+ try
+ {
+ pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, mpEnd, pNewData);
+ ::new((void*)pNewEnd) value_type(eastl::forward<Args>(args)...);
+ pNewEnd++;
+ }
+ catch(...)
+ {
+ eastl::destruct(pNewData, pNewEnd);
+ DoFree(pNewData, nNewSize);
+ throw;
+ }
+ #else
+ pointer pNewEnd = eastl::uninitialized_move_ptr_if_noexcept(mpBegin, mpEnd, pNewData);
+ ::new((void*)pNewEnd) value_type(eastl::forward<Args>(args)...);
+ pNewEnd++;
+ #endif
+
+ eastl::destruct(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin));
+
+ mpBegin = pNewData;
+ mpEnd = pNewEnd;
+ internalCapacityPtr() = pNewData + nNewSize;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool vector<T, Allocator>::validate() const EA_NOEXCEPT
+ {
+ if(mpEnd < mpBegin)
+ return false;
+ if(internalCapacityPtr() < mpEnd)
+ return false;
+ return true;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int vector<T, Allocator>::validate_iterator(const_iterator i) const EA_NOEXCEPT
+ {
+ if(i >= mpBegin)
+ {
+ if(i < mpEnd)
+ return (isf_valid | isf_current | isf_can_dereference);
+
+ if(i <= mpEnd)
+ return (isf_valid | isf_current);
+ }
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline bool operator==(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
+ {
+ return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin()));
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ template <typename T, typename Allocator>
+ inline synth_three_way_result<T> operator<=>(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
+ {
+ return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{});
+ }
+#else
+ template <typename T, typename Allocator>
+ inline bool operator!=(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
+ {
+ return ((a.size() != b.size()) || !eastl::equal(a.begin(), a.end(), b.begin()));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
+ {
+ return !(a < b);
+ }
+#endif
+
+ template <typename T, typename Allocator>
+ inline void swap(vector<T, Allocator>& a, vector<T, Allocator>& b) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(a.swap(b)))
+ {
+ a.swap(b);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // erase / erase_if
+ //
+ // https://en.cppreference.com/w/cpp/container/vector/erase2
+ ///////////////////////////////////////////////////////////////////////
+ template <class T, class Allocator, class U>
+ typename vector<T, Allocator>::size_type erase(vector<T, Allocator>& c, const U& value)
+ {
+ // Erases all elements that compare equal to value from the container.
+ auto origEnd = c.end();
+ auto newEnd = eastl::remove(c.begin(), origEnd, value);
+ auto numRemoved = eastl::distance(newEnd, origEnd);
+ c.erase(newEnd, origEnd);
+
+ // Note: This is technically a lossy conversion when size_type
+ // is 32bits and ptrdiff_t is 64bits (could happen on 64bit
+ // systems when EASTL_SIZE_T_32BIT is set). In practice this
+ // is fine because if EASTL_SIZE_T_32BIT is set then the vector
+ // should not have more elements than fit in a uint32_t and so
+ // the distance here should fit in a size_type.
+ return static_cast<typename vector<T, Allocator>::size_type>(numRemoved);
+ }
+
+ template <class T, class Allocator, class Predicate>
+ typename vector<T, Allocator>::size_type erase_if(vector<T, Allocator>& c, Predicate predicate)
+ {
+ // Erases all elements that satisfy the predicate pred from the container.
+ auto origEnd = c.end();
+ auto newEnd = eastl::remove_if(c.begin(), origEnd, predicate);
+ auto numRemoved = eastl::distance(newEnd, origEnd);
+ c.erase(newEnd, origEnd);
+
+ // Note: This is technically a lossy conversion when size_type
+ // is 32bits and ptrdiff_t is 64bits (could happen on 64bit
+ // systems when EASTL_SIZE_T_32BIT is set). In practice this
+ // is fine because if EASTL_SIZE_T_32BIT is set then the vector
+ // should not have more elements than fit in a uint32_t and so
+ // the distance here should fit in a size_type.
+ return static_cast<typename vector<T, Allocator>::size_type>(numRemoved);
+ }
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/EASTL/include/EASTL/vector_map.h b/EASTL/include/EASTL/vector_map.h
new file mode 100644
index 0000000..14dec48
--- /dev/null
+++ b/EASTL/include/EASTL/vector_map.h
@@ -0,0 +1,906 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+//////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements vector_map. It acts much like std::map, except its
+// underlying representation is a random access container such as vector.
+// These containers are sometimes also known as "sorted vectors."
+// vector_maps have an advantage over conventional maps in that their memory
+// is contiguous and node-less. The result is that lookups are faster, more
+// cache friendly (which potentially more so benefits speed), and the container
+// uses less memory. The downside is that inserting new items into the container
+// is slower if they are inserted in random order instead of in sorted order.
+// This tradeoff is well-worth it for many cases. Note that vector_map allows
+// you to use a deque or other random access container which may perform
+// better for you than vector.
+//
+// Note that with vector_set, vector_multiset, vector_map, vector_multimap
+// that the modification of the container potentially invalidates all
+// existing iterators into the container, unlike what happens with conventional
+// sets and maps.
+//////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_VECTOR_MAP_H
+#define EASTL_VECTOR_MAP_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/functional.h>
+#include <EASTL/vector.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_VECTOR_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_VECTOR_MAP_DEFAULT_NAME
+ #define EASTL_VECTOR_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " vector_map" // Unless the user overrides something, this is "EASTL vector_map".
+ #endif
+
+
+ /// EASTL_VECTOR_MAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_VECTOR_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_VECTOR_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_VECTOR_MAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// map_value_compare
+ ///
+ /// Our adapter for the comparison function in the template parameters.
+ ///
+ template <typename Key, typename Value, typename Compare>
+ class map_value_compare : public binary_function<Value, Value, bool>
+ {
+ public:
+ Compare c;
+
+ map_value_compare(const Compare& x)
+ : c(x) {}
+
+ public:
+ bool operator()(const Value& a, const Value& b) const
+ { return c(a.first, b.first); }
+
+ bool operator()(const Value& a, const Key& b) const
+ { return c(a.first, b); }
+
+ bool operator()(const Key& a, const Value& b) const
+ { return c(a, b.first); }
+
+ bool operator()(const Key& a, const Key& b) const
+ { return c(a, b); }
+
+ }; // map_value_compare
+
+
+
+ /// vector_map
+ ///
+ /// Implements a map via a random access container such as a vector.
+ ///
+ /// Note that with vector_set, vector_multiset, vector_map, vector_multimap
+ /// that the modification of the container potentially invalidates all
+ /// existing iterators into the container, unlike what happens with conventional
+ /// sets and maps.
+ ///
+ /// Note that the erase functions return iterator and not void. This allows for
+ /// more efficient use of the container and is consistent with the C++ language
+ /// defect report #130 (DR 130)
+ ///
+ /// Note that we set the value_type to be pair<Key, T> and not pair<const Key, T>.
+ /// This means that the underlying container (e.g vector) is a container of pair<Key, T>.
+ /// Our vector and deque implementations are optimized to assign values in-place and
+ /// using a vector of pair<const Key, T> (note the const) would make it hard to use
+ /// our existing vector implementation without a lot of headaches. As a result,
+ /// at least for the time being we do away with the const. This means that the
+ /// insertion type varies between map and vector_map in that the latter doesn't take
+ /// const. This also means that a certain amount of automatic safety provided by
+ /// the implementation is lost, as the compiler will let the wayward user modify
+ /// a key and thus make the container no longer ordered behind its back.
+ ///
+ template <typename Key, typename T, typename Compare = eastl::less<Key>,
+ typename Allocator = EASTLAllocatorType,
+ typename RandomAccessContainer = eastl::vector<eastl::pair<Key, T>, Allocator> >
+ class vector_map : public RandomAccessContainer
+ {
+ public:
+ typedef RandomAccessContainer base_type;
+ typedef vector_map<Key, T, Compare, Allocator, RandomAccessContainer> this_type;
+ typedef Allocator allocator_type;
+ typedef Key key_type;
+ typedef T mapped_type;
+ typedef eastl::pair<Key, T> value_type;
+ typedef Compare key_compare;
+ typedef map_value_compare<Key, value_type, Compare> value_compare;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::reverse_iterator reverse_iterator;
+ typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+ typedef eastl::pair<iterator, bool> insert_return_type;
+
+ using base_type::begin;
+ using base_type::end;
+ using base_type::get_allocator;
+
+ protected:
+ value_compare mValueCompare; // To do: Make this variable go away via the zero base size optimization.
+
+ public:
+ // We have an empty ctor and a ctor that takes an allocator instead of one for both
+ // because this way our RandomAccessContainer wouldn't be required to have an constructor
+ // that takes allocator_type.
+ vector_map();
+ explicit vector_map(const allocator_type& allocator);
+ explicit vector_map(const key_compare& comp, const allocator_type& allocator = EASTL_VECTOR_MAP_DEFAULT_ALLOCATOR);
+ vector_map(const this_type& x);
+ vector_map(this_type&& x);
+ vector_map(this_type&& x, const allocator_type& allocator);
+ vector_map(std::initializer_list<value_type> ilist, const key_compare& compare = key_compare(), const allocator_type& allocator = EASTL_VECTOR_MAP_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ vector_map(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ template <typename InputIterator>
+ vector_map(InputIterator first, InputIterator last, const key_compare& compare); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ const key_compare& key_comp() const;
+ key_compare& key_comp();
+
+ const value_compare& value_comp() const;
+ value_compare& value_comp();
+
+ // Inherited from base class:
+ //
+ // allocator_type& get_allocator();
+ // void set_allocator(const allocator_type& allocator);
+ //
+ // iterator begin();
+ // const_iterator begin() const;
+ // const_iterator cbegin() const;
+ //
+ // iterator end();
+ // const_iterator end() const;
+ // const_iterator cend() const;
+ //
+ // reverse_iterator rbegin();
+ // const_reverse_iterator rbegin() const;
+ // const_reverse_iterator crbegin() const;
+ //
+ // reverse_iterator rend();
+ // const_reverse_iterator rend() const;
+ // const_reverse_iterator crend() const;
+ //
+ // size_type size() const;
+ // bool empty() const;
+ // void clear();
+
+ template <class... Args>
+ eastl::pair<iterator, bool> emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args);
+
+ template <typename P, typename = eastl::enable_if_t<eastl::is_constructible_v<value_type, P&&>>>
+ pair<iterator, bool> insert(P&& otherValue);
+
+ eastl::pair<iterator, bool> insert(const value_type& value);
+ pair<iterator, bool> insert(const key_type& otherValue);
+ pair<iterator, bool> insert(key_type&& otherValue);
+ iterator insert(const_iterator position, const value_type& value);
+ iterator insert(const_iterator position, value_type&& value);
+ void insert(std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& k);
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ iterator find(const key_type& k);
+ const_iterator find(const key_type& k) const;
+
+ template <typename U, typename BinaryPredicate>
+ iterator find_as(const U& u, BinaryPredicate predicate);
+
+ template <typename U, typename BinaryPredicate>
+ const_iterator find_as(const U& u, BinaryPredicate predicate) const;
+
+ size_type count(const key_type& k) const;
+
+ iterator lower_bound(const key_type& k);
+ const_iterator lower_bound(const key_type& k) const;
+
+ iterator upper_bound(const key_type& k);
+ const_iterator upper_bound(const key_type& k) const;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ template <typename U, typename BinaryPredicate>
+ eastl::pair<iterator, iterator> equal_range(const U& u, BinaryPredicate predicate);
+
+ template <typename U, typename BinaryPredicate>
+ eastl::pair<const_iterator, const_iterator> equal_range(const U& u, BinaryPredicate) const;
+
+ // Note: vector_map operator[] returns a reference to the mapped_type, same as map does.
+ // But there's an important difference: This reference can be invalidated by -any- changes
+ // to the vector_map that cause it to change capacity. This is unlike map, with which
+ // mapped_type references are invalidated only if that mapped_type element itself is removed
+ // from the map. This is because vector is array-based and map is node-based. As a result
+ // the following code that is safe for map is unsafe for vector_map for the case that
+ // the vMap[100] doesn't already exist in the vector_map:
+ // vMap[100] = vMap[0]
+ mapped_type& operator[](const key_type& k);
+ mapped_type& operator[](key_type&& k);
+
+ // Functions which are disallowed due to being unsafe.
+ void push_back(const value_type& value) = delete;
+ reference push_back() = delete;
+ void* push_back_uninitialized() = delete;
+ template <class... Args>
+ reference emplace_back(Args&&...) = delete;
+
+ // NOTE(rparolin): It is undefined behaviour if user code fails to ensure the container
+ // invariants are respected by performing an explicit call to 'sort' before any other
+ // operations on the container are performed that do not clear the elements.
+ //
+ // 'push_back_unsorted' and 'emplace_back_unsorted' do not satisfy container invariants
+ // for being sorted. We provide these overloads explicitly labelled as '_unsorted' as an
+ // optimization opportunity when batch inserting elements so users can defer the cost of
+ // sorting the container once when all elements are contained. This was done to clarify
+ // the intent of code by leaving a trace that a manual call to sort is required.
+ //
+ template <typename... Args> decltype(auto) push_back_unsorted(Args&&... args)
+ { return base_type::push_back(eastl::forward<Args>(args)...); }
+ template <typename... Args> decltype(auto) emplace_back_unsorted(Args&&... args)
+ { return base_type::emplace_back(eastl::forward<Args>(args)...); }
+
+ }; // vector_map
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // vector_map
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>::vector_map()
+ : base_type(), mValueCompare(C())
+ {
+ get_allocator().set_name(EASTL_VECTOR_MAP_DEFAULT_NAME);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>::vector_map(const allocator_type& allocator)
+ : base_type(allocator), mValueCompare(C())
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>::vector_map(const key_compare& comp, const allocator_type& allocator)
+ : base_type(allocator), mValueCompare(comp)
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>::vector_map(const this_type& x)
+ : base_type(x), mValueCompare(x.mValueCompare)
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>::vector_map(this_type&& x)
+ : base_type(eastl::move(x)), mValueCompare(x.mValueCompare)
+ {
+ // Empty. Note: x is left with empty contents but its original mValueCompare instead of the default one.
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>::vector_map(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator), mValueCompare(x.mValueCompare)
+ {
+ // Empty. Note: x is left with empty contents but its original mValueCompare instead of the default one.
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>::vector_map(std::initializer_list<value_type> ilist, const key_compare& compare, const allocator_type& allocator)
+ : base_type(allocator), mValueCompare(compare)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline vector_map<K, T, C, A, RAC>::vector_map(InputIterator first, InputIterator last)
+ : base_type(EASTL_VECTOR_MAP_DEFAULT_ALLOCATOR), mValueCompare(key_compare())
+ {
+ insert(first, last);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline vector_map<K, T, C, A, RAC>::vector_map(InputIterator first, InputIterator last, const key_compare& compare)
+ : base_type(EASTL_VECTOR_MAP_DEFAULT_ALLOCATOR), mValueCompare(compare)
+ {
+ insert(first, last);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>&
+ vector_map<K, T, C, A, RAC>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ mValueCompare = value_compare(x.mValueCompare);
+ return *this;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>&
+ vector_map<K, T, C, A, RAC>::operator=(this_type&& x)
+ {
+ base_type::operator=(eastl::move(x));
+ eastl::swap(mValueCompare, x.mValueCompare);
+ return *this;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_map<K, T, C, A, RAC>&
+ vector_map<K, T, C, A, RAC>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline void vector_map<K, T, C, A, RAC>::swap(this_type& x)
+ {
+ base_type::swap(x);
+ eastl::swap(mValueCompare, x.mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline const typename vector_map<K, T, C, A, RAC>::key_compare&
+ vector_map<K, T, C, A, RAC>::key_comp() const
+ {
+ return mValueCompare.c;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::key_compare&
+ vector_map<K, T, C, A, RAC>::key_comp()
+ {
+ return mValueCompare.c;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline const typename vector_map<K, T, C, A, RAC>::value_compare&
+ vector_map<K, T, C, A, RAC>::value_comp() const
+ {
+ return mValueCompare;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::value_compare&
+ vector_map<K, T, C, A, RAC>::value_comp()
+ {
+ return mValueCompare;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <class... Args>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, bool>
+ vector_map<K, T, C, A, RAC>::emplace(Args&&... args)
+ {
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...);
+ #endif
+ return insert(eastl::move(value));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <class... Args>
+ inline typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::emplace_hint(const_iterator position, Args&&... args)
+ {
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...);
+ #endif
+
+ return insert(position, eastl::move(value));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, bool>
+ vector_map<K, T, C, A, RAC>::insert(const value_type& value)
+ {
+ const iterator itLB(lower_bound(value.first));
+
+ if((itLB != end()) && !mValueCompare(value, *itLB))
+ return eastl::pair<iterator, bool>(itLB, false);
+
+ return eastl::pair<iterator, bool>(base_type::insert(itLB, value), true);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename P, typename>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, bool>
+ vector_map<K, T, C, A, RAC>::insert(P&& otherValue)
+ {
+ value_type value(eastl::forward<P>(otherValue));
+ const iterator itLB(lower_bound(value.first));
+
+ if((itLB != end()) && !mValueCompare(value, *itLB))
+ return eastl::pair<iterator, bool>(itLB, false);
+
+ return eastl::pair<iterator, bool>(base_type::insert(itLB, eastl::move(value)), true);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, bool>
+ vector_map<K, T, C, A, RAC>::insert(const key_type& otherValue)
+ {
+ value_type value(eastl::pair_first_construct, otherValue);
+ const iterator itLB(lower_bound(value.first));
+
+ if((itLB != end()) && !mValueCompare(value, *itLB))
+ return eastl::pair<iterator, bool>(itLB, false);
+
+ return eastl::pair<iterator, bool>(base_type::insert(itLB, eastl::move(value)), true);
+ }
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, bool>
+ vector_map<K, T, C, A, RAC>::insert(key_type&& otherValue)
+ {
+ value_type value(eastl::pair_first_construct, eastl::move(otherValue));
+ const iterator itLB(lower_bound(value.first));
+
+ if((itLB != end()) && !mValueCompare(value, *itLB))
+ return eastl::pair<iterator, bool>(itLB, false);
+
+ return eastl::pair<iterator, bool>(base_type::insert(itLB, eastl::move(value)), true);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::insert(const_iterator position, const value_type& value)
+ {
+ // We assume that the user knows what he is doing and has supplied us with
+ // a position that is right where value should be inserted (put in front of).
+ // We do a test to see if the position is correct. If so then we insert,
+ // if not then we ignore the input position.
+
+ if((position == end()) || mValueCompare(value, *position)) // If the element at position is greater than value...
+ {
+ if((position == begin()) || mValueCompare(*(position - 1), value)) // If the element before position is less than value...
+ return base_type::insert(position, value);
+ }
+
+ // In this case we either have an incorrect position or value is already present.
+ // We fall back to the regular insert function. An optimization would be to detect
+ // that the element is already present, but that's only useful if the user supplied
+ // a good position but a present element.
+ const eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, bool> result = insert(value);
+
+ return result.first;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::insert(const_iterator position, value_type&& value)
+ {
+ if((position == end()) || mValueCompare(value, *position)) // If the element at position is greater than value...
+ {
+ if((position == begin()) || mValueCompare(*(position - 1), value)) // If the element before position is less than value...
+ return base_type::insert(position, eastl::move(value));
+ }
+
+ const eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, bool> result = insert(eastl::move(value));
+
+ return result.first;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline void vector_map<K, T, C, A, RAC>::insert(std::initializer_list<value_type> ilist)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline void vector_map<K, T, C, A, RAC>::insert(InputIterator first, InputIterator last)
+ {
+ // To consider: Improve the speed of this by getting the length of the
+ // input range and resizing our container to that size
+ // before doing the insertions. We can't use reserve
+ // because we don't know if we are using a vector or not.
+ // Alternatively, force the user to do the reservation.
+ // To consider: When inserting values that come from a container
+ // like this container, use the property that they are
+ // known to be sorted and speed up the inserts here.
+ for(; first != last; ++first)
+ insert(*first);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::erase(const_iterator position)
+ {
+ // Note that we return iterator and not void. This allows for more efficient use of
+ // the container and is consistent with the C++ language defect report #130 (DR 130)
+ return base_type::erase(position);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::erase(const_iterator first, const_iterator last)
+ {
+ return base_type::erase(first, last);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::size_type
+ vector_map<K, T, C, A, RAC>::erase(const key_type& k)
+ {
+ const iterator it(find(k));
+
+ if(it != end()) // If it exists...
+ {
+ erase(it);
+ return 1;
+ }
+ return 0;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::reverse_iterator
+ vector_map<K, T, C, A, RAC>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(base_type::erase((++position).base()));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::reverse_iterator
+ vector_map<K, T, C, A, RAC>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ return reverse_iterator(base_type::erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::find(const key_type& k)
+ {
+ const eastl::pair<iterator, iterator> pairIts(equal_range(k));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::const_iterator
+ vector_map<K, T, C, A, RAC>::find(const key_type& k) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(equal_range(k));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::find_as(const U& u, BinaryPredicate predicate)
+ {
+ const eastl::pair<iterator, iterator> pairIts(equal_range(u, predicate));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline typename vector_map<K, T, C, A, RAC>::const_iterator
+ vector_map<K, T, C, A, RAC>::find_as(const U& u, BinaryPredicate predicate) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(equal_range(u, predicate));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::size_type
+ vector_map<K, T, C, A, RAC>::count(const key_type& k) const
+ {
+ const const_iterator it(find(k));
+ return (it != end()) ? (size_type)1 : (size_type)0;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::lower_bound(const key_type& k)
+ {
+ return eastl::lower_bound(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::const_iterator
+ vector_map<K, T, C, A, RAC>::lower_bound(const key_type& k) const
+ {
+ return eastl::lower_bound(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::iterator
+ vector_map<K, T, C, A, RAC>::upper_bound(const key_type& k)
+ {
+ return eastl::upper_bound(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::const_iterator
+ vector_map<K, T, C, A, RAC>::upper_bound(const key_type& k) const
+ {
+ return eastl::upper_bound(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, typename vector_map<K, T, C, A, RAC>::iterator>
+ vector_map<K, T, C, A, RAC>::equal_range(const key_type& k)
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ const iterator itLower(lower_bound(k));
+
+ if((itLower == end()) || mValueCompare(k, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<iterator, iterator>(itLower, itLower);
+
+ iterator itUpper(itLower);
+ return eastl::pair<iterator, iterator>(itLower, ++itUpper);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::const_iterator, typename vector_map<K, T, C, A, RAC>::const_iterator>
+ vector_map<K, T, C, A, RAC>::equal_range(const key_type& k) const
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ const const_iterator itLower(lower_bound(k));
+
+ if((itLower == end()) || mValueCompare(k, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<const_iterator, const_iterator>(itLower, itLower);
+
+ const_iterator itUpper(itLower);
+ return eastl::pair<const_iterator, const_iterator>(itLower, ++itUpper);
+ }
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::iterator, typename vector_map<K, T, C, A, RAC>::iterator>
+ vector_map<K, T, C, A, RAC>::equal_range(const U& u, BinaryPredicate predicate)
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ map_value_compare<U, value_type, BinaryPredicate> predicate_cmp(predicate);
+
+ const iterator itLower(eastl::lower_bound(begin(), end(), u, predicate_cmp));
+
+ if((itLower == end()) || predicate_cmp(u, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<iterator, iterator>(itLower, itLower);
+
+ iterator itUpper(itLower);
+ return eastl::pair<iterator, iterator>(itLower, ++itUpper);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline eastl::pair<typename vector_map<K, T, C, A, RAC>::const_iterator, typename vector_map<K, T, C, A, RAC>::const_iterator>
+ vector_map<K, T, C, A, RAC>::equal_range(const U& u, BinaryPredicate predicate) const
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ map_value_compare<U, value_type, BinaryPredicate> predicate_cmp(predicate);
+
+ const const_iterator itLower(eastl::lower_bound(begin(), end(), u, predicate_cmp));
+
+ if((itLower == end()) || predicate_cmp(u, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<const_iterator, const_iterator>(itLower, itLower);
+
+ const_iterator itUpper(itLower);
+ return eastl::pair<const_iterator, const_iterator>(itLower, ++itUpper);
+ }
+
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::mapped_type&
+ vector_map<K, T, C, A, RAC>::operator[](const key_type& k)
+ {
+ iterator itLB(lower_bound(k));
+
+ if((itLB == end()) || key_comp()(k, (*itLB).first))
+ itLB = insert(itLB, value_type(k, mapped_type()));
+ return (*itLB).second;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_map<K, T, C, A, RAC>::mapped_type&
+ vector_map<K, T, C, A, RAC>::operator[](key_type&& k)
+ {
+ iterator itLB(lower_bound(k));
+
+ if((itLB == end()) || key_comp()(k, (*itLB).first))
+ itLB = insert(itLB, value_type(eastl::move(k), mapped_type()));
+ return (*itLB).second;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator==(const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(b.begin(), b.end(), a.begin());
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator<(const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end(), a.value_comp());
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator!=(const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator>(const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator<=(const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator>=(const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline void swap(vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& a,
+ vector_map<Key, T, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/vector_multimap.h b/EASTL/include/EASTL/vector_multimap.h
new file mode 100644
index 0000000..235f671
--- /dev/null
+++ b/EASTL/include/EASTL/vector_multimap.h
@@ -0,0 +1,843 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+//////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements vector_multimap. It acts much like std::multimap, except
+// its underlying representation is a random access container such as vector.
+// These containers are sometimes also known as "sorted vectors."
+// vector_maps have an advantage over conventional maps in that their memory
+// is contiguous and node-less. The result is that lookups are faster, more
+// cache friendly (which potentially more so benefits speed), and the container
+// uses less memory. The downside is that inserting new items into the container
+// is slower if they are inserted in random order instead of in sorted order.
+// This tradeoff is well-worth it for many cases. Note that vector_multimap allows
+// you to use a deque or other random access container which may perform
+// better for you than vector.
+//
+// Note that with vector_set, vector_multiset, vector_map, vector_multimap
+// that the modification of the container potentially invalidates all
+// existing iterators into the container, unlike what happens with conventional
+// sets and maps.
+//////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_VECTOR_MULTIMAP_H
+#define EASTL_VECTOR_MULTIMAP_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/functional.h>
+#include <EASTL/vector.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_VECTOR_MULTIMAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_VECTOR_MULTIMAP_DEFAULT_NAME
+ #define EASTL_VECTOR_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " vector_multimap" // Unless the user overrides something, this is "EASTL vector_multimap".
+ #endif
+
+
+ /// EASTL_VECTOR_MULTIMAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_VECTOR_MULTIMAP_DEFAULT_ALLOCATOR
+ #define EASTL_VECTOR_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_VECTOR_MULTIMAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// multimap_value_compare
+ ///
+ /// Our adapter for the comparison function in the template parameters.
+ ///
+ template <typename Key, typename Value, typename Compare>
+ class multimap_value_compare : public binary_function<Value, Value, bool>
+ {
+ public:
+ Compare c;
+
+ multimap_value_compare(const Compare& x)
+ : c(x) {}
+
+ public:
+ bool operator()(const Value& a, const Value& b) const
+ { return c(a.first, b.first); }
+
+ bool operator()(const Value& a, const Key& b) const
+ { return c(a.first, b); }
+
+ bool operator()(const Key& a, const Value& b) const
+ { return c(a, b.first); }
+
+ bool operator()(const Key& a, const Key& b) const
+ { return c(a, b); }
+
+ }; // multimap_value_compare
+
+
+
+ /// vector_multimap
+ ///
+ /// Implements a multimap via a random access container such as a vector.
+ ///
+ /// Note that with vector_set, vector_multiset, vector_map, vector_multimap
+ /// that the modification of the container potentially invalidates all
+ /// existing iterators into the container, unlike what happens with conventional
+ /// sets and maps.
+ ///
+ /// Note that the erase functions return iterator and not void. This allows for
+ /// more efficient use of the container and is consistent with the C++ language
+ /// defect report #130 (DR 130)
+ ///
+ /// Note that we set the value_type to be pair<Key, T> and not pair<const Key, T>.
+ /// This means that the underlying container (e.g vector) is a container of pair<Key, T>.
+ /// Our vector and deque implementations are optimized to assign values in-place and
+ /// using a vector of pair<const Key, T> (note the const) would make it hard to use
+ /// our existing vector implementation without a lot of headaches. As a result,
+ /// at least for the time being we do away with the const. This means that the
+ /// insertion type varies between map and vector_map in that the latter doesn't take
+ /// const. This also means that a certain amount of automatic safety provided by
+ /// the implementation is lost, as the compiler will let the wayward user modify
+ /// a key and thus make the container no longer ordered behind its back.
+ ///
+ template <typename Key, typename T, typename Compare = eastl::less<Key>,
+ typename Allocator = EASTLAllocatorType,
+ typename RandomAccessContainer = eastl::vector<eastl::pair<Key, T>, Allocator> >
+ class vector_multimap : public RandomAccessContainer
+ {
+ public:
+ typedef RandomAccessContainer base_type;
+ typedef vector_multimap<Key, T, Compare, Allocator, RandomAccessContainer> this_type;
+ typedef Allocator allocator_type;
+ typedef Key key_type;
+ typedef T mapped_type;
+ typedef eastl::pair<Key, T> value_type;
+ typedef Compare key_compare;
+ typedef multimap_value_compare<Key, value_type, Compare> value_compare;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::reverse_iterator reverse_iterator;
+ typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+
+ using base_type::begin;
+ using base_type::end;
+ using base_type::get_allocator;
+
+ protected:
+ value_compare mValueCompare;
+
+ public:
+ // We have an empty ctor and a ctor that takes an allocator instead of one for both
+ // because this way our RandomAccessContainer wouldn't be required to have an constructor
+ // that takes allocator_type.
+ vector_multimap();
+ explicit vector_multimap(const allocator_type& allocator);
+ explicit vector_multimap(const key_compare& comp, const allocator_type& allocator = EASTL_VECTOR_MULTIMAP_DEFAULT_ALLOCATOR);
+ vector_multimap(const this_type& x);
+ vector_multimap(this_type&& x);
+ vector_multimap(this_type&& x, const allocator_type& allocator);
+ vector_multimap(std::initializer_list<value_type> ilist, const key_compare& compare = key_compare(), const allocator_type& allocator = EASTL_VECTOR_MULTIMAP_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ vector_multimap(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ template <typename InputIterator>
+ vector_multimap(InputIterator first, InputIterator last, const key_compare& compare); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ const key_compare& key_comp() const;
+ key_compare& key_comp();
+
+ const value_compare& value_comp() const;
+ value_compare& value_comp();
+
+ // Inherited from base class:
+ //
+ // allocator_type& get_allocator();
+ // void set_allocator(const allocator_type& allocator);
+ //
+ // iterator begin();
+ // const_iterator begin() const;
+ // const_iterator cbegin() const;
+ //
+ // iterator end();
+ // const_iterator end() const;
+ // const_iterator cend() const;
+ //
+ // reverse_iterator rbegin();
+ // const_reverse_iterator rbegin() const;
+ // const_reverse_iterator crbegin() const;
+ //
+ // reverse_iterator rend();
+ // const_reverse_iterator rend() const;
+ // const_reverse_iterator crend() const;
+ //
+ // size_type size() const;
+ // bool empty() const;
+ // void clear();
+
+ template <class... Args>
+ iterator emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args);
+
+ iterator insert(const value_type& value); // The signature of this function was change in EASTL v2.05.00 from (the mistaken) pair<iterator, bool> to (the correct) iterator.
+
+ template <typename P, typename = eastl::enable_if_t<eastl::is_constructible_v<value_type, P&&>>>
+ iterator insert(P&& otherValue);
+
+ iterator insert(const key_type& otherValue);
+ iterator insert(key_type&& otherValue);
+
+ iterator insert(const_iterator position, const value_type& value);
+ iterator insert(const_iterator position, value_type&& value);
+
+ void insert(std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& k);
+
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ iterator find(const key_type& k);
+ const_iterator find(const key_type& k) const;
+
+ template <typename U, typename BinaryPredicate>
+ iterator find_as(const U& u, BinaryPredicate predicate);
+
+ template <typename U, typename BinaryPredicate>
+ const_iterator find_as(const U& u, BinaryPredicate predicate) const;
+
+ size_type count(const key_type& k) const;
+
+ iterator lower_bound(const key_type& k);
+ const_iterator lower_bound(const key_type& k) const;
+
+ iterator upper_bound(const key_type& k);
+ const_iterator upper_bound(const key_type& k) const;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ /// equal_range_small
+ /// This is a special version of equal_range which is optimized for the
+ /// case of there being few or no duplicated keys in the tree.
+ eastl::pair<iterator, iterator> equal_range_small(const key_type& k)
+ {
+ // Defined inline because VC7.1 is broken for when it's defined outside.
+ const iterator itLower(lower_bound(k));
+ iterator itUpper(itLower);
+
+ while((itUpper != end()) && !mValueCompare(k, *itUpper))
+ ++itUpper;
+
+ return eastl::pair<iterator, iterator>(itLower, itUpper);
+ }
+ eastl::pair<const_iterator, const_iterator> equal_range_small(const key_type& k) const;
+
+ // Functions which are disallowed due to being unsafe.
+ void push_back(const value_type& value) = delete;
+ reference push_back() = delete;
+ void* push_back_uninitialized() = delete;
+ template <class... Args>
+ reference emplace_back(Args&&...) = delete;
+
+ // NOTE(rparolin): It is undefined behaviour if user code fails to ensure the container
+ // invariants are respected by performing an explicit call to 'sort' before any other
+ // operations on the container are performed that do not clear the elements.
+ //
+ // 'push_back_unsorted' and 'emplace_back_unsorted' do not satisfy container invariants
+ // for being sorted. We provide these overloads explicitly labelled as '_unsorted' as an
+ // optimization opportunity when batch inserting elements so users can defer the cost of
+ // sorting the container once when all elements are contained. This was done to clarify
+ // the intent of code by leaving a trace that a manual call to sort is required.
+ //
+ template <typename... Args> decltype(auto) push_back_unsorted(Args&&... args)
+ { return base_type::push_back(eastl::forward<Args>(args)...); }
+ template <typename... Args> decltype(auto) emplace_back_unsorted(Args&&... args)
+ { return base_type::emplace_back(eastl::forward<Args>(args)...); }
+
+ }; // vector_multimap
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // vector_multimap
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap()
+ : base_type(), mValueCompare(C())
+ {
+ #if EASTL_NAME_ENABLED
+ get_allocator().set_name(EASTL_VECTOR_MULTIMAP_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap(const allocator_type& allocator)
+ : base_type(allocator), mValueCompare(C())
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap(const key_compare& comp, const allocator_type& allocator)
+ : base_type(allocator), mValueCompare(comp)
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap(const this_type& x)
+ : base_type(x), mValueCompare(x.mValueCompare)
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap(this_type&& x)
+ : base_type(eastl::move(x)), mValueCompare(x.mValueCompare)
+ {
+ // Empty. Note: x is left with empty contents but its original mValueCompare instead of the default one.
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator), mValueCompare(x.mValueCompare)
+ {
+ // Empty. Note: x is left with empty contents but its original mValueCompare instead of the default one.
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap(std::initializer_list<value_type> ilist, const key_compare& compare, const allocator_type& allocator)
+ : base_type(allocator), mValueCompare(compare)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap(InputIterator first, InputIterator last)
+ : base_type(EASTL_VECTOR_MULTIMAP_DEFAULT_ALLOCATOR), mValueCompare(key_compare())
+ {
+ insert(first, last);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline vector_multimap<K, T, C, A, RAC>::vector_multimap(InputIterator first, InputIterator last, const key_compare& compare)
+ : base_type(EASTL_VECTOR_MULTIMAP_DEFAULT_ALLOCATOR), mValueCompare(compare)
+ {
+ insert(first, last);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::this_type&
+ vector_multimap<K, T, C, A, RAC>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ mValueCompare = value_compare(x.mValueCompare);
+ return *this;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::this_type&
+ vector_multimap<K, T, C, A, RAC>::operator=(this_type&& x)
+ {
+ base_type::operator=(eastl::move(x));
+ eastl::swap(mValueCompare, x.mValueCompare);
+ return *this;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::this_type&
+ vector_multimap<K, T, C, A, RAC>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline void vector_multimap<K, T, C, A, RAC>::swap(this_type& x)
+ {
+ base_type::swap(x);
+ eastl::swap(mValueCompare, x.mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline const typename vector_multimap<K, T, C, A, RAC>::key_compare&
+ vector_multimap<K, T, C, A, RAC>::key_comp() const
+ {
+ return mValueCompare.c;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::key_compare&
+ vector_multimap<K, T, C, A, RAC>::key_comp()
+ {
+ return mValueCompare.c;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline const typename vector_multimap<K, T, C, A, RAC>::value_compare&
+ vector_multimap<K, T, C, A, RAC>::value_comp() const
+ {
+ return mValueCompare;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::value_compare&
+ vector_multimap<K, T, C, A, RAC>::value_comp()
+ {
+ return mValueCompare;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <class... Args>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::emplace(Args&&... args)
+ {
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...);
+ #endif
+ return insert(eastl::move(value));
+ }
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <class... Args>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::emplace_hint(const_iterator position, Args&&... args)
+ {
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...);
+ #endif
+ return insert(position, eastl::move(value));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::insert(const value_type& value)
+ {
+ const iterator itUB(upper_bound(value.first));
+ return base_type::insert(itUB, value);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename P, typename>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::insert(P&& otherValue)
+ {
+ value_type value(eastl::forward<P>(otherValue));
+ const iterator itUB(upper_bound(value.first));
+ return base_type::insert(itUB, eastl::move(value));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::insert(const key_type& otherValue)
+ {
+ value_type value(eastl::pair_first_construct, otherValue);
+ const iterator itUB(upper_bound(value.first));
+ return base_type::insert(itUB, eastl::move(value));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::insert(key_type&& otherValue)
+ {
+ value_type value(eastl::pair_first_construct, eastl::move(otherValue));
+ const iterator itUB(upper_bound(value.first));
+ return base_type::insert(itUB, eastl::move(value));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::insert(const_iterator position, const value_type& value)
+ {
+ // We assume that the user knows what he is doing and has supplied us with
+ // a position that is right where value should be inserted (put in front of).
+ // We do a test to see if the position is correct. If so then we insert,
+ // if not then we ignore the input position. However,
+
+ if((position == end()) || !mValueCompare(*position, value)) // If value is <= the element at position...
+ {
+ if((position == begin()) || !mValueCompare(value, *(position - 1))) // If value is >= the element before position...
+ return base_type::insert(position, value);
+ }
+
+ // In this case we have an incorrect position. We fall back to the regular insert function.
+ return insert(value);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::insert(const_iterator position, value_type&& value)
+ {
+ if((position == end()) || !mValueCompare(*position, value)) // If value is <= the element at position...
+ {
+ if((position == begin()) || !mValueCompare(value, *(position - 1))) // If value is >= the element before position...
+ return base_type::insert(position, eastl::move(value));
+ }
+
+ // In this case we have an incorrect position. We fall back to the regular insert function.
+ return insert(eastl::move(value));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline void vector_multimap<K, T, C, A, RAC>::insert(std::initializer_list<value_type> ilist)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline void vector_multimap<K, T, C, A, RAC>::insert(InputIterator first, InputIterator last)
+ {
+ // To consider: Improve the speed of this by getting the length of the
+ // input range and resizing our container to that size
+ // before doing the insertions. We can't use reserve
+ // because we don't know if we are using a vector or not.
+ // Alternatively, force the user to do the reservation.
+ // To consider: When inserting values that come from a container
+ // like this container, use the property that they are
+ // known to be sorted and speed up the inserts here.
+ for(; first != last; ++first)
+ base_type::insert(upper_bound((*first).first), *first);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::erase(const_iterator position)
+ {
+ // Note that we return iterator and not void. This allows for more efficient use of
+ // the container and is consistent with the C++ language defect report #130 (DR 130)
+ return base_type::erase(position);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::erase(const_iterator first, const_iterator last)
+ {
+ return base_type::erase(first, last);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::size_type
+ vector_multimap<K, T, C, A, RAC>::erase(const key_type& k)
+ {
+ const eastl::pair<iterator, iterator> pairIts(equal_range(k));
+
+ if(pairIts.first != pairIts.second)
+ base_type::erase(pairIts.first, pairIts.second);
+
+ return (size_type)eastl::distance(pairIts.first, pairIts.second); // This can result in any value >= 0.
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::reverse_iterator
+ vector_multimap<K, T, C, A, RAC>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(base_type::erase((++position).base()));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::reverse_iterator
+ vector_multimap<K, T, C, A, RAC>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ return reverse_iterator(base_type::erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::find(const key_type& k)
+ {
+ const eastl::pair<iterator, iterator> pairIts(equal_range(k));
+
+ if(pairIts.first != pairIts.second)
+ return pairIts.first;
+ return end();
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::const_iterator
+ vector_multimap<K, T, C, A, RAC>::find(const key_type& k) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(equal_range(k));
+
+ if(pairIts.first != pairIts.second)
+ return pairIts.first;
+ return end();
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline typename vector_multimap<K, T, C, A, RAC>::const_iterator
+ vector_multimap<K, T, C, A, RAC>::find_as(const U& u, BinaryPredicate predicate) const
+ {
+ multimap_value_compare<U, value_type, BinaryPredicate> predicate_cmp(predicate);
+ const eastl::pair<const_iterator, const_iterator> pairIts(eastl::equal_range(begin(), end(), u, predicate_cmp));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::find_as(const U& u, BinaryPredicate predicate)
+ {
+ multimap_value_compare<U, value_type, BinaryPredicate> predicate_cmp(predicate);
+ const eastl::pair<iterator, iterator> pairIts(eastl::equal_range(begin(), end(), u, predicate_cmp));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::size_type
+ vector_multimap<K, T, C, A, RAC>::count(const key_type& k) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(equal_range(k));
+ return (size_type)eastl::distance(pairIts.first, pairIts.second);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::lower_bound(const key_type& k)
+ {
+ return eastl::lower_bound(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::const_iterator
+ vector_multimap<K, T, C, A, RAC>::lower_bound(const key_type& k) const
+ {
+ return eastl::lower_bound(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::iterator
+ vector_multimap<K, T, C, A, RAC>::upper_bound(const key_type& k)
+ {
+ return eastl::upper_bound(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline typename vector_multimap<K, T, C, A, RAC>::const_iterator
+ vector_multimap<K, T, C, A, RAC>::upper_bound(const key_type& k) const
+ {
+ return eastl::upper_bound(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_multimap<K, T, C, A, RAC>::iterator, typename vector_multimap<K, T, C, A, RAC>::iterator>
+ vector_multimap<K, T, C, A, RAC>::equal_range(const key_type& k)
+ {
+ return eastl::equal_range(begin(), end(), k, mValueCompare);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_multimap<K, T, C, A, RAC>::const_iterator, typename vector_multimap<K, T, C, A, RAC>::const_iterator>
+ vector_multimap<K, T, C, A, RAC>::equal_range(const key_type& k) const
+ {
+ return eastl::equal_range(begin(), end(), k, mValueCompare);
+ }
+
+
+ /*
+ // VC++ fails to compile this when defined here, saying the function isn't a member of vector_multimap.
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_multimap<K, T, C, A, RAC>::iterator, typename vector_multimap<K, T, C, A, RAC>::iterator>
+ vector_multimap<K, T, C, A, RAC>::equal_range_small(const key_type& k)
+ {
+ const iterator itLower(lower_bound(k));
+ iterator itUpper(itLower);
+
+ while((itUpper != end()) && !mValueCompare(k, *itUpper))
+ ++itUpper;
+
+ return eastl::pair<iterator, iterator>(itLower, itUpper);
+ }
+ */
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_multimap<K, T, C, A, RAC>::const_iterator, typename vector_multimap<K, T, C, A, RAC>::const_iterator>
+ vector_multimap<K, T, C, A, RAC>::equal_range_small(const key_type& k) const
+ {
+ const const_iterator itLower(lower_bound(k));
+ const_iterator itUpper(itLower);
+
+ while((itUpper != end()) && !mValueCompare(k, *itUpper))
+ ++itUpper;
+
+ return eastl::pair<const_iterator, const_iterator>(itLower, itUpper);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline bool operator==(const vector_multimap<K, T, C, A, RAC>& a,
+ const vector_multimap<K, T, C, A, RAC>& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(b.begin(), b.end(), a.begin());
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline bool operator<(const vector_multimap<K, T, C, A, RAC>& a,
+ const vector_multimap<K, T, C, A, RAC>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end(), a.value_comp());
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline bool operator!=(const vector_multimap<K, T, C, A, RAC>& a,
+ const vector_multimap<K, T, C, A, RAC>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline bool operator>(const vector_multimap<K, T, C, A, RAC>& a,
+ const vector_multimap<K, T, C, A, RAC>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline bool operator<=(const vector_multimap<K, T, C, A, RAC>& a,
+ const vector_multimap<K, T, C, A, RAC>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline bool operator>=(const vector_multimap<K, T, C, A, RAC>& a,
+ const vector_multimap<K, T, C, A, RAC>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename K, typename T, typename C, typename A, typename RAC>
+ inline void swap(vector_multimap<K, T, C, A, RAC>& a,
+ vector_multimap<K, T, C, A, RAC>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/vector_multiset.h b/EASTL/include/EASTL/vector_multiset.h
new file mode 100644
index 0000000..7fd10a5
--- /dev/null
+++ b/EASTL/include/EASTL/vector_multiset.h
@@ -0,0 +1,764 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+//////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements vector_multiset. It acts much like std::multiset, except
+// its underlying representation is a random access container such as vector.
+// These containers are sometimes also known as "sorted vectors."
+// vector_sets have an advantage over conventional sets in that their memory
+// is contiguous and node-less. The result is that lookups are faster, more
+// cache friendly (which potentially more so benefits speed), and the container
+// uses less memory. The downside is that inserting new items into the container
+// is slower if they are inserted in random order instead of in sorted order.
+// This tradeoff is well-worth it for many cases. Note that vector_multiset allows
+// you to use a deque or other random access container which may perform
+// better for you than vector.
+//
+// Note that with vector_set, vector_multiset, vector_map, vector_multimap
+// that the modification of the container potentially invalidates all
+// existing iterators into the container, unlike what happens with conventional
+// sets and maps.
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_VECTOR_MULTISET_H
+#define EASTL_VECTOR_MULTISET_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/functional.h>
+#include <EASTL/vector.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_VECTOR_MULTISET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_VECTOR_MULTISET_DEFAULT_NAME
+ #define EASTL_VECTOR_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " vector_multiset" // Unless the user overrides something, this is "EASTL vector_multiset".
+ #endif
+
+
+ /// EASTL_VECTOR_MULTISET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_VECTOR_MULTISET_DEFAULT_ALLOCATOR
+ #define EASTL_VECTOR_MULTISET_DEFAULT_ALLOCATOR allocator_type(EASTL_VECTOR_MULTISET_DEFAULT_NAME)
+ #endif
+
+
+
+ /// vector_multiset
+ ///
+ /// Implements a multiset via a random access container such as a vector.
+ /// This container is also known as a sorted_vector. We choose to call it
+ /// vector_multiset, as that is a more consistent universally applicable name
+ /// for it in this library.
+ ///
+ /// Note that with vector_set, vector_multiset, vector_map, vector_multimap
+ /// that the modification of the container potentially invalidates all
+ /// existing iterators into the container, unlike what happens with conventional
+ /// sets and maps.
+ ///
+ /// To consider: std::multiset has the limitation that values in the set cannot
+ /// be modified, with the idea that modifying them would change their sort
+ /// order. We have the opportunity to make it so that values can be modified
+ /// via changing iterators to be non-const, with the downside being that
+ /// the container can get screwed up if the user screws up. Alternatively,
+ /// we can do what std STL does and require the user to make their stored
+ /// classes use 'mutable' as needed. See the C++ standard defect report
+ /// #103 (DR 103) for a discussion of this.
+ ///
+ /// Note that the erase functions return iterator and not void. This allows for
+ /// more efficient use of the container and is consistent with the C++ language
+ /// defect report #130 (DR 130)
+ ///
+ template <typename Key, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType,
+ typename RandomAccessContainer = eastl::vector<Key, Allocator> >
+ class vector_multiset : public RandomAccessContainer
+ {
+ public:
+ typedef RandomAccessContainer base_type;
+ typedef vector_multiset<Key, Compare, Allocator, RandomAccessContainer> this_type;
+ typedef Allocator allocator_type;
+ typedef Key key_type;
+ typedef Key value_type;
+ typedef Compare key_compare;
+ typedef Compare value_compare;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef typename base_type::iterator iterator; // **Currently typedefing from iterator instead of const_iterator due to const issues **: Note that we typedef from const_iterator. This is by design, as sets are sorted and values cannot be modified. To consider: allow values to be modified and thus risk changing their sort values.
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::reverse_iterator reverse_iterator; // See notes directly above regarding const_iterator.
+ typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+
+ using base_type::begin;
+ using base_type::end;
+ using base_type::get_allocator;
+
+ protected:
+ value_compare mCompare; // To consider: Declare this instead as: 'key_compare mKeyCompare'
+
+ public:
+ // We have an empty ctor and a ctor that takes an allocator instead of one for both
+ // because this way our RandomAccessContainer wouldn't be required to have an constructor
+ // that takes allocator_type.
+ vector_multiset();
+ explicit vector_multiset(const allocator_type& allocator);
+ explicit vector_multiset(const key_compare& comp, const allocator_type& allocator = EASTL_VECTOR_MULTISET_DEFAULT_ALLOCATOR);
+ vector_multiset(const this_type& x);
+ vector_multiset(this_type&& x);
+ vector_multiset(this_type&& x, const allocator_type& allocator);
+ vector_multiset(std::initializer_list<value_type> ilist, const key_compare& compare = key_compare(), const allocator_type& allocator = EASTL_VECTOR_MULTISET_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ vector_multiset(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ template <typename InputIterator>
+ vector_multiset(InputIterator first, InputIterator last, const key_compare& compare); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ const key_compare& key_comp() const;
+ key_compare& key_comp();
+
+ const value_compare& value_comp() const;
+ value_compare& value_comp();
+
+ // Inherited from base class:
+ //
+ // allocator_type& get_allocator();
+ // void set_allocator(const allocator_type& allocator);
+ //
+ // iterator begin();
+ // const_iterator begin() const;
+ // const_iterator cbegin() const;
+ //
+ // iterator end();
+ // const_iterator end() const;
+ // const_iterator cend() const;
+ //
+ // reverse_iterator rbegin();
+ // const_reverse_iterator rbegin() const;
+ // const_reverse_iterator crbegin() const;
+ //
+ // reverse_iterator rend();
+ // const_reverse_iterator rend() const;
+ // const_reverse_iterator crend() const;
+ //
+ // size_type size() const;
+ // bool empty() const;
+ // void clear();
+
+ template <class... Args>
+ iterator emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args);
+
+ iterator insert(const value_type& value); // The signature of this function was change in EASTL v2.05.00 from (the mistaken) pair<iterator, bool> to (the correct) iterator.
+ iterator insert(const_iterator position, const value_type& value);
+ iterator insert(const_iterator position, value_type&& value);
+ void insert(std::initializer_list<value_type> ilist);
+
+ template <typename P>
+ iterator insert(P&& otherValue);
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& k);
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ iterator find(const key_type& k);
+ const_iterator find(const key_type& k) const;
+
+ template <typename U, typename BinaryPredicate>
+ iterator find_as(const U& u, BinaryPredicate predicate);
+
+ template <typename U, typename BinaryPredicate>
+ const_iterator find_as(const U& u, BinaryPredicate predicate) const;
+
+ size_type count(const key_type& k) const;
+
+ iterator lower_bound(const key_type& k);
+ const_iterator lower_bound(const key_type& k) const;
+
+ iterator upper_bound(const key_type& k);
+ const_iterator upper_bound(const key_type& k) const;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ /// equal_range_small
+ /// This is a special version of equal_range which is optimized for the
+ /// case of there being few or no duplicated keys in the tree.
+ eastl::pair<iterator, iterator> equal_range_small(const key_type& k)
+ {
+ // Defined inline because VC7.1 is broken for when it's defined outside.
+ const iterator itLower(lower_bound(k));
+ iterator itUpper(itLower);
+
+ while((itUpper != end()) && !mCompare(k, *itUpper))
+ ++itUpper;
+
+ return eastl::pair<iterator, iterator>(itLower, itUpper);
+ }
+ eastl::pair<const_iterator, const_iterator> equal_range_small(const key_type& k) const;
+
+ // Functions which are disallowed due to being unsafe.
+ void push_back(const value_type& value) = delete;
+ reference push_back() = delete;
+ void* push_back_uninitialized() = delete;
+ template <class... Args>
+ reference emplace_back(Args&&...) = delete;
+
+ // NOTE(rparolin): It is undefined behaviour if user code fails to ensure the container
+ // invariants are respected by performing an explicit call to 'sort' before any other
+ // operations on the container are performed that do not clear the elements.
+ //
+ // 'push_back_unsorted' and 'emplace_back_unsorted' do not satisfy container invariants
+ // for being sorted. We provide these overloads explicitly labelled as '_unsorted' as an
+ // optimization opportunity when batch inserting elements so users can defer the cost of
+ // sorting the container once when all elements are contained. This was done to clarify
+ // the intent of code by leaving a trace that a manual call to sort is required.
+ //
+ template <typename... Args> decltype(auto) push_back_unsorted(Args&&... args)
+ { return base_type::push_back(eastl::forward<Args>(args)...); }
+ template <typename... Args> decltype(auto) emplace_back_unsorted(Args&&... args)
+ { return base_type::emplace_back(eastl::forward<Args>(args)...); }
+
+ }; // vector_multiset
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // vector_multiset
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset()
+ : base_type(), mCompare(C())
+ {
+ get_allocator().set_name(EASTL_VECTOR_MULTISET_DEFAULT_NAME);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset(const allocator_type& allocator)
+ : base_type(allocator), mCompare(C())
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset(const key_compare& comp, const allocator_type& allocator)
+ : base_type(allocator), mCompare(comp)
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset(InputIterator first, InputIterator last)
+ : base_type(EASTL_VECTOR_MULTISET_DEFAULT_ALLOCATOR), mCompare(key_compare())
+ {
+ insert(first, last);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset(InputIterator first, InputIterator last, const key_compare& compare)
+ : base_type(EASTL_VECTOR_MULTISET_DEFAULT_ALLOCATOR), mCompare(compare)
+ {
+ insert(first, last);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset(const this_type& x)
+ : base_type(x), mCompare(x.mCompare)
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset(this_type&& x)
+ : base_type(eastl::move(x)), mCompare(x.mCompare)
+ {
+ // Empty. Note: x is left with empty contents but its original mValueCompare instead of the default one.
+ }
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator), mCompare(x.mCompare)
+ {
+ // Empty. Note: x is left with empty contents but its original mValueCompare instead of the default one.
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>::vector_multiset(std::initializer_list<value_type> ilist, const key_compare& compare, const allocator_type& allocator)
+ : base_type(allocator), mCompare(compare)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>&
+ vector_multiset<K, C, A, RAC>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ mCompare = value_compare(x.mCompare);
+ return *this;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>&
+ vector_multiset<K, C, A, RAC>::operator=(this_type&& x)
+ {
+ base_type::operator=(eastl::move(x));
+ eastl::swap(mCompare, x.mCompare);
+ return *this;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_multiset<K, C, A, RAC>&
+ vector_multiset<K, C, A, RAC>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline void vector_multiset<K, C, A, RAC>::swap(this_type& x)
+ {
+ base_type::swap(x);
+ eastl::swap(mCompare, x.mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline const typename vector_multiset<K, C, A, RAC>::key_compare&
+ vector_multiset<K, C, A, RAC>::key_comp() const
+ {
+ return mCompare;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::key_compare&
+ vector_multiset<K, C, A, RAC>::key_comp()
+ {
+ return mCompare;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline const typename vector_multiset<K, C, A, RAC>::value_compare&
+ vector_multiset<K, C, A, RAC>::value_comp() const
+ {
+ return mCompare;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::value_compare&
+ vector_multiset<K, C, A, RAC>::value_comp()
+ {
+ return mCompare;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <class... Args>
+ typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::emplace(Args&&... args)
+ {
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...);
+ #endif
+ return insert(eastl::move(value));
+ }
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <class... Args>
+ typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::emplace_hint(const_iterator position, Args&&... args)
+ {
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...);
+ #endif
+ return insert(position, eastl::move(value));
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::insert(const value_type& value)
+ {
+ const iterator itUB(upper_bound(value));
+ return base_type::insert(itUB, value);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename P>
+ typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::insert(P&& otherValue)
+ {
+ value_type value(eastl::forward<P>(otherValue));
+ const iterator itUB(upper_bound(value));
+ return base_type::insert(itUB, eastl::move(value));
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline void vector_multiset<K, C, A, RAC>::insert(std::initializer_list<value_type> ilist)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::insert(const_iterator position, const value_type& value)
+ {
+ // We assume that the user knows what he is doing and has supplied us with
+ // a position that is right where value should be inserted (put in front of).
+ // We do a test to see if the position is correct. If so then we insert,
+ // if not then we ignore the input position. However,
+
+ if((position == end()) || !mCompare(*position, value)) // If value is <= the element at position...
+ {
+ if((position == begin()) || !mCompare(value, *(position - 1))) // If value is >= the element before position...
+ return base_type::insert(position, value);
+ }
+
+ // In this case we have an incorrect position. We fall back to the regular insert function.
+ return insert(value);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::insert(const_iterator position, value_type&& value)
+ {
+ if((position == end()) || !mCompare(*position, value)) // If value is <= the element at position...
+ {
+ if((position == begin()) || !mCompare(value, *(position - 1))) // If value is >= the element before position...
+ return base_type::insert(position, eastl::move(value));
+ }
+
+ // In this case we have an incorrect position. We fall back to the regular insert function.
+ return insert(eastl::move(value));
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline void vector_multiset<K, C, A, RAC>::insert(InputIterator first, InputIterator last)
+ {
+ // To consider: Improve the speed of this by getting the length of the
+ // input range and resizing our container to that size
+ // before doing the insertions. We can't use reserve
+ // because we don't know if we are using a vector or not.
+ // Alternatively, force the user to do the reservation.
+ // To consider: When inserting values that come from a container
+ // like this container, use the property that they are
+ // known to be sorted and speed up the inserts here.
+ for(; first != last; ++first)
+ base_type::insert(upper_bound(*first), *first);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::erase(const_iterator position)
+ {
+ // Note that we return iterator and not void. This allows for more efficient use of
+ // the container and is consistent with the C++ language defect report #130 (DR 130)
+ return base_type::erase(position);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::erase(const_iterator first, const_iterator last)
+ {
+ return base_type::erase(first, last);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::reverse_iterator
+ vector_multiset<K, C, A, RAC>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(base_type::erase((++position).base()));
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::reverse_iterator
+ vector_multiset<K, C, A, RAC>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ return reverse_iterator(base_type::erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::size_type
+ vector_multiset<K, C, A, RAC>::erase(const key_type& k)
+ {
+ const eastl::pair<iterator, iterator> pairIts(equal_range(k));
+
+ if(pairIts.first != pairIts.second)
+ base_type::erase(pairIts.first, pairIts.second);
+
+ return (size_type)eastl::distance(pairIts.first, pairIts.second); // This can result in any value >= 0.
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::find(const key_type& k)
+ {
+ const eastl::pair<iterator, iterator> pairIts(equal_range(k));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::find_as(const U& u, BinaryPredicate predicate)
+ {
+ const eastl::pair<iterator, iterator> pairIts(eastl::equal_range(begin(), end(), u, predicate));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline typename vector_multiset<K, C, A, RAC>::const_iterator
+ vector_multiset<K, C, A, RAC>::find_as(const U& u, BinaryPredicate predicate) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(eastl::equal_range(begin(), end(), u, predicate));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::const_iterator
+ vector_multiset<K, C, A, RAC>::find(const key_type& k) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(equal_range(k));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::size_type
+ vector_multiset<K, C, A, RAC>::count(const key_type& k) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(equal_range(k));
+ return (size_type)eastl::distance(pairIts.first, pairIts.second);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::lower_bound(const key_type& k)
+ {
+ return eastl::lower_bound(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::const_iterator
+ vector_multiset<K, C, A, RAC>::lower_bound(const key_type& k) const
+ {
+ return eastl::lower_bound(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::iterator
+ vector_multiset<K, C, A, RAC>::upper_bound(const key_type& k)
+ {
+ return eastl::upper_bound(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_multiset<K, C, A, RAC>::const_iterator
+ vector_multiset<K, C, A, RAC>::upper_bound(const key_type& k) const
+ {
+ return eastl::upper_bound(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_multiset<K, C, A, RAC>::iterator, typename vector_multiset<K, C, A, RAC>::iterator>
+ vector_multiset<K, C, A, RAC>::equal_range(const key_type& k)
+ {
+ return eastl::equal_range(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_multiset<K, C, A, RAC>::const_iterator, typename vector_multiset<K, C, A, RAC>::const_iterator>
+ vector_multiset<K, C, A, RAC>::equal_range(const key_type& k) const
+ {
+ return eastl::equal_range(begin(), end(), k, mCompare);
+ }
+
+
+ /*
+ // VC++ fails to compile this when defined here, saying the function isn't a memgber of vector_multimap.
+ template <typename K, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_multiset<K, C, A, RAC>::iterator, typename vector_multiset<K, C, A, RAC>::iterator>
+ vector_multiset<K, C, A, RAC>::equal_range_small(const key_type& k)
+ {
+ const iterator itLower(lower_bound(k));
+ iterator itUpper(itLower);
+
+ while((itUpper != end()) && !mCompare(k, *itUpper))
+ ++itUpper;
+
+ return eastl::pair<iterator, iterator>(itLower, itUpper);
+ }
+ */
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_multiset<K, C, A, RAC>::const_iterator, typename vector_multiset<K, C, A, RAC>::const_iterator>
+ vector_multiset<K, C, A, RAC>::equal_range_small(const key_type& k) const
+ {
+ const const_iterator itLower(lower_bound(k));
+ const_iterator itUpper(itLower);
+
+ while((itUpper != end()) && !mCompare(k, *itUpper))
+ ++itUpper;
+
+ return eastl::pair<const_iterator, const_iterator>(itLower, itUpper);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator==(const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(b.begin(), b.end(), a.begin());
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator<(const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end(), a.value_comp());
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator!=(const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator>(const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator<=(const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator>=(const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline void swap(vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& a,
+ vector_multiset<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/include/EASTL/vector_set.h b/EASTL/include/EASTL/vector_set.h
new file mode 100644
index 0000000..c03ec55
--- /dev/null
+++ b/EASTL/include/EASTL/vector_set.h
@@ -0,0 +1,793 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+//////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements vector_set. It acts much like std::set, except its
+// underlying representation is a random access container such as vector.
+// These containers are sometimes also known as "sorted vectors."
+// vector_sets have an advantage over conventional sets in that their memory
+// is contiguous and node-less. The result is that lookups are faster, more
+// cache friendly (which potentially more so benefits speed), and the container
+// uses less memory. The downside is that inserting new items into the container
+// is slower if they are inserted in random order instead of in sorted order.
+// This tradeoff is well-worth it for many cases. Note that vector_set allows
+// you to use a deque or other random access container which may perform
+// better for you than vector.
+//
+// Note that with vector_set, vector_multiset, vector_map, vector_multimap
+// that the modification of the container potentially invalidates all
+// existing iterators into the container, unlike what happens with conventional
+// sets and maps.
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_VECTOR_SET_H
+#define EASTL_VECTOR_SET_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/functional.h>
+#include <EASTL/vector.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_VECTOR_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_VECTOR_SET_DEFAULT_NAME
+ #define EASTL_VECTOR_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " vector_set" // Unless the user overrides something, this is "EASTL vector_set".
+ #endif
+
+
+ /// EASTL_VECTOR_SET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_VECTOR_SET_DEFAULT_ALLOCATOR
+ #define EASTL_VECTOR_SET_DEFAULT_ALLOCATOR allocator_type(EASTL_VECTOR_SET_DEFAULT_NAME)
+ #endif
+
+
+
+ /// vector_set
+ ///
+ /// Implements a set via a random access container such as a vector.
+ /// This container is also known as a sorted_vector. We choose to call it
+ /// vector_set, as that is a more consistent universally applicable name
+ /// for it in this library.
+ ///
+ /// Note that with vector_set, vector_multiset, vector_map, vector_multimap
+ /// that the modification of the container potentially invalidates all
+ /// existing iterators into the container, unlike what happens with conventional
+ /// sets and maps.
+ ///
+ /// To consider: std::set has the limitation that values in the set cannot
+ /// be modified, with the idea that modifying them would change their sort
+ /// order. We have the opportunity to make it so that values can be modified
+ /// via changing iterators to be non-const, with the downside being that
+ /// the container can get screwed up if the user screws up. Alternatively,
+ /// we can do what std STL does and require the user to make their stored
+ /// classes use 'mutable' as needed. See the C++ standard defect report
+ /// #103 (DR 103) for a discussion of this.
+ ///
+ /// Note that the erase functions return iterator and not void. This allows for
+ /// more efficient use of the container and is consistent with the C++ language
+ /// defect report #130 (DR 130)
+ ///
+ template <typename Key, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType,
+ typename RandomAccessContainer = eastl::vector<Key, Allocator> >
+ class vector_set : public RandomAccessContainer
+ {
+ public:
+ typedef RandomAccessContainer base_type;
+ typedef vector_set<Key, Compare, Allocator, RandomAccessContainer> this_type;
+ typedef Allocator allocator_type;
+ typedef Key key_type;
+ typedef Key value_type;
+ typedef Compare key_compare;
+ typedef Compare value_compare;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef typename base_type::iterator iterator; // **Currently typedefing from iterator instead of const_iterator due to const issues **: Note that we typedef from const_iterator. This is by design, as sets are sorted and values cannot be modified. To consider: allow values to be modified and thus risk changing their sort values.
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::reverse_iterator reverse_iterator; // See notes directly above regarding const_iterator.
+ typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+ typedef eastl::pair<iterator, bool> insert_return_type;
+
+ using base_type::begin;
+ using base_type::end;
+ using base_type::get_allocator;
+
+ protected:
+ value_compare mCompare; // To consider: Declare this instead as: 'key_compare mKeyCompare'
+
+ public:
+ // We have an empty ctor and a ctor that takes an allocator instead of one for both
+ // because this way our RandomAccessContainer wouldn't be required to have an constructor
+ // that takes allocator_type.
+ vector_set();
+ explicit vector_set(const allocator_type& allocator);
+ explicit vector_set(const key_compare& compare, const allocator_type& allocator = EASTL_VECTOR_SET_DEFAULT_ALLOCATOR);
+ vector_set(const this_type& x);
+ vector_set(this_type&& x);
+ vector_set(this_type&& x, const allocator_type& allocator);
+ vector_set(std::initializer_list<value_type> ilist, const key_compare& compare = key_compare(), const allocator_type& allocator = EASTL_VECTOR_SET_DEFAULT_ALLOCATOR);
+
+ template <typename InputIterator>
+ vector_set(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ template <typename InputIterator>
+ vector_set(InputIterator first, InputIterator last, const key_compare& compare); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ const key_compare& key_comp() const;
+ key_compare& key_comp();
+
+ const value_compare& value_comp() const;
+ value_compare& value_comp();
+
+ // Inherited from base class:
+ //
+ // allocator_type& get_allocator();
+ // void set_allocator(const allocator_type& allocator);
+ //
+ // iterator begin();
+ // const_iterator begin() const;
+ // const_iterator cbegin() const;
+ //
+ // iterator end();
+ // const_iterator end() const;
+ // const_iterator cend() const;
+ //
+ // reverse_iterator rbegin();
+ // const_reverse_iterator rbegin() const;
+ // const_reverse_iterator crbegin() const;
+ //
+ // reverse_iterator rend();
+ // const_reverse_iterator rend() const;
+ // const_reverse_iterator crend() const;
+ //
+ // size_type size() const;
+ // bool empty() const;
+ // void clear();
+
+ template <class... Args>
+ eastl::pair<iterator, bool> emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args);
+
+ eastl::pair<iterator, bool> insert(const value_type& value);
+ template <typename P>
+ pair<iterator, bool> insert(P&& otherValue);
+
+ iterator insert(const_iterator position, const value_type& value);
+ iterator insert(const_iterator position, value_type&& value);
+
+ void insert(std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& k);
+
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ iterator find(const key_type& k);
+ const_iterator find(const key_type& k) const;
+
+ template <typename U, typename BinaryPredicate>
+ iterator find_as(const U& u, BinaryPredicate predicate);
+
+ template <typename U, typename BinaryPredicate>
+ const_iterator find_as(const U& u, BinaryPredicate predicate) const;
+
+ size_type count(const key_type& k) const;
+
+ iterator lower_bound(const key_type& k);
+ const_iterator lower_bound(const key_type& k) const;
+
+ iterator upper_bound(const key_type& k);
+ const_iterator upper_bound(const key_type& k) const;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ template <typename U, typename BinaryPredicate>
+ eastl::pair<iterator, iterator> equal_range(const U& u, BinaryPredicate predicate);
+
+ template <typename U, typename BinaryPredicate>
+ eastl::pair<const_iterator, const_iterator> equal_range(const U& u, BinaryPredicate) const;
+
+ // Functions which are disallowed due to being unsafe.
+ void push_back(const value_type& value) = delete;
+ reference push_back() = delete;
+ void* push_back_uninitialized() = delete;
+ template <class... Args>
+ reference emplace_back(Args&&...) = delete;
+
+ // NOTE(rparolin): It is undefined behaviour if user code fails to ensure the container
+ // invariants are respected by performing an explicit call to 'sort' before any other
+ // operations on the container are performed that do not clear the elements.
+ //
+ // 'push_back_unsorted' and 'emplace_back_unsorted' do not satisfy container invariants
+ // for being sorted. We provide these overloads explicitly labelled as '_unsorted' as an
+ // optimization opportunity when batch inserting elements so users can defer the cost of
+ // sorting the container once when all elements are contained. This was done to clarify
+ // the intent of code by leaving a trace that a manual call to sort is required.
+ //
+ template <typename... Args> decltype(auto) push_back_unsorted(Args&&... args)
+ { return base_type::push_back(eastl::forward<Args>(args)...); }
+ template <typename... Args> decltype(auto) emplace_back_unsorted(Args&&... args)
+ { return base_type::emplace_back(eastl::forward<Args>(args)...); }
+
+ }; // vector_set
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // vector_set
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>::vector_set()
+ : base_type(), mCompare(C())
+ {
+ get_allocator().set_name(EASTL_VECTOR_SET_DEFAULT_NAME);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>::vector_set(const allocator_type& allocator)
+ : base_type(allocator), mCompare(C())
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>::vector_set(const key_compare& compare, const allocator_type& allocator)
+ : base_type(allocator), mCompare(compare)
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>::vector_set(const this_type& x)
+ : base_type(x), mCompare(x.mCompare)
+ {
+ // Empty
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>::vector_set(this_type&& x)
+ : base_type(eastl::move(x)), mCompare(x.mCompare)
+ {
+ // Empty. Note: x is left with empty contents but its original mValueCompare instead of the default one.
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>::vector_set(this_type&& x, const allocator_type& allocator)
+ : base_type(eastl::move(x), allocator), mCompare(x.mCompare)
+ {
+ // Empty. Note: x is left with empty contents but its original mValueCompare instead of the default one.
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>::vector_set(std::initializer_list<value_type> ilist, const key_compare& compare, const allocator_type& allocator)
+ : base_type(allocator), mCompare(compare)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline vector_set<K, C, A, RAC>::vector_set(InputIterator first, InputIterator last)
+ : base_type(EASTL_VECTOR_SET_DEFAULT_ALLOCATOR), mCompare(key_compare())
+ {
+ insert(first, last);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline vector_set<K, C, A, RAC>::vector_set(InputIterator first, InputIterator last, const key_compare& compare)
+ : base_type(EASTL_VECTOR_SET_DEFAULT_ALLOCATOR), mCompare(compare)
+ {
+ insert(first, last);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>&
+ vector_set<K, C, A, RAC>::operator=(const this_type& x)
+ {
+ base_type::operator=(x);
+ mCompare = value_compare(x.mCompare);
+ return *this;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>&
+ vector_set<K, C, A, RAC>::operator=(this_type&& x)
+ {
+ base_type::operator=(eastl::move(x));
+ eastl::swap(mCompare, x.mCompare);
+ return *this;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline vector_set<K, C, A, RAC>&
+ vector_set<K, C, A, RAC>::operator=(std::initializer_list<value_type> ilist)
+ {
+ base_type::clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline void vector_set<K, C, A, RAC>::swap(this_type& x)
+ {
+ base_type::swap(x);
+ eastl::swap(mCompare, x.mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline const typename vector_set<K, C, A, RAC>::key_compare&
+ vector_set<K, C, A, RAC>::key_comp() const
+ {
+ return mCompare;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::key_compare&
+ vector_set<K, C, A, RAC>::key_comp()
+ {
+ return mCompare;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline const typename vector_set<K, C, A, RAC>::value_compare&
+ vector_set<K, C, A, RAC>::value_comp() const
+ {
+ return mCompare;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::value_compare&
+ vector_set<K, C, A, RAC>::value_comp()
+ {
+ return mCompare;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <class... Args>
+ inline eastl::pair<typename vector_set<K, C, A, RAC>::iterator, bool>
+ vector_set<K, C, A, RAC>::emplace(Args&&... args)
+ {
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...);
+ #endif
+
+ return insert(eastl::move(value));
+ }
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <class... Args>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::emplace_hint(const_iterator position, Args&&... args)
+ {
+ #if EASTL_USE_FORWARD_WORKAROUND
+ auto value = value_type(eastl::forward<Args>(args)...); // Workaround for compiler bug in VS2013 which results in a compiler internal crash while compiling this code.
+ #else
+ value_type value(eastl::forward<Args>(args)...);
+ #endif
+
+ return insert(position, eastl::move(value));
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_set<K, C, A, RAC>::iterator, bool>
+ vector_set<K, C, A, RAC>::insert(const value_type& value)
+ {
+ const iterator itLB(lower_bound(value));
+
+ if((itLB != end()) && !mCompare(value, *itLB))
+ return eastl::pair<iterator, bool>(itLB, false);
+ return eastl::pair<iterator, bool>(base_type::insert(itLB, value), true);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename P>
+ inline eastl::pair<typename vector_set<K, C, A, RAC>::iterator, bool>
+ vector_set<K, C, A, RAC>::insert(P&& otherValue)
+ {
+ value_type value(eastl::forward<P>(otherValue));
+ const iterator itLB(lower_bound(value));
+
+ if((itLB != end()) && !mCompare(value, *itLB))
+ return eastl::pair<iterator, bool>(itLB, false);
+ return eastl::pair<iterator, bool>(base_type::insert(itLB, eastl::move(value)), true);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::insert(const_iterator position, const value_type& value)
+ {
+ // We assume that the user knows what he is doing and has supplied us with
+ // a position that is right where value should be inserted (put in front of).
+ // We do a test to see if the position is correct. If so then we insert,
+ // if not then we ignore the input position.
+
+ if((position == end()) || mCompare(value, *position)) // If the element at position is greater than value...
+ {
+ if((position == begin()) || mCompare(*(position - 1), value)) // If the element before position is less than value...
+ return base_type::insert(position, value);
+ }
+
+ // In this case we either have an incorrect position or value is already present.
+ // We fall back to the regular insert function. An optimization would be to detect
+ // that the element is already present, but that's only useful if the user supplied
+ // a good position but a present element.
+ const eastl::pair<iterator, bool> result = insert(value);
+
+ return result.first;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::insert(const_iterator position, value_type&& value)
+ {
+ // See the other version of this function for documentation.
+ if((position == end()) || mCompare(value, *position)) // If the element at position is greater than value...
+ {
+ if((position == begin()) || mCompare(*(position - 1), value)) // If the element before position is less than value...
+ return base_type::insert(position, eastl::move(value));
+ }
+
+ const eastl::pair<iterator, bool> result = insert(eastl::move(value));
+
+ return result.first;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline void vector_set<K, C, A, RAC>::insert(std::initializer_list<value_type> ilist)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename InputIterator>
+ inline void vector_set<K, C, A, RAC>::insert(InputIterator first, InputIterator last)
+ {
+ // To consider: Improve the speed of this by getting the length of the
+ // input range and resizing our container to that size
+ // before doing the insertions. We can't use reserve
+ // because we don't know if we are using a vector or not.
+ // Alternatively, force the user to do the reservation.
+ // To consider: When inserting values that come from a container
+ // like this container, use the property that they are
+ // known to be sorted and speed up the inserts here.
+ for(; first != last; ++first)
+ insert(*first);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::erase(const_iterator position)
+ {
+ // Note that we return iterator and not void. This allows for more efficient use of
+ // the container and is consistent with the C++ language defect report #130 (DR 130)
+ return base_type::erase(position);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::erase(const_iterator first, const_iterator last)
+ {
+ return base_type::erase(first, last);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::size_type
+ vector_set<K, C, A, RAC>::erase(const key_type& k)
+ {
+ const iterator it(find(k));
+
+ if(it != end()) // If it exists...
+ {
+ erase(it);
+ return 1;
+ }
+ return 0;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::reverse_iterator
+ vector_set<K, C, A, RAC>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(base_type::erase((++position).base()));
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::reverse_iterator
+ vector_set<K, C, A, RAC>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ return reverse_iterator(base_type::erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::find(const key_type& k)
+ {
+ const eastl::pair<iterator, iterator> pairIts(equal_range(k));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::const_iterator
+ vector_set<K, C, A, RAC>::find(const key_type& k) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(equal_range(k));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::find_as(const U& u, BinaryPredicate predicate)
+ {
+ const eastl::pair<iterator, iterator> pairIts(equal_range(u, predicate));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template <typename U, typename BinaryPredicate>
+ inline typename vector_set<K, C, A, RAC>::const_iterator
+ vector_set<K, C, A, RAC>::find_as(const U& u, BinaryPredicate predicate) const
+ {
+ const eastl::pair<const_iterator, const_iterator> pairIts(equal_range(u, predicate));
+ return (pairIts.first != pairIts.second) ? pairIts.first : end();
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::size_type
+ vector_set<K, C, A, RAC>::count(const key_type& k) const
+ {
+ const const_iterator it(find(k));
+ return (it != end()) ? (size_type)1 : (size_type)0;
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::lower_bound(const key_type& k)
+ {
+ return eastl::lower_bound(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::const_iterator
+ vector_set<K, C, A, RAC>::lower_bound(const key_type& k) const
+ {
+ return eastl::lower_bound(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::iterator
+ vector_set<K, C, A, RAC>::upper_bound(const key_type& k)
+ {
+ return eastl::upper_bound(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline typename vector_set<K, C, A, RAC>::const_iterator
+ vector_set<K, C, A, RAC>::upper_bound(const key_type& k) const
+ {
+ return eastl::upper_bound(begin(), end(), k, mCompare);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_set<K, C, A, RAC>::iterator, typename vector_set<K, C, A, RAC>::iterator>
+ vector_set<K, C, A, RAC>::equal_range(const key_type& k)
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ const iterator itLower(lower_bound(k));
+
+ if((itLower == end()) || mCompare(k, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<iterator, iterator>(itLower, itLower);
+
+ iterator itUpper(itLower);
+ return eastl::pair<iterator, iterator>(itLower, ++itUpper);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ inline eastl::pair<typename vector_set<K, C, A, RAC>::const_iterator, typename vector_set<K, C, A, RAC>::const_iterator>
+ vector_set<K, C, A, RAC>::equal_range(const key_type& k) const
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ const const_iterator itLower(lower_bound(k));
+
+ if((itLower == end()) || mCompare(k, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<const_iterator, const_iterator>(itLower, itLower);
+
+ const_iterator itUpper(itLower);
+ return eastl::pair<const_iterator, const_iterator>(itLower, ++itUpper);
+ }
+
+
+ template <typename K, typename C, typename A, typename RAC>
+ template<typename U, typename BinaryPredicate>
+ inline eastl::pair<typename vector_set<K, C, A, RAC>::iterator, typename vector_set<K, C, A, RAC>::iterator>
+ vector_set<K, C, A, RAC>::equal_range(const U& u, BinaryPredicate predicate)
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ const iterator itLower(eastl::lower_bound(begin(), end(), u, predicate));
+
+ if((itLower == end()) || predicate(u, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<iterator, iterator>(itLower, itLower);
+
+ iterator itUpper(itLower);
+ return eastl::pair<iterator, iterator>(itLower, ++itUpper);
+ }
+
+ template <typename K, typename C, typename A, typename RAC>
+ template<typename U, typename BinaryPredicate>
+ inline eastl::pair<typename vector_set<K, C, A, RAC>::const_iterator, typename vector_set<K, C, A, RAC>::const_iterator>
+ vector_set<K, C, A, RAC>::equal_range(const U& u, BinaryPredicate predicate) const
+ {
+ // The resulting range will either be empty or have one element,
+ // so instead of doing two tree searches (one for lower_bound and
+ // one for upper_bound), we do just lower_bound and see if the
+ // result is a range of size zero or one.
+ const const_iterator itLower(eastl::lower_bound(begin(), end(), u, predicate));
+
+ if((itLower == end()) || predicate(u, *itLower)) // If at the end or if (k is < itLower)...
+ return eastl::pair<const_iterator, const_iterator>(itLower, itLower);
+
+ const_iterator itUpper(itLower);
+ return eastl::pair<const_iterator, const_iterator>(itLower, ++itUpper);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator==(const vector_set<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_set<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(b.begin(), b.end(), a.begin());
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator<(const vector_set<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_set<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end(), a.value_comp());
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator!=(const vector_set<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_set<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator>(const vector_set<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_set<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator<=(const vector_set<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_set<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline bool operator>=(const vector_set<Key, Compare, Allocator, RandomAccessContainer>& a,
+ const vector_set<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename Key, typename Compare, typename Allocator, typename RandomAccessContainer>
+ inline void swap(vector_set<Key, Compare, Allocator, RandomAccessContainer>& a,
+ vector_set<Key, Compare, Allocator, RandomAccessContainer>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/EASTL/include/EASTL/version.h b/EASTL/include/EASTL/version.h
new file mode 100644
index 0000000..0dee15f
--- /dev/null
+++ b/EASTL/include/EASTL/version.h
@@ -0,0 +1,15 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VERSION_H
+#define EASTL_VERSION_H
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+
+#endif
diff --git a/EASTL/include/EASTL/weak_ptr.h b/EASTL/include/EASTL/weak_ptr.h
new file mode 100644
index 0000000..4272696
--- /dev/null
+++ b/EASTL/include/EASTL/weak_ptr.h
@@ -0,0 +1,17 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_WEAK_PTR_H
+#define EASTL_WEAK_PTR_H
+
+
+// This header file is deprecated. The implementation has moved:
+#include <EASTL/shared_ptr.h>
+
+
+#endif
+
+
+
diff --git a/EASTL/scripts/CMake/CommonCppFlags.cmake b/EASTL/scripts/CMake/CommonCppFlags.cmake
new file mode 100644
index 0000000..08b6af5
--- /dev/null
+++ b/EASTL/scripts/CMake/CommonCppFlags.cmake
@@ -0,0 +1,83 @@
+#-------------------------------------------------------------------------------------------
+# Compiler Flag Detection
+#-------------------------------------------------------------------------------------------
+include(CheckCXXCompilerFlag)
+
+check_cxx_compiler_flag("-fchar8_t" EASTL_HAS_FCHAR8T_FLAG)
+check_cxx_compiler_flag("/Zc:char8_t" EASTL_HAS_ZCCHAR8T_FLAG)
+
+if(EASTL_HAS_FCHAR8T_FLAG)
+ set(EASTL_CHAR8T_FLAG "-fchar8_t")
+ set(EASTL_NO_CHAR8T_FLAG "-fno-char8_t")
+elseif(EASTL_HAS_ZCCHAR8T_FLAG)
+ set(EASTL_CHAR8T_FLAG "/Zc:char8_t")
+ set(EASTL_NO_CHAR8T_FLAG "/Zc:char8_t-")
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Compiler Flags
+#-------------------------------------------------------------------------------------------
+if(UNIX AND "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel" )
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fasm-blocks" )
+endif()
+
+# NOT LESS == GREATER_OR_EQUAL; CMake doesn't support this out of the box.
+if(CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
+ if(NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6.2"))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6.2" AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6.1")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6.1" AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.3")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1y")
+ endif()
+ # It seems Apple started changing version numbers after 3.1, going straight to 4.0 after 3.1.
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.3" AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.1")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.1")
+ message(FATAL_ERROR "Building with a Apple clang version less than 3.1 is not supported.")
+ endif()
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND NOT CMAKE_CXX_SIMULATE_ID MATCHES "MSVC") # clang, but not clang-cl.
+ # non-Apple clangs uses different versioning.
+ if(NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.0.0"))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.0.0" AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.5.0")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.5.0" AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.2")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1y")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.2" AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.0")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.0")
+ message(FATAL_ERROR "Building with a clang version less than 3.0 is not supported.")
+ endif()
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+ if(NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "7.0.0"))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+ endif()
+ if((CMAKE_CXX_COMPILER_VERSION VERSION_LESS "7.0.0") AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.2.0")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.2.0" AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.8.1")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1y")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.8.1" AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.7.3")))
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.7.3")
+ message(FATAL_ERROR "Building with a gcc version less than 4.7.3 is not supported.")
+ endif()
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++latest /W4 /permissive-")
+endif()
+
+
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_DEBUG")
+ set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -D_DEBUG")
+endif()
diff --git a/EASTL/scripts/build.sh b/EASTL/scripts/build.sh
new file mode 100755
index 0000000..c062501
--- /dev/null
+++ b/EASTL/scripts/build.sh
@@ -0,0 +1,27 @@
+build_folder=build
+
+rm -rf $build_folder
+mkdir $build_folder
+pushd $build_folder
+
+cmake .. -DEASTL_BUILD_TESTS:BOOL=OFF -DEASTL_BUILD_BENCHMARK:BOOL=ON
+cmake --build . --config Release -- -j 32
+
+cmake .. -DEASTL_BUILD_TESTS:BOOL=OFF -DEASTL_BUILD_BENCHMARK:BOOL=OFF
+cmake --build . --config Release -- -j 32
+
+cmake .. -DEASTL_BUILD_TESTS:BOOL=ON -DEASTL_BUILD_BENCHMARK:BOOL=OFF
+cmake --build . --config Release -- -j 32
+
+cmake .. -DEASTL_BUILD_TESTS:BOOL=ON -DEASTL_BUILD_BENCHMARK:BOOL=ON
+cmake --build . --config Release -- -j 32
+cmake --build . --config Debug -- -j 32
+cmake --build . --config RelWithDebInfo -- -j 32
+cmake --build . --config MinSizeRel -- -j 32
+pushd test
+ctest -C Release -V
+ctest -C Debug -V
+ctest -C RelWithDebInfo -V
+ctest -C MinSizeRel -V
+popd
+popd
diff --git a/EASTL/source/allocator_eastl.cpp b/EASTL/source/allocator_eastl.cpp
new file mode 100644
index 0000000..6b48168
--- /dev/null
+++ b/EASTL/source/allocator_eastl.cpp
@@ -0,0 +1,56 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+
+
+///////////////////////////////////////////////////////////////////////////////
+// ReadMe
+//
+// This file implements the default application allocator.
+// You can replace this allocator.cpp file with a different one,
+// you can define EASTL_USER_DEFINED_ALLOCATOR below to ignore this file,
+// or you can modify the EASTL config.h file to redefine how allocators work.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_USER_DEFINED_ALLOCATOR // If the user hasn't declared that he has defined an allocator implementation elsewhere...
+
+ namespace eastl
+ {
+
+ /// gDefaultAllocator
+ /// Default global allocator instance.
+ EASTL_API allocator gDefaultAllocator;
+ EASTL_API allocator* gpDefaultAllocator = &gDefaultAllocator;
+
+ EASTL_API allocator* GetDefaultAllocator()
+ {
+ return gpDefaultAllocator;
+ }
+
+ EASTL_API allocator* SetDefaultAllocator(allocator* pAllocator)
+ {
+ allocator* const pPrevAllocator = gpDefaultAllocator;
+ gpDefaultAllocator = pAllocator;
+ return pPrevAllocator;
+ }
+
+ } // namespace eastl
+
+
+#endif // EASTL_USER_DEFINED_ALLOCATOR
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/source/assert.cpp b/EASTL/source/assert.cpp
new file mode 100644
index 0000000..63b444a
--- /dev/null
+++ b/EASTL/source/assert.cpp
@@ -0,0 +1,116 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/string.h>
+#include <EABase/eabase.h>
+
+#if defined(EA_PLATFORM_WINDOWS_KERNEL)
+ #include <Wdm.h>
+#elif defined(EA_PLATFORM_MICROSOFT)
+ EA_DISABLE_ALL_VC_WARNINGS();
+ #if defined(EA_COMPILER_MSVC)
+ #include <crtdbg.h>
+ #endif
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+ #ifdef __MINGW64__
+ #include <windows.h>
+ #else
+ #include <Windows.h>
+ #endif
+ EA_RESTORE_ALL_VC_WARNINGS();
+#elif defined(EA_PLATFORM_ANDROID)
+ #include <android/log.h>
+#else
+ #include <stdio.h>
+#endif
+
+
+
+
+namespace eastl
+{
+
+ /// gpAssertionFailureFunction
+ ///
+ /// Global assertion failure function pointer. Set by SetAssertionFailureFunction.
+ ///
+ EASTL_API EASTL_AssertionFailureFunction gpAssertionFailureFunction = AssertionFailureFunctionDefault;
+ EASTL_API void* gpAssertionFailureFunctionContext = NULL;
+
+
+
+ /// SetAssertionFailureFunction
+ ///
+ /// Sets the function called when an assertion fails. If this function is not called
+ /// by the user, a default function will be used. The user may supply a context parameter
+ /// which will be passed back to the user in the function call. This is typically used
+ /// to store a C++ 'this' pointer, though other things are possible.
+ ///
+ /// There is no thread safety here, so the user needs to externally make sure that
+ /// this function is not called in a thread-unsafe way. The easiest way to do this is
+ /// to just call this function once from the main thread on application startup.
+ ///
+ EASTL_API void SetAssertionFailureFunction(EASTL_AssertionFailureFunction pAssertionFailureFunction, void* pContext)
+ {
+ gpAssertionFailureFunction = pAssertionFailureFunction;
+ gpAssertionFailureFunctionContext = pContext;
+ }
+
+
+
+ /// AssertionFailureFunctionDefault
+ ///
+ EASTL_API void AssertionFailureFunctionDefault(const char* pExpression, void* /*pContext*/)
+ {
+ #if EASTL_ASSERT_ENABLED
+ #if defined(EA_PLATFORM_WINDOWS_KERNEL)
+ DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL, "%s", pExpression);
+ #elif defined(EA_PLATFORM_MICROSOFT)
+ printf("%s\n", pExpression); // Write the message to stdout
+ if( ::IsDebuggerPresent())
+ {
+ OutputDebugStringA(pExpression);
+ }
+ #elif defined(EA_PLATFORM_ANDROID)
+ __android_log_print(ANDROID_LOG_INFO, "PRINTF", "%s\n", pExpression);
+ #else
+ printf("%s\n", pExpression); // Write the message to stdout, which happens to be the trace view for many console debug machines.
+ #endif
+ #else
+ EA_UNUSED(pExpression);
+ #endif
+
+ EASTL_DEBUG_BREAK();
+ }
+
+
+ /// AssertionFailure
+ ///
+ EASTL_API void AssertionFailure(const char* pExpression)
+ {
+ if(gpAssertionFailureFunction)
+ gpAssertionFailureFunction(pExpression, gpAssertionFailureFunctionContext);
+ }
+
+
+} // namespace eastl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/source/atomic.cpp b/EASTL/source/atomic.cpp
new file mode 100644
index 0000000..38cda30
--- /dev/null
+++ b/EASTL/source/atomic.cpp
@@ -0,0 +1,25 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/atomic.h>
+
+
+namespace eastl
+{
+
+namespace internal
+{
+
+
+static void EastlCompilerBarrierDataDependencyFunc(void*)
+{
+}
+
+volatile CompilerBarrierDataDependencyFuncPtr gCompilerBarrierDataDependencyFunc = &EastlCompilerBarrierDataDependencyFunc;
+
+
+} // namespace internal
+
+} // namespace eastl
diff --git a/EASTL/source/fixed_pool.cpp b/EASTL/source/fixed_pool.cpp
new file mode 100644
index 0000000..73b9be0
--- /dev/null
+++ b/EASTL/source/fixed_pool.cpp
@@ -0,0 +1,70 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/fixed_pool.h>
+#include <EASTL/fixed_allocator.h>
+
+
+
+namespace eastl
+{
+
+
+ EASTL_API void fixed_pool_base::init(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t /*alignmentOffset*/)
+ {
+ // To do: Support alignmentOffset.
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ mnCurrentSize = 0;
+ mnPeakSize = 0;
+ #endif
+
+ if(pMemory)
+ {
+ // Assert that alignment is a power of 2 value (e.g. 1, 2, 4, 8, 16, etc.)
+ EASTL_ASSERT((alignment & (alignment - 1)) == 0);
+
+ // Make sure alignment is a valid value.
+ if(alignment < 1)
+ alignment = 1;
+
+ mpNext = (Link*)(((uintptr_t)pMemory + (alignment - 1)) & ~(alignment - 1));
+ memorySize -= (uintptr_t)mpNext - (uintptr_t)pMemory;
+ pMemory = mpNext;
+
+ // The node size must be at least as big as a Link, which itself is sizeof(void*).
+ if(nodeSize < sizeof(Link))
+ nodeSize = ((sizeof(Link) + (alignment - 1))) & ~(alignment - 1);
+
+ // If the user passed in a memory size that wasn't a multiple of the node size,
+ // we need to chop down the memory size so that the last node is not a whole node.
+ memorySize = (memorySize / nodeSize) * nodeSize;
+
+ mpCapacity = (Link*)((uintptr_t)pMemory + memorySize);
+ mpHead = NULL;
+ mnNodeSize = nodeSize;
+ }
+ }
+
+
+} // namespace eastl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/source/hashtable.cpp b/EASTL/source/hashtable.cpp
new file mode 100644
index 0000000..8d31663
--- /dev/null
+++ b/EASTL/source/hashtable.cpp
@@ -0,0 +1,177 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/hashtable.h>
+#include <EASTL/utility.h>
+#include <math.h> // Not all compilers support <cmath> and std::ceilf(), which we need below.
+#include <stddef.h>
+
+
+EA_DISABLE_VC_WARNING(4267); // 'argument' : conversion from 'size_t' to 'const uint32_t', possible loss of data. This is a bogus warning resulting from a bug in VC++.
+
+
+namespace eastl
+{
+
+ /// gpEmptyBucketArray
+ ///
+ /// A shared representation of an empty hash table. This is present so that
+ /// a new empty hashtable allocates no memory. It has two entries, one for
+ /// the first lone empty (NULL) bucket, and one for the non-NULL trailing sentinel.
+ ///
+ EASTL_API void* gpEmptyBucketArray[2] = { NULL, (void*)uintptr_t(~0) };
+
+
+
+ /// gPrimeNumberArray
+ ///
+ /// This is an array of prime numbers. This is the same set of prime
+ /// numbers suggested by the C++ standard proposal. These are numbers
+ /// which are separated by 8% per entry.
+ ///
+ /// To consider: Allow the user to specify their own prime number array.
+ ///
+ const uint32_t gPrimeNumberArray[] =
+ {
+ 2u, 3u, 5u, 7u, 11u, 13u, 17u, 19u, 23u, 29u, 31u,
+ 37u, 41u, 43u, 47u, 53u, 59u, 61u, 67u, 71u, 73u, 79u,
+ 83u, 89u, 97u, 103u, 109u, 113u, 127u, 137u, 139u, 149u,
+ 157u, 167u, 179u, 193u, 199u, 211u, 227u, 241u, 257u,
+ 277u, 293u, 313u, 337u, 359u, 383u, 409u, 439u, 467u,
+ 503u, 541u, 577u, 619u, 661u, 709u, 761u, 823u, 887u,
+ 953u, 1031u, 1109u, 1193u, 1289u, 1381u, 1493u, 1613u,
+ 1741u, 1879u, 2029u, 2179u, 2357u, 2549u, 2753u, 2971u,
+ 3209u, 3469u, 3739u, 4027u, 4349u, 4703u, 5087u, 5503u,
+ 5953u, 6427u, 6949u, 7517u, 8123u, 8783u, 9497u, 10273u,
+ 11113u, 12011u, 12983u, 14033u, 15173u, 16411u, 17749u,
+ 19183u, 20753u, 22447u, 24281u, 26267u, 28411u, 30727u,
+ 33223u, 35933u, 38873u, 42043u, 45481u, 49201u, 53201u,
+ 57557u, 62233u, 67307u, 72817u, 78779u, 85229u, 92203u,
+ 99733u, 107897u, 116731u, 126271u, 136607u, 147793u,
+ 159871u, 172933u, 187091u, 202409u, 218971u, 236897u,
+ 256279u, 277261u, 299951u, 324503u, 351061u, 379787u,
+ 410857u, 444487u, 480881u, 520241u, 562841u, 608903u,
+ 658753u, 712697u, 771049u, 834181u, 902483u, 976369u,
+ 1056323u, 1142821u, 1236397u, 1337629u, 1447153u, 1565659u,
+ 1693859u, 1832561u, 1982627u, 2144977u, 2320627u, 2510653u,
+ 2716249u, 2938679u, 3179303u, 3439651u, 3721303u, 4026031u,
+ 4355707u, 4712381u, 5098259u, 5515729u, 5967347u, 6456007u,
+ 6984629u, 7556579u, 8175383u, 8844859u, 9569143u, 10352717u,
+ 11200489u, 12117689u, 13109983u, 14183539u, 15345007u,
+ 16601593u, 17961079u, 19431899u, 21023161u, 22744717u,
+ 24607243u, 26622317u, 28802401u, 31160981u, 33712729u,
+ 36473443u, 39460231u, 42691603u, 46187573u, 49969847u,
+ 54061849u, 58488943u, 63278561u, 68460391u, 74066549u,
+ 80131819u, 86693767u, 93793069u, 101473717u, 109783337u,
+ 118773397u, 128499677u, 139022417u, 150406843u, 162723577u,
+ 176048909u, 190465427u, 206062531u, 222936881u, 241193053u,
+ 260944219u, 282312799u, 305431229u, 330442829u, 357502601u,
+ 386778277u, 418451333u, 452718089u, 489790921u, 529899637u,
+ 573292817u, 620239453u, 671030513u, 725980837u, 785430967u,
+ 849749479u, 919334987u, 994618837u, 1076067617u, 1164186217u,
+ 1259520799u, 1362662261u, 1474249943u, 1594975441u,
+ 1725587117u, 1866894511u, 2019773507u, 2185171673u,
+ 2364114217u, 2557710269u, 2767159799u, 2993761039u,
+ 3238918481u, 3504151727u, 3791104843u, 4101556399u,
+ 4294967291u,
+ 4294967291u // Sentinel so we don't have to test result of lower_bound
+ };
+
+
+ /// kPrimeCount
+ ///
+ /// The number of prime numbers in gPrimeNumberArray.
+ ///
+ const uint32_t kPrimeCount = (sizeof(gPrimeNumberArray) / sizeof(gPrimeNumberArray[0]) - 1);
+
+
+ /// GetPrevBucketCountOnly
+ /// Return a bucket count no greater than nBucketCountHint.
+ ///
+ uint32_t prime_rehash_policy::GetPrevBucketCountOnly(uint32_t nBucketCountHint)
+ {
+ const uint32_t nPrime = *(eastl::upper_bound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, nBucketCountHint) - 1);
+ return nPrime;
+ }
+
+
+ /// GetPrevBucketCount
+ /// Return a bucket count no greater than nBucketCountHint.
+ /// This function has a side effect of updating mnNextResize.
+ ///
+ uint32_t prime_rehash_policy::GetPrevBucketCount(uint32_t nBucketCountHint) const
+ {
+ const uint32_t nPrime = *(eastl::upper_bound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, nBucketCountHint) - 1);
+
+ mnNextResize = (uint32_t)ceilf(nPrime * mfMaxLoadFactor);
+ return nPrime;
+ }
+
+
+ /// GetNextBucketCount
+ /// Return a prime no smaller than nBucketCountHint.
+ /// This function has a side effect of updating mnNextResize.
+ ///
+ uint32_t prime_rehash_policy::GetNextBucketCount(uint32_t nBucketCountHint) const
+ {
+ const uint32_t nPrime = *eastl::lower_bound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, nBucketCountHint);
+
+ mnNextResize = (uint32_t)ceilf(nPrime * mfMaxLoadFactor);
+ return nPrime;
+ }
+
+
+ /// GetBucketCount
+ /// Return the smallest prime p such that alpha p >= nElementCount, where alpha
+ /// is the load factor. This function has a side effect of updating mnNextResize.
+ ///
+ uint32_t prime_rehash_policy::GetBucketCount(uint32_t nElementCount) const
+ {
+ const uint32_t nMinBucketCount = (uint32_t)(nElementCount / mfMaxLoadFactor);
+ const uint32_t nPrime = *eastl::lower_bound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, nMinBucketCount);
+
+ mnNextResize = (uint32_t)ceilf(nPrime * mfMaxLoadFactor);
+ return nPrime;
+ }
+
+
+ /// GetRehashRequired
+ /// Finds the smallest prime p such that alpha p > nElementCount + nElementAdd.
+ /// If p > nBucketCount, return pair<bool, uint32_t>(true, p); otherwise return
+ /// pair<bool, uint32_t>(false, 0). In principle this isn't very different from GetBucketCount.
+ /// This function has a side effect of updating mnNextResize.
+ ///
+ eastl::pair<bool, uint32_t>
+ prime_rehash_policy::GetRehashRequired(uint32_t nBucketCount, uint32_t nElementCount, uint32_t nElementAdd) const
+ {
+ if((nElementCount + nElementAdd) > mnNextResize) // It is significant that we specify > next resize and not >= next resize.
+ {
+ if(nBucketCount == 1) // We force rehashing to occur if the bucket count is < 2.
+ nBucketCount = 0;
+
+ float fMinBucketCount = (nElementCount + nElementAdd) / mfMaxLoadFactor;
+
+ if(fMinBucketCount > (float)nBucketCount)
+ {
+ fMinBucketCount = eastl::max_alt(fMinBucketCount, mfGrowthFactor * nBucketCount);
+ const uint32_t nPrime = *eastl::lower_bound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, (uint32_t)fMinBucketCount);
+ mnNextResize = (uint32_t)ceilf(nPrime * mfMaxLoadFactor);
+
+ return eastl::pair<bool, uint32_t>(true, nPrime);
+ }
+ else
+ {
+ mnNextResize = (uint32_t)ceilf(nBucketCount * mfMaxLoadFactor);
+ return eastl::pair<bool, uint32_t>(false, (uint32_t)0);
+ }
+ }
+
+ return eastl::pair<bool, uint32_t>(false, (uint32_t)0);
+ }
+
+
+} // namespace eastl
+
+EA_RESTORE_VC_WARNING();
diff --git a/EASTL/source/intrusive_list.cpp b/EASTL/source/intrusive_list.cpp
new file mode 100644
index 0000000..c8e8a25
--- /dev/null
+++ b/EASTL/source/intrusive_list.cpp
@@ -0,0 +1,87 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#include <EASTL/intrusive_list.h>
+
+
+namespace eastl
+{
+
+
+ EASTL_API void intrusive_list_base::reverse() EA_NOEXCEPT
+ {
+ intrusive_list_node* pNode = &mAnchor;
+ do {
+ intrusive_list_node* const pTemp = pNode->mpNext;
+ pNode->mpNext = pNode->mpPrev;
+ pNode->mpPrev = pTemp;
+ pNode = pNode->mpPrev;
+ }
+ while(pNode != &mAnchor);
+ }
+
+
+
+ EASTL_API bool intrusive_list_base::validate() const
+ {
+ const intrusive_list_node *p = &mAnchor;
+ const intrusive_list_node *q = p;
+
+ // We do two tests below:
+ //
+ // 1) Prev and next pointers are symmetric. We check (p->next->prev == p)
+ // for each node, which is enough to verify all links.
+ //
+ // 2) Loop check. We bump the q pointer at one-half rate compared to the
+ // p pointer; (p == q) if and only if we are at the start (which we
+ // don't check) or if there is a loop somewhere in the list.
+
+ do {
+ // validate node (even phase)
+ if (p->mpNext->mpPrev != p)
+ return false; // broken linkage detected
+
+ // bump only fast pointer
+ p = p->mpNext;
+ if (p == &mAnchor)
+ break;
+
+ if (p == q)
+ return false; // loop detected
+
+ // validate node (odd phase)
+ if (p->mpNext->mpPrev != p)
+ return false; // broken linkage detected
+
+ // bump both pointers
+ p = p->mpNext;
+ q = q->mpNext;
+
+ if (p == q)
+ return false; // loop detected
+
+ } while(p != &mAnchor);
+
+ return true;
+ }
+
+
+} // namespace eastl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/source/numeric_limits.cpp b/EASTL/source/numeric_limits.cpp
new file mode 100644
index 0000000..90b1d75
--- /dev/null
+++ b/EASTL/source/numeric_limits.cpp
@@ -0,0 +1,598 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/numeric_limits.h>
+
+
+#if EASTL_CUSTOM_FLOAT_CONSTANTS_REQUIRED
+ #include <limits> // See notes below about usage of this header.
+
+ namespace eastl
+ {
+ namespace Internal
+ {
+ // For this platformc/compiler combination we fall back to using std::numeric_limits,
+ // which is available for with most compilers and platforms, though it doesn't necessarily
+ // support the C++11 functionality that we do. However, we need it just for the four
+ // floating point types. Note that this code isn't used for most EA platforms, as
+ // most platforms use GCC, clang, VC++ (yvals), or Dinkumware (yvals).
+ // To do: Initialize these values via a means that doesn't depend on std::numeric_limits.
+
+ EASTL_API float gFloatInfinity = std::numeric_limits<float>::infinity();
+ EASTL_API float gFloatNaN = std::numeric_limits<float>::quiet_NaN();
+ EASTL_API float gFloatSNaN = std::numeric_limits<float>::signaling_NaN();
+ EASTL_API float gFloatDenorm = std::numeric_limits<float>::denorm_min();
+
+ EASTL_API double gDoubleInfinity = std::numeric_limits<double>::infinity();
+ EASTL_API double gDoubleNaN = std::numeric_limits<double>::quiet_NaN();
+ EASTL_API double gDoubleSNaN = std::numeric_limits<double>::signaling_NaN();
+ EASTL_API double gDoubleDenorm = std::numeric_limits<double>::denorm_min();
+
+ EASTL_API long double gLongDoubleInfinity = std::numeric_limits<long double>::infinity();
+ EASTL_API long double gLongDoubleNaN = std::numeric_limits<long double>::quiet_NaN();
+ EASTL_API long double gLongDoubleSNaN = std::numeric_limits<long double>::signaling_NaN();
+ EASTL_API long double gLongDoubleDenorm = std::numeric_limits<long double>::denorm_min();
+ }
+ }
+#endif
+
+
+#if defined(_MSC_VER) && !defined(EA_COMPILER_CLANG_CL)
+ // VC++ has a long-standing bug: it fails to allow the definition of static const member variables
+ // outside the declaration within the class. The C++ Standard actually requires that they be defined
+ // and some other compilers fail to link if they aren't. So we simply don't define the members for VC++.
+ // See the C++ Standard Sec. 9.4.2 paragraph 4, which makes this clear.
+ // http://bytes.com/topic/c/answers/710704-const-static-initialization-visual-studio
+#else
+
+ namespace eastl
+ {
+ namespace Internal
+ {
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits_base::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits_base::digits10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits_base::max_digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits_base::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits_base::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits_base::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits_base::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits_base::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits_base::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits_base::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits_base::is_iec559;
+ }
+
+ // bool
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<bool>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<bool>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<bool>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<bool>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<bool>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<bool>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<bool>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<bool>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<bool>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<bool>::is_iec559;
+
+ // char
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<char>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<char>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char>::is_iec559;
+
+ // unsigned char
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned char>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned char>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned char>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned char>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned char>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned char>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned char>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<unsigned char>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<unsigned char>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned char>::is_iec559;
+
+ // signed char
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<signed char>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<signed char>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<signed char>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<signed char>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<signed char>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<signed char>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<signed char>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<signed char>::round_style;
+
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<signed char>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<signed char>::is_iec559;
+
+ // wchar_t
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<wchar_t>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<wchar_t>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<wchar_t>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<wchar_t>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<wchar_t>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<wchar_t>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<wchar_t>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<wchar_t>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<wchar_t>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<wchar_t>::is_iec559;
+
+ // char8_t
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE // If char8_t is a true unique type (as called for by the C++20 Standard)
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char8_t>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char8_t>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char8_t>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char8_t>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char8_t>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char8_t>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char8_t>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<char8_t>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<char8_t>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char8_t>::is_iec559;
+ #endif
+
+ // char16_t
+ #if EA_CHAR16_NATIVE // If char16_t is a true unique type (as called for by the C++11 Standard)...
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char16_t>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char16_t>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char16_t>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char16_t>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char16_t>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char16_t>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char16_t>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<char16_t>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<char16_t>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char16_t>::is_iec559;
+ #endif
+
+ // char32_t
+ #if EA_CHAR32_NATIVE // If char32_t is a true unique type (as called for by the C++11 Standard)...
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char32_t>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char32_t>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char32_t>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char32_t>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char32_t>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char32_t>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<char32_t>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<char32_t>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<char32_t>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<char32_t>::is_iec559;
+ #endif
+
+ // unsigned short
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned short>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned short>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned short>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned short>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned short>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned short>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned short>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<unsigned short>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<unsigned short>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned short>::is_iec559;
+
+ // short
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<short>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<short>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<short>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<short>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<short>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<short>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<short>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<short>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<short>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<short>::is_iec559;
+
+ // unsigned int
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned int>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned int>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned int>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned int>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned int>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned int>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned int>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<unsigned int>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<unsigned int>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned int>::is_iec559;
+
+ // int
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<int>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<int>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<int>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<int>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<int>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<int>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<int>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<int>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<int>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<int>::is_iec559;
+
+ // unsigned long
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<unsigned long>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<unsigned long>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long>::is_iec559;
+
+ // long
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<long>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<long>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long>::is_iec559;
+
+ // unsigned long long
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long long>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long long>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long long>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long long>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long long>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long long>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<unsigned long long>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<unsigned long long>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<unsigned long long>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<unsigned long long>::is_iec559;
+
+ // long long
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long long>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long long>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long long>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long long>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long long>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long long>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long long>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<long long>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<long long>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long long>::is_iec559;
+
+ // __uint128_t
+ #if (EA_COMPILER_INTMAX_SIZE >= 16) && (defined(EA_COMPILER_GNUC) || defined(__clang__)) // If __int128_t/__uint128_t is supported...
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__uint128_t>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__uint128_t>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__uint128_t>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__uint128_t>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__uint128_t>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__uint128_t>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__uint128_t>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<__uint128_t>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<__uint128_t>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__uint128_t>::is_iec559;
+ #endif
+
+ // __int128_t
+ #if (EA_COMPILER_INTMAX_SIZE >= 16) && (defined(EA_COMPILER_GNUC) || defined(__clang__)) // If __int128_t/__uint128_t is supported...
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__int128_t>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__int128_t>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__int128_t>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__int128_t>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__int128_t>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__int128_t>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<__int128_t>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<__int128_t>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<__int128_t>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<__int128_t>::is_iec559;
+ #endif
+
+ // float
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<float>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<float>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<float>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<float>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<float>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<float>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<float>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<float>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<float>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<float>::is_iec559;
+
+ // double
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<double>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<double>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<double>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<double>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<double>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<double>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<double>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<double>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<double>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<double>::is_iec559;
+
+ // long double
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::is_specialized;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long double>::digits;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long double>::digits10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::is_signed;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::is_integer;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::is_exact;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long double>::radix;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long double>::min_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long double>::min_exponent10;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long double>::max_exponent;
+ EA_CONSTEXPR_OR_CONST int numeric_limits<long double>::max_exponent10;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::is_bounded;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::is_modulo;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::traps;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::tinyness_before;
+ EA_CONSTEXPR_OR_CONST float_round_style numeric_limits<long double>::round_style;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::has_infinity;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::has_quiet_NaN;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::has_signaling_NaN;
+ EA_CONSTEXPR_OR_CONST float_denorm_style numeric_limits<long double>::has_denorm;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::has_denorm_loss;
+ EA_CONSTEXPR_OR_CONST bool numeric_limits<long double>::is_iec559;
+
+ } // namespace eastl
+
+#endif // (VC++ 2010 or earlier)
+
+
diff --git a/EASTL/source/red_black_tree.cpp b/EASTL/source/red_black_tree.cpp
new file mode 100644
index 0000000..d9797b9
--- /dev/null
+++ b/EASTL/source/red_black_tree.cpp
@@ -0,0 +1,518 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// The tree insert and erase functions below are based on the original
+// HP STL tree functions. Use of these functions was been approved by
+// EA legal on November 4, 2005 and the approval documentation is available
+// from the EASTL maintainer or from the EA legal deparatment on request.
+//
+// Copyright (c) 1994
+// Hewlett-Packard Company
+//
+// Permission to use, copy, modify, distribute and sell this software
+// and its documentation for any purpose is hereby granted without fee,
+// provided that the above copyright notice appear in all copies and
+// that both that copyright notice and this permission notice appear
+// in supporting documentation. Hewlett-Packard Company makes no
+// representations about the suitability of this software for any
+// purpose. It is provided "as is" without express or implied warranty.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/red_black_tree.h>
+#include <stddef.h>
+
+
+
+namespace eastl
+{
+ // Forward declarations
+ rbtree_node_base* RBTreeRotateLeft(rbtree_node_base* pNode, rbtree_node_base* pNodeRoot);
+ rbtree_node_base* RBTreeRotateRight(rbtree_node_base* pNode, rbtree_node_base* pNodeRoot);
+
+
+
+ /// RBTreeIncrement
+ /// Returns the next item in a sorted red-black tree.
+ ///
+ EASTL_API rbtree_node_base* RBTreeIncrement(const rbtree_node_base* pNode)
+ {
+ if(pNode->mpNodeRight)
+ {
+ pNode = pNode->mpNodeRight;
+
+ while(pNode->mpNodeLeft)
+ pNode = pNode->mpNodeLeft;
+ }
+ else
+ {
+ rbtree_node_base* pNodeTemp = pNode->mpNodeParent;
+
+ while(pNode == pNodeTemp->mpNodeRight)
+ {
+ pNode = pNodeTemp;
+ pNodeTemp = pNodeTemp->mpNodeParent;
+ }
+
+ if(pNode->mpNodeRight != pNodeTemp)
+ pNode = pNodeTemp;
+ }
+
+ return const_cast<rbtree_node_base*>(pNode);
+ }
+
+
+
+ /// RBTreeIncrement
+ /// Returns the previous item in a sorted red-black tree.
+ ///
+ EASTL_API rbtree_node_base* RBTreeDecrement(const rbtree_node_base* pNode)
+ {
+ if((pNode->mpNodeParent->mpNodeParent == pNode) && (pNode->mColor == kRBTreeColorRed))
+ return pNode->mpNodeRight;
+ else if(pNode->mpNodeLeft)
+ {
+ rbtree_node_base* pNodeTemp = pNode->mpNodeLeft;
+
+ while(pNodeTemp->mpNodeRight)
+ pNodeTemp = pNodeTemp->mpNodeRight;
+
+ return pNodeTemp;
+ }
+
+ rbtree_node_base* pNodeTemp = pNode->mpNodeParent;
+
+ while(pNode == pNodeTemp->mpNodeLeft)
+ {
+ pNode = pNodeTemp;
+ pNodeTemp = pNodeTemp->mpNodeParent;
+ }
+
+ return const_cast<rbtree_node_base*>(pNodeTemp);
+ }
+
+
+
+ /// RBTreeGetBlackCount
+ /// Counts the number of black nodes in an red-black tree, from pNode down to the given bottom node.
+ /// We don't count red nodes because red-black trees don't really care about
+ /// red node counts; it is black node counts that are significant in the
+ /// maintenance of a balanced tree.
+ ///
+ EASTL_API size_t RBTreeGetBlackCount(const rbtree_node_base* pNodeTop, const rbtree_node_base* pNodeBottom)
+ {
+ size_t nCount = 0;
+
+ for(; pNodeBottom; pNodeBottom = pNodeBottom->mpNodeParent)
+ {
+ if(pNodeBottom->mColor == kRBTreeColorBlack)
+ ++nCount;
+
+ if(pNodeBottom == pNodeTop)
+ break;
+ }
+
+ return nCount;
+ }
+
+
+ /// RBTreeRotateLeft
+ /// Does a left rotation about the given node.
+ /// If you want to understand tree rotation, any book on algorithms will
+ /// discuss the topic in detail.
+ ///
+ rbtree_node_base* RBTreeRotateLeft(rbtree_node_base* pNode, rbtree_node_base* pNodeRoot)
+ {
+ rbtree_node_base* const pNodeTemp = pNode->mpNodeRight;
+
+ pNode->mpNodeRight = pNodeTemp->mpNodeLeft;
+
+ if(pNodeTemp->mpNodeLeft)
+ pNodeTemp->mpNodeLeft->mpNodeParent = pNode;
+ pNodeTemp->mpNodeParent = pNode->mpNodeParent;
+
+ if(pNode == pNodeRoot)
+ pNodeRoot = pNodeTemp;
+ else if(pNode == pNode->mpNodeParent->mpNodeLeft)
+ pNode->mpNodeParent->mpNodeLeft = pNodeTemp;
+ else
+ pNode->mpNodeParent->mpNodeRight = pNodeTemp;
+
+ pNodeTemp->mpNodeLeft = pNode;
+ pNode->mpNodeParent = pNodeTemp;
+
+ return pNodeRoot;
+ }
+
+
+
+ /// RBTreeRotateRight
+ /// Does a right rotation about the given node.
+ /// If you want to understand tree rotation, any book on algorithms will
+ /// discuss the topic in detail.
+ ///
+ rbtree_node_base* RBTreeRotateRight(rbtree_node_base* pNode, rbtree_node_base* pNodeRoot)
+ {
+ rbtree_node_base* const pNodeTemp = pNode->mpNodeLeft;
+
+ pNode->mpNodeLeft = pNodeTemp->mpNodeRight;
+
+ if(pNodeTemp->mpNodeRight)
+ pNodeTemp->mpNodeRight->mpNodeParent = pNode;
+ pNodeTemp->mpNodeParent = pNode->mpNodeParent;
+
+ if(pNode == pNodeRoot)
+ pNodeRoot = pNodeTemp;
+ else if(pNode == pNode->mpNodeParent->mpNodeRight)
+ pNode->mpNodeParent->mpNodeRight = pNodeTemp;
+ else
+ pNode->mpNodeParent->mpNodeLeft = pNodeTemp;
+
+ pNodeTemp->mpNodeRight = pNode;
+ pNode->mpNodeParent = pNodeTemp;
+
+ return pNodeRoot;
+ }
+
+
+
+
+ /// RBTreeInsert
+ /// Insert a node into the tree and rebalance the tree as a result of the
+ /// disturbance the node introduced.
+ ///
+ EASTL_API void RBTreeInsert(rbtree_node_base* pNode,
+ rbtree_node_base* pNodeParent,
+ rbtree_node_base* pNodeAnchor,
+ RBTreeSide insertionSide)
+ {
+ rbtree_node_base*& pNodeRootRef = pNodeAnchor->mpNodeParent;
+
+ // Initialize fields in new node to insert.
+ pNode->mpNodeParent = pNodeParent;
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mColor = kRBTreeColorRed;
+
+ // Insert the node.
+ if(insertionSide == kRBTreeSideLeft)
+ {
+ pNodeParent->mpNodeLeft = pNode; // Also makes (leftmost = pNode) when (pNodeParent == pNodeAnchor)
+
+ if(pNodeParent == pNodeAnchor)
+ {
+ pNodeAnchor->mpNodeParent = pNode;
+ pNodeAnchor->mpNodeRight = pNode;
+ }
+ else if(pNodeParent == pNodeAnchor->mpNodeLeft)
+ pNodeAnchor->mpNodeLeft = pNode; // Maintain leftmost pointing to min node
+ }
+ else
+ {
+ pNodeParent->mpNodeRight = pNode;
+
+ if(pNodeParent == pNodeAnchor->mpNodeRight)
+ pNodeAnchor->mpNodeRight = pNode; // Maintain rightmost pointing to max node
+ }
+
+ // Rebalance the tree.
+ while((pNode != pNodeRootRef) && (pNode->mpNodeParent->mColor == kRBTreeColorRed))
+ {
+ EA_ANALYSIS_ASSUME(pNode->mpNodeParent != NULL);
+ rbtree_node_base* const pNodeParentParent = pNode->mpNodeParent->mpNodeParent;
+
+ if(pNode->mpNodeParent == pNodeParentParent->mpNodeLeft)
+ {
+ rbtree_node_base* const pNodeTemp = pNodeParentParent->mpNodeRight;
+
+ if(pNodeTemp && (pNodeTemp->mColor == kRBTreeColorRed))
+ {
+ pNode->mpNodeParent->mColor = kRBTreeColorBlack;
+ pNodeTemp->mColor = kRBTreeColorBlack;
+ pNodeParentParent->mColor = kRBTreeColorRed;
+ pNode = pNodeParentParent;
+ }
+ else
+ {
+ if(pNode->mpNodeParent && pNode == pNode->mpNodeParent->mpNodeRight)
+ {
+ pNode = pNode->mpNodeParent;
+ pNodeRootRef = RBTreeRotateLeft(pNode, pNodeRootRef);
+ }
+
+ EA_ANALYSIS_ASSUME(pNode->mpNodeParent != NULL);
+ pNode->mpNodeParent->mColor = kRBTreeColorBlack;
+ pNodeParentParent->mColor = kRBTreeColorRed;
+ pNodeRootRef = RBTreeRotateRight(pNodeParentParent, pNodeRootRef);
+ }
+ }
+ else
+ {
+ rbtree_node_base* const pNodeTemp = pNodeParentParent->mpNodeLeft;
+
+ if(pNodeTemp && (pNodeTemp->mColor == kRBTreeColorRed))
+ {
+ pNode->mpNodeParent->mColor = kRBTreeColorBlack;
+ pNodeTemp->mColor = kRBTreeColorBlack;
+ pNodeParentParent->mColor = kRBTreeColorRed;
+ pNode = pNodeParentParent;
+ }
+ else
+ {
+ EA_ANALYSIS_ASSUME(pNode != NULL && pNode->mpNodeParent != NULL);
+
+ if(pNode == pNode->mpNodeParent->mpNodeLeft)
+ {
+ pNode = pNode->mpNodeParent;
+ pNodeRootRef = RBTreeRotateRight(pNode, pNodeRootRef);
+ }
+
+ pNode->mpNodeParent->mColor = kRBTreeColorBlack;
+ pNodeParentParent->mColor = kRBTreeColorRed;
+ pNodeRootRef = RBTreeRotateLeft(pNodeParentParent, pNodeRootRef);
+ }
+ }
+ }
+
+ EA_ANALYSIS_ASSUME(pNodeRootRef != NULL);
+ pNodeRootRef->mColor = kRBTreeColorBlack;
+
+ } // RBTreeInsert
+
+
+
+
+ /// RBTreeErase
+ /// Erase a node from the tree.
+ ///
+ EASTL_API void RBTreeErase(rbtree_node_base* pNode, rbtree_node_base* pNodeAnchor)
+ {
+ rbtree_node_base*& pNodeRootRef = pNodeAnchor->mpNodeParent;
+ rbtree_node_base*& pNodeLeftmostRef = pNodeAnchor->mpNodeLeft;
+ rbtree_node_base*& pNodeRightmostRef = pNodeAnchor->mpNodeRight;
+ rbtree_node_base* pNodeSuccessor = pNode;
+ rbtree_node_base* pNodeChild = NULL;
+ rbtree_node_base* pNodeChildParent = NULL;
+
+ if(pNodeSuccessor->mpNodeLeft == NULL) // pNode has at most one non-NULL child.
+ pNodeChild = pNodeSuccessor->mpNodeRight; // pNodeChild might be null.
+ else if(pNodeSuccessor->mpNodeRight == NULL) // pNode has exactly one non-NULL child.
+ pNodeChild = pNodeSuccessor->mpNodeLeft; // pNodeChild is not null.
+ else
+ {
+ // pNode has two non-null children. Set pNodeSuccessor to pNode's successor. pNodeChild might be NULL.
+ pNodeSuccessor = pNodeSuccessor->mpNodeRight;
+
+ while(pNodeSuccessor->mpNodeLeft)
+ pNodeSuccessor = pNodeSuccessor->mpNodeLeft;
+
+ pNodeChild = pNodeSuccessor->mpNodeRight;
+ }
+
+ // Here we remove pNode from the tree and fix up the node pointers appropriately around it.
+ if(pNodeSuccessor == pNode) // If pNode was a leaf node (had both NULL children)...
+ {
+ pNodeChildParent = pNodeSuccessor->mpNodeParent; // Assign pNodeReplacement's parent.
+
+ if(pNodeChild)
+ pNodeChild->mpNodeParent = pNodeSuccessor->mpNodeParent;
+
+ if(pNode == pNodeRootRef) // If the node being deleted is the root node...
+ pNodeRootRef = pNodeChild; // Set the new root node to be the pNodeReplacement.
+ else
+ {
+ if(pNode == pNode->mpNodeParent->mpNodeLeft) // If pNode is a left node...
+ pNode->mpNodeParent->mpNodeLeft = pNodeChild; // Make pNode's replacement node be on the same side.
+ else
+ pNode->mpNodeParent->mpNodeRight = pNodeChild;
+ // Now pNode is disconnected from the bottom of the tree (recall that in this pathway pNode was determined to be a leaf).
+ }
+
+ if(pNode == pNodeLeftmostRef) // If pNode is the tree begin() node...
+ {
+ // Because pNode is the tree begin(), pNode->mpNodeLeft must be NULL.
+ // Here we assign the new begin() (first node).
+ if(pNode->mpNodeRight && pNodeChild)
+ {
+ EASTL_ASSERT(pNodeChild != NULL); // Logically pNodeChild should always be valid.
+ pNodeLeftmostRef = RBTreeGetMinChild(pNodeChild);
+ }
+ else
+ pNodeLeftmostRef = pNode->mpNodeParent; // This makes (pNodeLeftmostRef == end()) if (pNode == root node)
+ }
+
+ if(pNode == pNodeRightmostRef) // If pNode is the tree last (rbegin()) node...
+ {
+ // Because pNode is the tree rbegin(), pNode->mpNodeRight must be NULL.
+ // Here we assign the new rbegin() (last node)
+ if(pNode->mpNodeLeft && pNodeChild)
+ {
+ EASTL_ASSERT(pNodeChild != NULL); // Logically pNodeChild should always be valid.
+ pNodeRightmostRef = RBTreeGetMaxChild(pNodeChild);
+ }
+ else // pNodeChild == pNode->mpNodeLeft
+ pNodeRightmostRef = pNode->mpNodeParent; // makes pNodeRightmostRef == &mAnchor if pNode == pNodeRootRef
+ }
+ }
+ else // else (pNodeSuccessor != pNode)
+ {
+ // Relink pNodeSuccessor in place of pNode. pNodeSuccessor is pNode's successor.
+ // We specifically set pNodeSuccessor to be on the right child side of pNode, so fix up the left child side.
+ pNode->mpNodeLeft->mpNodeParent = pNodeSuccessor;
+ pNodeSuccessor->mpNodeLeft = pNode->mpNodeLeft;
+
+ if(pNodeSuccessor == pNode->mpNodeRight) // If pNode's successor was at the bottom of the tree... (yes that's effectively what this statement means)
+ pNodeChildParent = pNodeSuccessor; // Assign pNodeReplacement's parent.
+ else
+ {
+ pNodeChildParent = pNodeSuccessor->mpNodeParent;
+
+ if(pNodeChild)
+ pNodeChild->mpNodeParent = pNodeChildParent;
+
+ pNodeChildParent->mpNodeLeft = pNodeChild;
+
+ pNodeSuccessor->mpNodeRight = pNode->mpNodeRight;
+ pNode->mpNodeRight->mpNodeParent = pNodeSuccessor;
+ }
+
+ if(pNode == pNodeRootRef)
+ pNodeRootRef = pNodeSuccessor;
+ else if(pNode == pNode->mpNodeParent->mpNodeLeft)
+ pNode->mpNodeParent->mpNodeLeft = pNodeSuccessor;
+ else
+ pNode->mpNodeParent->mpNodeRight = pNodeSuccessor;
+
+ // Now pNode is disconnected from the tree.
+
+ pNodeSuccessor->mpNodeParent = pNode->mpNodeParent;
+ eastl::swap(pNodeSuccessor->mColor, pNode->mColor);
+ }
+
+ // Here we do tree balancing as per the conventional red-black tree algorithm.
+ if(pNode->mColor == kRBTreeColorBlack)
+ {
+ while((pNodeChild != pNodeRootRef) && ((pNodeChild == NULL) || (pNodeChild->mColor == kRBTreeColorBlack)))
+ {
+ if(pNodeChild == pNodeChildParent->mpNodeLeft)
+ {
+ rbtree_node_base* pNodeTemp = pNodeChildParent->mpNodeRight;
+
+ if(pNodeTemp->mColor == kRBTreeColorRed)
+ {
+ pNodeTemp->mColor = kRBTreeColorBlack;
+ pNodeChildParent->mColor = kRBTreeColorRed;
+ pNodeRootRef = RBTreeRotateLeft(pNodeChildParent, pNodeRootRef);
+ pNodeTemp = pNodeChildParent->mpNodeRight;
+ }
+
+ if(((pNodeTemp->mpNodeLeft == NULL) || (pNodeTemp->mpNodeLeft->mColor == kRBTreeColorBlack)) &&
+ ((pNodeTemp->mpNodeRight == NULL) || (pNodeTemp->mpNodeRight->mColor == kRBTreeColorBlack)))
+ {
+ pNodeTemp->mColor = kRBTreeColorRed;
+ pNodeChild = pNodeChildParent;
+ pNodeChildParent = pNodeChildParent->mpNodeParent;
+ }
+ else
+ {
+ if((pNodeTemp->mpNodeRight == NULL) || (pNodeTemp->mpNodeRight->mColor == kRBTreeColorBlack))
+ {
+ pNodeTemp->mpNodeLeft->mColor = kRBTreeColorBlack;
+ pNodeTemp->mColor = kRBTreeColorRed;
+ pNodeRootRef = RBTreeRotateRight(pNodeTemp, pNodeRootRef);
+ pNodeTemp = pNodeChildParent->mpNodeRight;
+ }
+
+ pNodeTemp->mColor = pNodeChildParent->mColor;
+ pNodeChildParent->mColor = kRBTreeColorBlack;
+
+ if(pNodeTemp->mpNodeRight)
+ pNodeTemp->mpNodeRight->mColor = kRBTreeColorBlack;
+
+ pNodeRootRef = RBTreeRotateLeft(pNodeChildParent, pNodeRootRef);
+ break;
+ }
+ }
+ else
+ {
+ // The following is the same as above, with mpNodeRight <-> mpNodeLeft.
+ rbtree_node_base* pNodeTemp = pNodeChildParent->mpNodeLeft;
+
+ if(pNodeTemp->mColor == kRBTreeColorRed)
+ {
+ pNodeTemp->mColor = kRBTreeColorBlack;
+ pNodeChildParent->mColor = kRBTreeColorRed;
+
+ pNodeRootRef = RBTreeRotateRight(pNodeChildParent, pNodeRootRef);
+ pNodeTemp = pNodeChildParent->mpNodeLeft;
+ }
+
+ if(((pNodeTemp->mpNodeRight == NULL) || (pNodeTemp->mpNodeRight->mColor == kRBTreeColorBlack)) &&
+ ((pNodeTemp->mpNodeLeft == NULL) || (pNodeTemp->mpNodeLeft->mColor == kRBTreeColorBlack)))
+ {
+ pNodeTemp->mColor = kRBTreeColorRed;
+ pNodeChild = pNodeChildParent;
+ pNodeChildParent = pNodeChildParent->mpNodeParent;
+ }
+ else
+ {
+ if((pNodeTemp->mpNodeLeft == NULL) || (pNodeTemp->mpNodeLeft->mColor == kRBTreeColorBlack))
+ {
+ pNodeTemp->mpNodeRight->mColor = kRBTreeColorBlack;
+ pNodeTemp->mColor = kRBTreeColorRed;
+
+ pNodeRootRef = RBTreeRotateLeft(pNodeTemp, pNodeRootRef);
+ pNodeTemp = pNodeChildParent->mpNodeLeft;
+ }
+
+ pNodeTemp->mColor = pNodeChildParent->mColor;
+ pNodeChildParent->mColor = kRBTreeColorBlack;
+
+ if(pNodeTemp->mpNodeLeft)
+ pNodeTemp->mpNodeLeft->mColor = kRBTreeColorBlack;
+
+ pNodeRootRef = RBTreeRotateRight(pNodeChildParent, pNodeRootRef);
+ break;
+ }
+ }
+ }
+
+ if(pNodeChild)
+ pNodeChild->mColor = kRBTreeColorBlack;
+ }
+
+ } // RBTreeErase
+
+
+
+} // namespace eastl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/source/string.cpp b/EASTL/source/string.cpp
new file mode 100644
index 0000000..ae73f11
--- /dev/null
+++ b/EASTL/source/string.cpp
@@ -0,0 +1,464 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/string.h>
+#include <EABase/eabase.h>
+#include <string.h>
+
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////////
+ // Converters for DecodePart
+ //
+ // For some decent documentation about conversions, see:
+ // http://tidy.sourceforge.net/cgi-bin/lxr/source/src/utf8.c
+ //
+ ///////////////////////////////////////////////////////////////////////////////
+
+ // Requires that pDest have a capacity of at least 6 chars.
+ // Sets pResult to '\1' in the case that c is an invalid UCS4 char.
+ inline bool UCS4ToUTF8(uint32_t c, char*& pResult)
+ {
+ if(c < 0x00000080)
+ *pResult++ = (char)(uint8_t)c;
+ else if(c < 0x0800)
+ {
+ *pResult++ = (char)(uint8_t)(0xC0 | (c >> 6));
+ *pResult++ = (char)(uint8_t)(0x80 | (c & 0x3F));
+ }
+ else if(c <= 0x0000FFFF)
+ {
+ *pResult++ = (char)(uint8_t)(0xE0 | (c >> 12));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 6) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | (c & 0x3F));
+ }
+ else if(c <= 0x001FFFFF)
+ {
+ *pResult++ = (char)(uint8_t)(0xF0 | (c >> 18));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 12) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 6) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | (c & 0x3F));
+ }
+ else if(c <= 0x003FFFFFF)
+ {
+ *pResult++ = (char)(uint8_t)(0xF8 | (c >> 24));
+ *pResult++ = (char)(uint8_t)(0x80 | (c >> 18));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 12) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 6) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | (c & 0x3F));
+ }
+ else if(c <= 0x7FFFFFFF)
+ {
+ *pResult++ = (char)(uint8_t)(0xFC | (c >> 30));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 24) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 18) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 12) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | ((c >> 6) & 0x3F));
+ *pResult++ = (char)(uint8_t)(0x80 | (c & 0x3F));
+ }
+ else
+ {
+ // values >= 0x80000000 can't be converted to UTF8.
+ *pResult++ = '\1';
+ return false;
+ }
+
+ return true;
+ }
+
+
+ // Requires that pResult have a capacity of at least 3 chars.
+ // Sets pResult to '\1' in the case that c is an invalid UCS4 char.
+ inline bool UCS2ToUTF8(uint16_t c, char*& pResult)
+ {
+ return UCS4ToUTF8(c, pResult);
+ }
+
+
+ // Sets result to 0xffff in the case that the input UTF8 sequence is bad.
+ // 32 bit 0xffffffff is an invalid UCS4 code point, so we can't use that as an error return value.
+ inline bool UTF8ToUCS4(const char*& p, const char* pEnd, uint32_t& result)
+ {
+ // This could likely be implemented in a faster-executing way that uses tables.
+
+ bool success = true;
+ uint32_t c = 0xffff;
+ const char* pNext = NULL;
+
+ if(p < pEnd)
+ {
+ uint8_t cChar0((uint8_t)*p), cChar1, cChar2, cChar3;
+
+ // Asserts are disabled because we don't necessarily want to interrupt runtime execution due to this.
+ // EASTL_ASSERT((cChar0 != 0xFE) && (cChar0 != 0xFF)); // No byte can be 0xFE or 0xFF
+ // Code below will effectively catch this error as it goes.
+
+ if(cChar0 < 0x80)
+ {
+ pNext = p + 1;
+ c = cChar0;
+ }
+ else
+ {
+ //EASTL_ASSERT((cChar0 & 0xC0) == 0xC0); // The top two bits need to be equal to 1
+ if((cChar0 & 0xC0) != 0xC0)
+ {
+ success = false;
+ goto Failure;
+ }
+
+ if((cChar0 & 0xE0) == 0xC0)
+ {
+ pNext = p + 2;
+
+ if(pNext <= pEnd)
+ {
+ c = (uint32_t)((cChar0 & 0x1F) << 6);
+ cChar1 = static_cast<uint8_t>(p[1]);
+ c |= cChar1 & 0x3F;
+
+ //EASTL_ASSERT((cChar1 & 0xC0) == 0x80); // All subsequent code should be b10xxxxxx
+ //EASTL_ASSERT(c >= 0x0080 && c < 0x0800); // Check that we have the smallest coding
+ if(!((cChar1 & 0xC0) == 0x80) ||
+ !(c >= 0x0080 && c < 0x0800))
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ else
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ else if((cChar0 & 0xF0) == 0xE0)
+ {
+ pNext = p + 3;
+
+ if(pNext <= pEnd)
+ {
+ c = (uint32_t)((cChar0 & 0xF) << 12);
+ cChar1 = static_cast<uint8_t>(p[1]);
+ c |= (cChar1 & 0x3F) << 6;
+ cChar2 = static_cast<uint8_t>(p[2]);
+ c |= cChar2 & 0x3F;
+
+ //EASTL_ASSERT((cChar1 & 0xC0) == 0x80); // All subsequent code should be b10xxxxxx
+ //EASTL_ASSERT((cChar2 & 0xC0) == 0x80); // All subsequent code should be b10xxxxxx
+ //EASTL_ASSERT(c >= 0x00000800 && c < 0x00010000); // Check that we have the smallest coding
+ if(!((cChar1 & 0xC0) == 0x80) ||
+ !((cChar2 & 0xC0) == 0x80) ||
+ !(c >= 0x00000800 && c < 0x00010000))
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ else
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ else if((cChar0 & 0xF8) == 0xF0)
+ {
+ pNext = p + 4;
+
+ if(pNext <= pEnd)
+ {
+ c = (uint32_t)((cChar0 & 0x7) << 18);
+ cChar1 = static_cast<uint8_t>(p[1]);
+ c |= (uint32_t)((cChar1 & 0x3F) << 12);
+ cChar2 = static_cast<uint8_t>(p[2]);
+ c |= (cChar2 & 0x3F) << 6;
+ cChar3 = static_cast<uint8_t>(p[3]);
+ c |= cChar3 & 0x3F;
+
+ //EASTL_ASSERT((cChar0 & 0xf8) == 0xf0); // We handle the unicode but not UCS-4
+ //EASTL_ASSERT((cChar1 & 0xC0) == 0x80); // All subsequent code should be b10xxxxxx
+ //EASTL_ASSERT((cChar2 & 0xC0) == 0x80); // All subsequent code should be b10xxxxxx
+ //EASTL_ASSERT((cChar3 & 0xC0) == 0x80); // All subsequent code should be b10xxxxxx
+ //EASTL_ASSERT(c >= 0x00010000 && c <= 0x0010FFFF); // Check that we have the smallest coding, Unicode and not ucs-4
+ if(!((cChar0 & 0xf8) == 0xf0) ||
+ !((cChar1 & 0xC0) == 0x80) ||
+ !((cChar2 & 0xC0) == 0x80) ||
+ !(c >= 0x00010000 && c <= 0x0010FFFF))
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ else
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ else if((cChar0 & 0xFC) == 0xF8)
+ {
+ pNext = p + 4;
+
+ if(pNext <= pEnd)
+ {
+ // To do. We don't currently support extended UCS4 characters.
+ }
+ else
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ else if((cChar0 & 0xFE) == 0xFC)
+ {
+ pNext = p + 5;
+
+ if(pNext <= pEnd)
+ {
+ // To do. We don't currently support extended UCS4 characters.
+ }
+ else
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ else
+ {
+ success = false;
+ goto Failure;
+ }
+ }
+ }
+ else
+ success = false;
+
+ Failure:
+ if(success)
+ {
+ p = pNext;
+ result = c;
+ }
+ else
+ {
+ p = p + 1;
+ result = 0xffff;
+ }
+
+ return success;
+ }
+
+ // Sets result to 0xffff in the case that the input UTF8 sequence is bad.
+ // The effect of converting UTF8 codepoints > 0xffff to UCS2 (char16_t) is to set all
+ // such codepoints to 0xffff. EASTL doesn't have a concept of setting or maintaining
+ // error state for string conversions, though it does have a policy of converting
+ // impossible values to something without generating invalid strings or throwing exceptions.
+ inline bool UTF8ToUCS2(const char*& p, const char* pEnd, uint16_t& result)
+ {
+ uint32_t u32;
+
+ if(UTF8ToUCS4(p, pEnd, u32))
+ {
+ if(u32 <= 0xffff)
+ {
+ result = (uint16_t)u32;
+ return true;
+ }
+ }
+
+ result = 0xffff;
+ return false;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // DecodePart
+ ///////////////////////////////////////////////////////////////////////////
+
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char*& pDest, char* pDestEnd)
+ {
+ size_t sourceSize = (size_t)(pSrcEnd - pSrc);
+ size_t destSize = (size_t)(pDestEnd - pDest);
+
+ if(sourceSize > destSize)
+ sourceSize = destSize;
+
+ memmove(pDest, pSrc, sourceSize * sizeof(*pSrcEnd));
+
+ pSrc += sourceSize;
+ pDest += sourceSize; // Intentionally add sourceSize here.
+
+ return true;
+ }
+
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+ {
+ bool success = true;
+
+ while(success && (pSrc < pSrcEnd) && (pDest < pDestEnd))
+ success = UTF8ToUCS2(pSrc, pSrcEnd, (uint16_t&)*pDest++);
+
+ return success;
+ }
+
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+ {
+ bool success = true;
+
+ while(success && (pSrc < pSrcEnd) && (pDest < pDestEnd))
+ success = UTF8ToUCS4(pSrc, pSrcEnd, (uint32_t&)*pDest++);
+
+ return success;
+ }
+
+
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char*& pDest, char* pDestEnd)
+ {
+ bool success = true;
+
+ EASTL_ASSERT((pDest + 6) < pDestEnd); // The user must provide ample buffer space, preferably 256 chars or more.
+ pDestEnd -= 6; // Do this so that we can avoid dest buffer size checking in the loop below and the function it calls.
+
+ while(success && (pSrc < pSrcEnd) && (pDest < pDestEnd))
+ success = UCS2ToUTF8(*pSrc++, pDest);
+
+ return success;
+ }
+
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+ {
+ size_t sourceSize = (size_t)(pSrcEnd - pSrc);
+ size_t destSize = (size_t)(pDestEnd - pDest);
+
+ if(sourceSize > destSize)
+ sourceSize = destSize;
+
+ memmove(pDest, pSrc, sourceSize * sizeof(*pSrcEnd));
+
+ pSrc += sourceSize;
+ pDest += sourceSize; // Intentionally add sourceSize here.
+
+ return true;
+ }
+
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+ {
+ size_t sourceSize = (size_t)(pSrcEnd - pSrc);
+ size_t destSize = (size_t)(pDestEnd - pDest);
+
+ if(sourceSize > destSize)
+ pSrcEnd = pSrc + destSize;
+
+ while(pSrc != pSrcEnd) // To consider: Improve this by unrolling this loop. Other tricks can improve its speed as well.
+ *pDest++ = (char32_t)*pSrc++;
+
+ return true;
+ }
+
+
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char*& pDest, char* pDestEnd)
+ {
+ bool success = true;
+
+ EASTL_ASSERT((pDest + 6) < pDestEnd); // The user must provide ample buffer space, preferably 256 chars or more.
+ pDestEnd -= 6; // Do this so that we can avoid dest buffer size checking in the loop below and the function it calls.
+
+ while(success && (pSrc < pSrcEnd) && (pDest < pDestEnd))
+ success = UCS4ToUTF8(*pSrc++, pDest);
+
+ return success;
+ }
+
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+ {
+ size_t sourceSize = (size_t)(pSrcEnd - pSrc);
+ size_t destSize = (size_t)(pDestEnd - pDest);
+
+ if(sourceSize > destSize)
+ pSrcEnd = pSrc + destSize;
+
+ while(pSrc != pSrcEnd) // To consider: Improve this by unrolling this loop. Other tricks can improve its speed as well.
+ *pDest++ = (char16_t)*pSrc++; // This is potentially losing data. We are not converting to UTF16; we are converting to UCS2.
+
+ return true;
+ }
+
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+ {
+ size_t sourceSize = (size_t)(pSrcEnd - pSrc);
+ size_t destSize = (size_t)(pDestEnd - pDest);
+
+ if(sourceSize > destSize)
+ sourceSize = destSize;
+
+ memmove(pDest, pSrc, sourceSize * sizeof(*pSrcEnd));
+
+ pSrc += sourceSize;
+ pDest += sourceSize; // Intentionally add sourceSize here.
+
+ return true;
+ }
+
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char*& pDest, char* pDestEnd)
+ {
+ bool success = true;
+
+ EASTL_ASSERT((pDest + 6) < pDestEnd); // The user must provide ample buffer space, preferably 256 chars or more.
+ pDestEnd -= 6; // Do this so that we can avoid dest buffer size checking in the loop below and the function it calls.
+
+ while(success && (pSrc < pSrcEnd) && (pDest < pDestEnd))
+ success = UCS4ToUTF8((uint32_t)(unsigned)*pSrc++, pDest);
+
+ return success;
+ }
+
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+ {
+ size_t sourceSize = (size_t)(pSrcEnd - pSrc);
+ size_t destSize = (size_t)(pDestEnd - pDest);
+
+ if(sourceSize > destSize)
+ pSrcEnd = pSrc + destSize;
+
+ while(pSrc != pSrcEnd) // To consider: Improve this by unrolling this loop. Other tricks can improve its speed as well.
+ *pDest++ = (char16_t)*pSrc++; // This is potentially losing data. We are not converting to UTF16; we are converting to UCS2.
+
+ return true;
+ }
+
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+ {
+ size_t sourceSize = (size_t)(pSrcEnd - pSrc);
+ size_t destSize = (size_t)(pDestEnd - pDest);
+
+ if(sourceSize > destSize)
+ pSrcEnd = pSrc + destSize;
+
+ while(pSrc != pSrcEnd) // To consider: Improve this by unrolling this loop. Other tricks can improve its speed as well.
+ *pDest++ = (char32_t)*pSrc++; // This is potentially losing data. We are not converting to UTF16; we are converting to UCS2.
+
+ return true;
+ }
+
+
+
+} // namespace eastl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/source/thread_support.cpp b/EASTL/source/thread_support.cpp
new file mode 100644
index 0000000..693dd24
--- /dev/null
+++ b/EASTL/source/thread_support.cpp
@@ -0,0 +1,129 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/thread_support.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/memory.h>
+
+#if defined(EA_PLATFORM_MICROSOFT)
+ EA_DISABLE_ALL_VC_WARNINGS();
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+ #ifdef __MINGW64__
+ #include <windows.h>
+ #else
+ #include <Windows.h>
+ #endif
+ EA_RESTORE_ALL_VC_WARNINGS();
+#endif
+
+
+namespace eastl
+{
+ namespace Internal
+ {
+ #if EASTL_CPP11_MUTEX_ENABLED
+ // We use the C++11 Standard Library mutex as-is.
+ #else
+ /////////////////////////////////////////////////////////////////
+ // mutex
+ /////////////////////////////////////////////////////////////////
+
+ mutex::mutex()
+ {
+ #if defined(EA_PLATFORM_MICROSOFT)
+ static_assert(sizeof(mMutexBuffer) == sizeof(CRITICAL_SECTION), "mMutexBuffer size failure");
+ //static_assert(EA_ALIGN_OF(mMutexBuffer) >= EA_ALIGN_OF(CRITICAL_SECTION), "mMutexBuffer alignment failure"); // Enabling this causes the VS2012 compiler to crash.
+
+ #if !defined(_WIN32_WINNT) || (_WIN32_WINNT < 0x0403)
+ InitializeCriticalSection((CRITICAL_SECTION*)mMutexBuffer);
+ #elif !EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_DESKTOP)
+ BOOL result = InitializeCriticalSectionEx((CRITICAL_SECTION*)mMutexBuffer, 10, 0);
+ EASTL_ASSERT(result != 0); EA_UNUSED(result);
+ #else
+ BOOL result = InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION*)mMutexBuffer, 10);
+ EASTL_ASSERT(result != 0); EA_UNUSED(result);
+ #endif
+
+ #elif defined(EA_PLATFORM_POSIX)
+ pthread_mutexattr_t attr;
+
+ pthread_mutexattr_init(&attr);
+
+ #if defined(EA_HAVE_pthread_mutexattr_setpshared_DECL)
+ pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
+ #endif
+
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutex_init(&mMutex, &attr);
+ pthread_mutexattr_destroy(&attr);
+ #endif
+ }
+
+ mutex::~mutex()
+ {
+ #if defined(EA_PLATFORM_MICROSOFT)
+ DeleteCriticalSection((CRITICAL_SECTION*)mMutexBuffer);
+ #elif defined(EA_PLATFORM_POSIX)
+ pthread_mutex_destroy(&mMutex);
+ #endif
+ }
+
+ void mutex::lock()
+ {
+ #if defined(EA_PLATFORM_MICROSOFT)
+ EnterCriticalSection((CRITICAL_SECTION*)mMutexBuffer);
+ #elif defined(EA_PLATFORM_POSIX)
+ pthread_mutex_lock(&mMutex);
+ #else
+ EASTL_FAIL_MSG("EASTL thread safety is not implemented yet. See EAThread for how to do this for the given platform.");
+ #endif
+ }
+
+ void mutex::unlock()
+ {
+ #if defined(EA_PLATFORM_MICROSOFT)
+ LeaveCriticalSection((CRITICAL_SECTION*)mMutexBuffer);
+ #elif defined(EA_PLATFORM_POSIX)
+ pthread_mutex_unlock(&mMutex);
+ #endif
+ }
+ #endif
+
+
+ /////////////////////////////////////////////////////////////////
+ // shared_ptr_auto_mutex
+ /////////////////////////////////////////////////////////////////
+
+ // We could solve this by having single global mutex for all shared_ptrs, a set of mutexes for shared_ptrs,
+ // a single mutex for every shared_ptr, or have a template parameter that enables mutexes for just some shared_ptrs.
+ eastl::late_constructed<mutex, true> gSharedPtrMutex;
+
+ shared_ptr_auto_mutex::shared_ptr_auto_mutex(const void* /*pSharedPtr*/)
+ : auto_mutex(*gSharedPtrMutex.get())
+ {
+ }
+
+
+ } // namespace Internal
+
+} // namespace eastl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/CMakeLists.txt b/EASTL/test/CMakeLists.txt
new file mode 100644
index 0000000..ff16189
--- /dev/null
+++ b/EASTL/test/CMakeLists.txt
@@ -0,0 +1,102 @@
+#-------------------------------------------------------------------------------------------
+# Copyright (C) Electronic Arts Inc. All rights reserved.
+#-------------------------------------------------------------------------------------------
+
+#-------------------------------------------------------------------------------------------
+# CMake info
+#-------------------------------------------------------------------------------------------
+cmake_minimum_required(VERSION 3.1)
+project(EASTLTest CXX)
+include(CTest)
+
+#-------------------------------------------------------------------------------------------
+# Defines
+#-------------------------------------------------------------------------------------------
+add_definitions(-D_CRT_SECURE_NO_WARNINGS)
+add_definitions(-D_SCL_SECURE_NO_WARNINGS)
+add_definitions(-DEASTL_OPENSOURCE=1)
+add_definitions(-D_CHAR16T)
+add_definitions(-DEASTL_THREAD_SUPPORT_AVAILABLE=0)
+if (EASTL_STD_ITERATOR_CATEGORY_ENABLED)
+ add_definitions(-DEASTL_STD_ITERATOR_CATEGORY_ENABLED=1)
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Compiler Flags
+#-------------------------------------------------------------------------------------------
+set (CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/../scripts/CMake")
+include(CommonCppFlags)
+
+if (MSVC)
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
+endif()
+
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-pointer-bool-conversion -Wno-unknown-warning-option")
+endif()
+
+# Parts of the test suite fail to compile if char8_t is enabled, so we
+# disable it and only enable for specific source files later on.
+if (EASTL_NO_CHAR8T_FLAG)
+ add_compile_options(${EASTL_NO_CHAR8T_FLAG})
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Source files
+#-------------------------------------------------------------------------------------------
+file(GLOB EASTLTEST_SOURCES "source/*.cpp" "source/*.inl" "source/*.h")
+set(SOURCES ${EASTLTEST_SOURCES})
+
+# Compile a subset of tests with explicit char8_t support if available.
+if (EASTL_CHAR8T_FLAG)
+ message(STATUS "Building with char8_t support in tests.")
+ set(EASTLTEST_CHAR8T_SOURCES "source/TestString.cpp" "source/TestStringView.cpp")
+
+ set_source_files_properties(${EASTLTEST_CHAR8T_SOURCES} PROPERTIES
+ COMPILE_FLAGS ${EASTL_CHAR8T_FLAG}
+ COMPILE_DEFINITIONS "EASTL_EXPECT_CHAR8T_SUPPORT")
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Executable definition
+#-------------------------------------------------------------------------------------------
+add_executable(EASTLTest ${SOURCES})
+
+#-------------------------------------------------------------------------------------------
+# Include directories
+#-------------------------------------------------------------------------------------------
+target_include_directories(EASTLTest PUBLIC include)
+
+#-------------------------------------------------------------------------------------------
+# Dependencies
+#-------------------------------------------------------------------------------------------
+add_subdirectory(packages/EABase)
+add_subdirectory(packages/EAAssert)
+add_subdirectory(packages/EAStdC)
+add_subdirectory(packages/EAMain)
+add_subdirectory(packages/EATest)
+add_subdirectory(packages/EAThread)
+
+target_link_libraries(EASTLTest EABase)
+target_link_libraries(EASTLTest EAAssert)
+target_link_libraries(EASTLTest EAMain)
+target_link_libraries(EASTLTest EASTL)
+target_link_libraries(EASTLTest EAStdC)
+target_link_libraries(EASTLTest EATest)
+target_link_libraries(EASTLTest EAThread)
+
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
+
+if((NOT APPLE) AND (NOT WIN32))
+ target_link_libraries(EASTLTest ${EASTLTest_Libraries} Threads::Threads rt)
+else()
+ target_link_libraries(EASTLTest ${EASTLTest_Libraries} Threads::Threads)
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Run Unit tests and verify the results.
+#-------------------------------------------------------------------------------------------
+add_test(EASTLTestRuns EASTLTest)
+set_tests_properties (EASTLTestRuns PROPERTIES PASS_REGULAR_EXPRESSION "RETURNCODE=0")
+
diff --git a/EASTL/test/packages/EABase/.gitignore b/EASTL/test/packages/EABase/.gitignore
new file mode 100644
index 0000000..8d148cd
--- /dev/null
+++ b/EASTL/test/packages/EABase/.gitignore
@@ -0,0 +1,49 @@
+tags
+cscope.out
+**/*.swp
+**/*.swo
+.swp
+*.swp
+.swo
+.TMP
+-.d
+eastl_build_out
+build_bench
+bench.bat
+build.bat
+.p4config
+
+## CMake generated files
+CMakeCache.txt
+cmake_install.cmake
+
+## Patch files
+*.patch
+
+## For Visual Studio Generated projects
+*.sln
+**/*.vcxproj
+**/*.vcxproj.filters
+*.VC.opendb
+*.sdf
+**/*.suo
+**/*.user
+.vs/*
+**/Debug/*
+CMakeFiles/*
+EASTL.dir/**
+RelWithDebInfo/*
+Release/*
+Win32/*
+x64/*
+MinSizeRel/*
+build*/*
+Testing/*
+%ALLUSERSPROFILE%/*
+
+# Buck
+/buck-out/
+/.buckd/
+/buckaroo/
+.buckconfig.local
+BUCKAROO_DEPS
diff --git a/EASTL/test/packages/EABase/.p4ignore b/EASTL/test/packages/EABase/.p4ignore
new file mode 100644
index 0000000..b660f25
--- /dev/null
+++ b/EASTL/test/packages/EABase/.p4ignore
@@ -0,0 +1,4 @@
+tags
+
+.p4config
+/.git/
diff --git a/EASTL/test/packages/EABase/.travis.yml b/EASTL/test/packages/EABase/.travis.yml
new file mode 100644
index 0000000..1e4a16e
--- /dev/null
+++ b/EASTL/test/packages/EABase/.travis.yml
@@ -0,0 +1,68 @@
+language: cpp
+
+os:
+ - linux
+ - osx
+ - windows
+
+compiler:
+ - gcc
+ - clang
+ - msvc
+
+env:
+ - EA_CONFIG=Debug
+ - EA_CONFIG=Release
+
+addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ - george-edison55-precise-backports
+ - llvm-toolchain-trusty-7
+ packages:
+ - cmake
+ - cmake-data
+ - g++-7
+ - clang-7
+
+matrix:
+ exclude:
+ - os: osx
+ compiler: gcc
+ - os: osx
+ compiler: msvc
+ - os: linux
+ compiler: msvc
+ - os: windows
+ compiler: clang
+ - os: windows
+ compiler: gcc
+
+# Handle git submodules yourself
+git:
+ submodules: false
+
+# Use sed to replace the SSH URL with the public URL, then initialize submodules
+before_install:
+ - sed --version >/dev/null 2>&1 && sed -i 's/git@github.com:/https:\/\/github.com\//' .gitmodules || sed -i "" 's/git@github.com:/https:\/\/github.com\//' .gitmodules
+ - git submodule update --init
+
+install:
+ - if [[ "$CXX" == "g++" ]]; then export CC="gcc-7" ;fi
+ - if [[ "$CXX" == "g++" ]]; then export CXX="g++-7" ;fi
+ - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CC="clang-7" ;fi
+ - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CXX="clang++-7" ;fi
+
+# Universal Setup
+before_script:
+ - mkdir build_$EA_CONFIG
+ - cd build_$EA_CONFIG
+ - cmake .. -DEABASE_BUILD_TESTS:BOOL=ON
+ - cmake --build . --config $EA_CONFIG
+
+script:
+ # Run Tests
+ - cd $TRAVIS_BUILD_DIR/build_$EA_CONFIG/test
+ - ctest -C $EA_CONFIG -V || exit 1
+
diff --git a/EASTL/test/packages/EABase/CMakeLists.txt b/EASTL/test/packages/EABase/CMakeLists.txt
new file mode 100644
index 0000000..fba98ea
--- /dev/null
+++ b/EASTL/test/packages/EABase/CMakeLists.txt
@@ -0,0 +1,32 @@
+#-------------------------------------------------------------------------------------------
+# Copyright (C) Electronic Arts Inc. All rights reserved.
+#-------------------------------------------------------------------------------------------
+cmake_minimum_required(VERSION 3.1)
+project(EABase CXX)
+
+#-------------------------------------------------------------------------------------------
+# Options
+#-------------------------------------------------------------------------------------------
+option(EABASE_BUILD_TESTS "Enable generation of build files for tests" OFF)
+
+#-------------------------------------------------------------------------------------------
+# Package Tests
+#-------------------------------------------------------------------------------------------
+if(EABASE_BUILD_TESTS)
+ add_subdirectory(test)
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Defines
+#-------------------------------------------------------------------------------------------
+add_definitions(-D_CHAR16T)
+
+#-------------------------------------------------------------------------------------------
+# Header only library
+#-------------------------------------------------------------------------------------------
+add_library(EABase INTERFACE)
+
+#-------------------------------------------------------------------------------------------
+# Include dirs
+#-------------------------------------------------------------------------------------------
+target_include_directories(EABase INTERFACE include/Common)
diff --git a/EASTL/test/packages/EABase/CONTRIBUTING.md b/EASTL/test/packages/EABase/CONTRIBUTING.md
new file mode 100644
index 0000000..015e1e7
--- /dev/null
+++ b/EASTL/test/packages/EABase/CONTRIBUTING.md
@@ -0,0 +1,73 @@
+## Contributing
+
+Before you can contribute, EA must have a Contributor License Agreement (CLA) on file that has been signed by each contributor.
+You can sign here: [Go to CLA](https://electronicarts.na1.echosign.com/public/esignWidget?wid=CBFCIBAA3AAABLblqZhByHRvZqmltGtliuExmuV-WNzlaJGPhbSRg2ufuPsM3P0QmILZjLpkGslg24-UJtek*)
+
+### Pull Request Policy
+
+All code contributions are submitted as [Github pull requests](https://help.github.com/articles/using-pull-requests/). All pull requests will be reviewed by a maintainer according to the guidelines found in the next section.
+
+Your pull request should:
+
+* merge cleanly
+* come with tests
+ * tests should be minimal and stable
+ * fail before your fix is applied
+* pass the test suite
+* code formatting is encoded in clang format
+ * limit using clang format on new code
+ * do not deviate from style already established in the files
+
+
+### Running the Unit Tests
+
+EAAssert uses CMake as its build system.
+
+* Create and navigate to "your_build_folder":
+ * mkdir your_build_folder && cd your_build_folder
+* Generate build scripts:
+ * cmake source_folder -DEABASE_BUILD_TESTS:BOOL=ON
+* Build unit tests for "your_config":
+ * cmake --build . --config your_config
+* Run the unit tests for "your_config" from the test folder:
+ * cd test && ctest -C your_config
+
+
+Here is an example batch file.
+```batch
+set build_folder=out
+mkdir %build_folder%
+pushd %build_folder%
+call cmake .. -DEABASE_BUILD_TESTS:BOOL=ON
+call cmake --build . --config Release
+call cmake --build . --config Debug
+call cmake --build . --config RelWithDebInfo
+call cmake --build . --config MinSizeRel
+pushd test
+call ctest -C Release
+call ctest -C Debug
+call ctest -C RelWithDebInfo
+call ctest -C MinSizeRel
+popd
+popd
+```
+
+Here is an example bash file
+```bash
+build_folder=out
+mkdir $build_folder
+pushd $build_folder
+cmake .. -DEABASE_BUILD_TESTS:BOOL=ON
+cmake --build . --config Release
+cmake --build . --config Debug
+cmake --build . --config RelWithDebInfo
+cmake --build . --config MinSizeRel
+pushd test
+ctest -C Release
+ctest -C Debug
+ctest -C RelWithDebInfo
+ctest -C MinSizeRel
+popd
+popd
+```
+
diff --git a/EASTL/test/packages/EABase/LICENSE b/EASTL/test/packages/EABase/LICENSE
new file mode 100644
index 0000000..93ab228
--- /dev/null
+++ b/EASTL/test/packages/EABase/LICENSE
@@ -0,0 +1,27 @@
+/*
+Copyright (C) 2017 Electronic Arts Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
diff --git a/EASTL/test/packages/EABase/README.md b/EASTL/test/packages/EABase/README.md
new file mode 100644
index 0000000..5efa1de
--- /dev/null
+++ b/EASTL/test/packages/EABase/README.md
@@ -0,0 +1,26 @@
+# EABase
+
+[![Build Status](https://travis-ci.org/electronicarts/EABase.svg?branch=master)](https://travis-ci.org/electronicarts/EABase)
+
+EABase is a small set of header files that define platform-independent data types and macros.
+
+
+## Documentation
+
+Please see [Introduction](doc/EABase.html).
+
+
+## Compiling sources
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on compiling and testing the source.
+
+
+## Credits
+
+Roberto Parolin is the current EABase owner within EA and is responsible for the open source repository.
+
+
+## License
+
+Modified BSD License (3-Clause BSD license) see the file LICENSE in the project root.
+
diff --git a/EASTL/test/packages/EABase/doc/EABase.html b/EASTL/test/packages/EABase/doc/EABase.html
new file mode 100644
index 0000000..0be38e2
--- /dev/null
+++ b/EASTL/test/packages/EABase/doc/EABase.html
@@ -0,0 +1,309 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+ <title>EABase Future Plans</title>
+ <style type="text/css">
+ .SmallBody{
+ font-size: 10pt
+ }
+
+ body
+ {
+ font-family: Palatino Linotype, Book Antiqua, Times New Roman;
+ font-size: 11pt;
+ }
+
+ h1
+ {
+ font-family: Verdana;
+ display: block;
+ background-color: #FFF0B0;
+ border: solid 2px black;
+ font-size: 16pt;
+ font-weight: bold;
+ padding: 6px;
+ }
+
+ h2
+ {
+ font-size: 14pt;
+ font-family: Verdana;
+ border-bottom: 2px solid black;
+ }
+
+ h3
+ {
+ font-family: Verdana;
+ font-size: 13pt;
+ font-weight: bold;
+ }
+
+ .code-example
+ {
+ display: block;
+ background-color: #e0e0f0;
+ margin-left: 3em;
+ margin-right: 3em;
+ margin-top: 1em;
+ margin-bottom: 1em;
+ padding: 8px;
+ border: solid 2px #a0a0d0;
+ font-family: monospace;
+ font-size: 10pt;
+ white-space: pre;
+ }
+
+ .code-example-span
+ {
+ font-family: monospace;
+ font-size: 10pt;
+ white-space: pre;
+ }
+
+ .code-example-comment
+ {
+ background-color: #e0e0f0;
+ padding: 0px 0px;
+ font-family: monospace;
+ font-size: 10pt;
+ white-space: pre;
+ color: #999999;
+ margin: auto auto;
+ }
+
+
+ .faq-question
+ {
+ background-color: #D0E0D0;
+ font-size: 12pt;
+ font-weight: bold;
+ margin-bottom: 0.5em;
+ margin-top: 0em;
+ padding-left:8px;
+ padding-right:8px;
+ padding-top:2px;
+ padding-bottom:2px
+ }
+
+ .faq-answer
+ {
+ display: block;
+ margin: 4pt 1em 0.5em 1em;
+ }
+</style>
+</head>
+
+<body>
+<h1>EABase
+</h1>
+<h3>What is EABase?
+</h3>
+<p>EABase is a small set of header files that define platform-independent
+data types and macros. As such it is similar to many projects that have
+a platform.h, system.h, defines.h, etc. file. The difference is that
+EABase is very comprehensive and is the annointed Electronic Arts
+worldwide standard for new projects. </p>
+<p>With respect to the base types and definitions, many of these are
+already present in the most recent C language standard, though the C++
+standard has yet to formally adopt them. EABase bridges the gap and
+defines these values if they aren't already defined. With respect to
+compiler and platform definitions, EABase provides a standard reliable
+means of identifying or specifying compilers, platforms, endian-ness,
+alignment attributes, etc. </p>
+<h3>Usage notes </h3>
+<p>You probably don't want to use float_t and double_t. They are there for C99 compatibility but are rarely what you want to use, since their size is variable.</p>
+<p>Prid8, etc. are somewhat painful and ugly to use and you may find you don't like them. They too are for C99 compatibility.</p>
+<p>intptr_t is not a pointer to an int; it's an int with the same size as a pointer, so you can safely store pointers in it.</p>
+<p>EA::result_type is rarely used and exists for backwards compatibility.</p>
+<h3>What specifically does EABase define?</h3>
+<p>Here we list the things EABase defines, grouped by category. These
+defines are up to date as of the file modification date listed at the
+top of this file.</p>
+<h4>Base Types and Definitions<br>
+
+</h4>
+
+<div style="margin-left: 40px;">bool8_t, int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t, float_t, double_t, (EAStdC package implements int128_t)<br>
+
+intptr_t, uintptr_t, intmax_t, uintmax_t, ssize_t<br>
+
+char8_t, char16_t, char32_t<br>
+
+INT8_C(), UINT8_C(), etc.<br>
+
+INT8_MIN, INT8_MAX, UINT8_MAX, etc.<br>
+
+PRId8, PRId16, PRId32, etc, SCNd8, SCNd16, SCNd32, etc.</div>
+
+<h4>Result Types and Definitions<br>
+
+</h4>
+
+<div style="margin-left: 40px;">EA::result_type<br>
+
+EA::SUCCESS, EA::FAILURE<br>
+
+EA_SUCCEEDED(), EA_FAILED()</div>
+
+<h4>Compiler Definitions<br>
+
+</h4>
+
+<div style="margin-left: 40px;">EA_COMPILER_GNUC<br>
+
+EA_COMPILER_SN<br>
+
+EA_COMPILER_MSVC<br>
+
+EA_COMPILER_METROWERKS<br>
+
+EA_COMPILER_INTEL<br>
+
+EA_COMPILER_BORLANDC<br>
+
+<br>
+
+EA_COMPILER_VERSION = &lt;integer&gt;<br>
+
+EA_COMPILER_NAME = &lt;string&gt;<br>
+
+EA_COMPILER_STRING = &lt;string&gt;<br>
+
+<br>
+
+EA_COMPILER_NO_STATIC_CONSTANTS<br>
+
+EA_COMPILER_NO_TEMPLATE_SPECIALIZATION<br>
+
+EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION<br>
+
+EA_COMPILER_NO_MEMBER_TEMPLATES<br>
+
+EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION<br>
+
+EA_COMPILER_NO_TEMPLATE_TEMPLATES<br>
+
+EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS<br>
+
+EA_COMPILER_NO_VOID_RETURNS<br>
+
+EA_COMPILER_NO_COVARIANT_RETURN_TYPE<br>
+
+EA_COMPILER_NO_DEDUCED_TYPENAME<br>
+
+EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP<br>
+
+EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE<br>
+
+EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS<br>
+
+EA_COMPILER_NO_EXCEPTIONS<br>
+
+EA_COMPILER_NO_UNWIND<br>
+
+<br>
+
+EA_COMPILER_IS_ANSIC<br>
+
+EA_COMPILER_IS_C99<br>
+
+EA_COMPILER_HAS_C99_TYPES<br>
+EA_COMPILER_IS_CPLUSPLUS<br>
+
+EA_COMPILER_MANAGED_CPP</div>
+<h4>Utilities<br>
+</h4>
+<div style="margin-left: 40px;">
+
+ <p>EA_ALIGN_OF()<br>
+ EA_PREFIX_ALIGN()<br>
+
+EA_POSTFIX_ALIGN()<br>
+ EA_ALIGNED()<br>
+
+EA_PACKED()<br>
+ EA_LIKELY()<br>
+ EA_UNLIKELY()<br>
+ EA_ASSUME()<br>
+ EA_PURE<br>
+ EA_WCHAR_T_NON_NATIVE<br>
+
+EA_WCHAR_SIZE<br>
+ EA_RESTRICT<br>
+ EA_DEPRECATED<br>
+EA_PREFIX_DEPRECATED<br>
+EA_POSTFIX_DEPRECATED <br>
+ EA_FORCE_INLINE<br>
+ EA_NO_INLINE<br>
+ EA_PREFIX_NO_INLINE<br>
+ EA_POSTFIX_NO_INLINE <br>
+ EA_PASCAL<br>
+
+EA_PASCAL_FUNC()<br>
+
+EA_SSE = [0 | 1]<br>
+
+EA_IMPORT<br>
+
+EA_EXPORT<br>
+EA_OVERRIDE<br>
+EA_INIT_PRIORITY<br>
+EA_MAY_ALIAS<br>
+</p>
+</div>
+
+<h4>Platform Definitions<br>
+
+</h4>
+
+<div style="margin-left: 40px;"><br>
+
+
+EA_PLATFORM_MAC<br>
+
+EA_PLATFORM_OSX<br>
+EA_PLATFORM_IPHONE<br>
+EA_PLATFORM_ANDROID<br>
+EA_PLATFORM_LINUX<br>
+
+EA_PLATFORM_WINDOWS<br>
+
+EA_PLATFORM_WIN32<br>
+
+EA_PLATFORM_WIN64<br>
+
+EA_PLATFORM_HPUX<br>
+
+EA_PLATFORM_SUN<br>
+
+<br>
+
+EA_PLATFORM_NAME<br>
+
+EA_PLATFORM_DESCRIPTION<br>
+
+EA_PROCESSOR_POWERPC, EA_PROCESSOR_X86, EA_PROCESSOR_ARM, etc.<br>
+
+EA_SYSTEM_LITTLE_ENDIAN, EA_SYSTEM_BIG_ENDIAN<br>
+
+EA_ASM_STYLE_ATT, EA_ASM_STYLE_INTEL, EA_ASM_STYLE_MOTOROLA<br>
+
+EA_PLATFORM_PTR_SIZE<br>
+EA_PLATFORM_WORD_SIZE</div>
+
+
+<br>
+
+<hr style="width: 100%; height: 2px;"><br>
+
+<br>
+
+<br>
+<br>
+<br>
+<br>
+
+
+
+</body>
+</html>
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/config/eacompiler.h b/EASTL/test/packages/EABase/include/Common/EABase/config/eacompiler.h
new file mode 100644
index 0000000..bd656ed
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/config/eacompiler.h
@@ -0,0 +1,1778 @@
+/*-----------------------------------------------------------------------------
+ * config/eacompiler.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *-----------------------------------------------------------------------------
+ * Currently supported defines include:
+ * EA_COMPILER_GNUC
+ * EA_COMPILER_ARM
+ * EA_COMPILER_EDG
+ * EA_COMPILER_SN
+ * EA_COMPILER_MSVC
+ * EA_COMPILER_METROWERKS
+ * EA_COMPILER_INTEL
+ * EA_COMPILER_BORLANDC
+ * EA_COMPILER_IBM
+ * EA_COMPILER_QNX
+ * EA_COMPILER_GREEN_HILLS
+ * EA_COMPILER_CLANG
+ * EA_COMPILER_CLANG_CL
+ *
+ * EA_COMPILER_VERSION = <integer>
+ * EA_COMPILER_NAME = <string>
+ * EA_COMPILER_STRING = <string>
+ *
+ * EA_COMPILER_VA_COPY_REQUIRED
+ *
+ * C++98/03 functionality
+ * EA_COMPILER_NO_STATIC_CONSTANTS
+ * EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ * EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ * EA_COMPILER_NO_MEMBER_TEMPLATES
+ * EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ * EA_COMPILER_NO_TEMPLATE_TEMPLATES
+ * EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+ * EA_COMPILER_NO_VOID_RETURNS
+ * EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ * EA_COMPILER_NO_DEDUCED_TYPENAME
+ * EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP
+ * EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ * EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS
+ * EA_COMPILER_NO_RTTI
+ * EA_COMPILER_NO_EXCEPTIONS
+ * EA_COMPILER_NO_NEW_THROW_SPEC
+ * EA_THROW_SPEC_NEW / EA_THROW_SPEC_DELETE
+ * EA_COMPILER_NO_UNWIND
+ * EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ * EA_COMPILER_NO_STATIC_VARIABLE_INIT
+ * EA_COMPILER_NO_STATIC_FUNCTION_INIT
+ * EA_COMPILER_NO_VARIADIC_MACROS
+ *
+ * C++11 functionality
+ * EA_COMPILER_NO_RVALUE_REFERENCES
+ * EA_COMPILER_NO_EXTERN_TEMPLATE
+ * EA_COMPILER_NO_RANGE_BASED_FOR_LOOP
+ * EA_COMPILER_NO_CONSTEXPR
+ * EA_COMPILER_NO_OVERRIDE
+ * EA_COMPILER_NO_INHERITANCE_FINAL
+ * EA_COMPILER_NO_NULLPTR
+ * EA_COMPILER_NO_AUTO
+ * EA_COMPILER_NO_DECLTYPE
+ * EA_COMPILER_NO_DEFAULTED_FUNCTIONS
+ * EA_COMPILER_NO_DELETED_FUNCTIONS
+ * EA_COMPILER_NO_LAMBDA_EXPRESSIONS
+ * EA_COMPILER_NO_TRAILING_RETURN_TYPES
+ * EA_COMPILER_NO_STRONGLY_TYPED_ENUMS
+ * EA_COMPILER_NO_FORWARD_DECLARED_ENUMS
+ * EA_COMPILER_NO_VARIADIC_TEMPLATES
+ * EA_COMPILER_NO_TEMPLATE_ALIASES
+ * EA_COMPILER_NO_INITIALIZER_LISTS
+ * EA_COMPILER_NO_NORETURN
+ * EA_COMPILER_NO_CARRIES_DEPENDENCY
+ * EA_COMPILER_NO_FALLTHROUGH
+ * EA_COMPILER_NO_NODISCARD
+ * EA_COMPILER_NO_MAYBE_UNUSED
+ * EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS
+ * EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS
+ * EA_COMPILER_NO_ALIGNOF
+ * EA_COMPILER_NO_ALIGNAS
+ * EA_COMPILER_NO_DELEGATING_CONSTRUCTORS
+ * EA_COMPILER_NO_INHERITING_CONSTRUCTORS
+ * EA_COMPILER_NO_USER_DEFINED_LITERALS
+ * EA_COMPILER_NO_STANDARD_LAYOUT_TYPES
+ * EA_COMPILER_NO_EXTENDED_SIZEOF
+ * EA_COMPILER_NO_INLINE_NAMESPACES
+ * EA_COMPILER_NO_UNRESTRICTED_UNIONS
+ * EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS
+ * EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS
+ * EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS
+ * EA_COMPILER_NO_NOEXCEPT
+ * EA_COMPILER_NO_RAW_LITERALS
+ * EA_COMPILER_NO_UNICODE_STRING_LITERALS
+ * EA_COMPILER_NO_NEW_CHARACTER_TYPES
+ * EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS
+ * EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX
+ * EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS
+ *
+ * C++14 functionality
+ * EA_COMPILER_NO_VARIABLE_TEMPLATES
+ *
+ * C++17 functionality
+ * EA_COMPILER_NO_INLINE_VARIABLES
+ * EA_COMPILER_NO_ALIGNED_NEW
+ *
+ * C++20 functionality
+ * EA_COMPILER_NO_DESIGNATED_INITIALIZERS
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * Supplemental documentation
+ * EA_COMPILER_NO_STATIC_CONSTANTS
+ * Code such as this is legal, but some compilers fail to compile it:
+ * struct A{ static const a = 1; };
+ *
+ * EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ * Some compilers fail to allow template specialization, such as with this:
+ * template<class U> void DoSomething(U u);
+ * void DoSomething(int x);
+ *
+ * EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ * Some compilers fail to allow partial template specialization, such as with this:
+ * template <class T, class Allocator> class vector{ }; // Primary templated class.
+ * template <class Allocator> class vector<bool, Allocator>{ }; // Partially specialized version.
+ *
+ * EA_COMPILER_NO_MEMBER_TEMPLATES
+ * Some compilers fail to allow member template functions such as this:
+ * struct A{ template<class U> void DoSomething(U u); };
+ *
+ * EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ * Some compilers fail to allow member template specialization, such as with this:
+ * struct A{
+ * template<class U> void DoSomething(U u);
+ * void DoSomething(int x);
+ * };
+ *
+ * EA_COMPILER_NO_TEMPLATE_TEMPLATES
+ * Code such as this is legal:
+ * template<typename T, template<typename> class U>
+ * U<T> SomeFunction(const U<T> x) { return x.DoSomething(); }
+ *
+ * EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+ * Some compilers fail to compile templated friends, as with this:
+ * struct A{ template<class U> friend class SomeFriend; };
+ * This is described in the C++ Standard at 14.5.3.
+ *
+ * EA_COMPILER_NO_VOID_RETURNS
+ * This is legal C++:
+ * void DoNothing1(){ };
+ * void DoNothing2(){ return DoNothing1(); }
+ *
+ * EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ * See the C++ standard sec 10.3,p5.
+ *
+ * EA_COMPILER_NO_DEDUCED_TYPENAME
+ * Some compilers don't support the use of 'typename' for
+ * dependent types in deduced contexts, as with this:
+ * template <class T> void Function(T, typename T::type);
+ *
+ * EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP
+ * Also known as Koenig lookup. Basically, if you have a function
+ * that is a namespace and you call that function without prefixing
+ * it with the namespace the compiler should look at any arguments
+ * you pass to that function call and search their namespace *first*
+ * to see if the given function exists there.
+ *
+ * EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ * <exception> is in namespace std. Some std libraries fail to
+ * put the contents of <exception> in namespace std. The following
+ * code should normally be legal:
+ * void Function(){ std::terminate(); }
+ *
+ * EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS
+ * Some compilers fail to execute DoSomething() properly, though they
+ * succeed in compiling it, as with this:
+ * template <int i>
+ * bool DoSomething(int j){ return i == j; };
+ * DoSomething<1>(2);
+ *
+ * EA_COMPILER_NO_EXCEPTIONS
+ * The compiler is configured to disallow the use of try/throw/catch
+ * syntax (often to improve performance). Use of such syntax in this
+ * case will cause a compilation error.
+ *
+ * EA_COMPILER_NO_UNWIND
+ * The compiler is configured to allow the use of try/throw/catch
+ * syntax and behaviour but disables the generation of stack unwinding
+ * code for responding to exceptions (often to improve performance).
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifndef INCLUDED_eacompiler_H
+#define INCLUDED_eacompiler_H
+
+ #include <EABase/config/eaplatform.h>
+
+ // Note: This is used to generate the EA_COMPILER_STRING macros
+ #ifndef INTERNAL_STRINGIZE
+ #define INTERNAL_STRINGIZE(x) INTERNAL_PRIMITIVE_STRINGIZE(x)
+ #endif
+ #ifndef INTERNAL_PRIMITIVE_STRINGIZE
+ #define INTERNAL_PRIMITIVE_STRINGIZE(x) #x
+ #endif
+
+ // EA_COMPILER_HAS_FEATURE
+ #ifndef EA_COMPILER_HAS_FEATURE
+ #if defined(__clang__)
+ #define EA_COMPILER_HAS_FEATURE(x) __has_feature(x)
+ #else
+ #define EA_COMPILER_HAS_FEATURE(x) 0
+ #endif
+ #endif
+
+
+ // EA_COMPILER_HAS_BUILTIN
+ #ifndef EA_COMPILER_HAS_BUILTIN
+ #if defined(__clang__)
+ #define EA_COMPILER_HAS_BUILTIN(x) __has_builtin(x)
+ #else
+ #define EA_COMPILER_HAS_BUILTIN(x) 0
+ #endif
+ #endif
+
+
+ // EDG (EDG compiler front-end, used by other compilers such as SN)
+ #if defined(__EDG_VERSION__)
+ #define EA_COMPILER_EDG 1
+
+ #if defined(_MSC_VER)
+ #define EA_COMPILER_EDG_VC_MODE 1
+ #endif
+ #if defined(__GNUC__)
+ #define EA_COMPILER_EDG_GCC_MODE 1
+ #endif
+ #endif
+
+ // EA_COMPILER_WINRTCX_ENABLED
+ //
+ // Defined as 1 if the compiler has its available C++/CX support enabled, else undefined.
+ // This specifically means the corresponding compilation unit has been built with Windows Runtime
+ // Components enabled, usually via the '-ZW' compiler flags being used. This option allows for using
+ // ref counted hat-type '^' objects and other C++/CX specific keywords like "ref new"
+ #if !defined(EA_COMPILER_WINRTCX_ENABLED) && defined(__cplusplus_winrt)
+ #define EA_COMPILER_WINRTCX_ENABLED 1
+ #endif
+
+
+ // EA_COMPILER_CPP11_ENABLED
+ //
+ // Defined as 1 if the compiler has its available C++11 support enabled, else undefined.
+ // This does not mean that all of C++11 or any particular feature of C++11 is supported
+ // by the compiler. It means that whatever C++11 support the compiler has is enabled.
+ // This also includes existing and older compilers that still identify C++11 as C++0x.
+ //
+ // We cannot use (__cplusplus >= 201103L) alone because some compiler vendors have
+ // decided to not define __cplusplus like thus until they have fully completed their
+ // C++11 support.
+ //
+ #if !defined(EA_COMPILER_CPP11_ENABLED) && defined(__cplusplus)
+ #if (__cplusplus >= 201103L) // Clang and GCC defines this like so in C++11 mode.
+ #define EA_COMPILER_CPP11_ENABLED 1
+ #elif defined(__GNUC__) && defined(__GXX_EXPERIMENTAL_CXX0X__)
+ #define EA_COMPILER_CPP11_ENABLED 1
+ #elif defined(_MSC_VER) && _MSC_VER >= 1600 // Microsoft unilaterally enables its C++11 support; there is no way to disable it.
+ #define EA_COMPILER_CPP11_ENABLED 1
+ #elif defined(__EDG_VERSION__) // && ???
+ // To do: Is there a generic way to determine this?
+ #endif
+ #endif
+
+
+ // EA_COMPILER_CPP14_ENABLED
+ //
+ // Defined as 1 if the compiler has its available C++14 support enabled, else undefined.
+ // This does not mean that all of C++14 or any particular feature of C++14 is supported
+ // by the compiler. It means that whatever C++14 support the compiler has is enabled.
+ //
+ // We cannot use (__cplusplus >= 201402L) alone because some compiler vendors have
+ // decided to not define __cplusplus like thus until they have fully completed their
+ // C++14 support.
+ #if !defined(EA_COMPILER_CPP14_ENABLED) && defined(__cplusplus)
+ #if (__cplusplus >= 201402L) // Clang and GCC defines this like so in C++14 mode.
+ #define EA_COMPILER_CPP14_ENABLED 1
+ #elif defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+
+ #define EA_COMPILER_CPP14_ENABLED 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_CPP17_ENABLED
+ //
+ // Defined as 1 if the compiler has its available C++17 support enabled, else undefined.
+ // This does not mean that all of C++17 or any particular feature of C++17 is supported
+ // by the compiler. It means that whatever C++17 support the compiler has is enabled.
+ //
+ // We cannot use (__cplusplus >= 201703L) alone because some compiler vendors have
+ // decided to not define __cplusplus like thus until they have fully completed their
+ // C++17 support.
+ #if !defined(EA_COMPILER_CPP17_ENABLED) && defined(__cplusplus)
+ #if (__cplusplus >= 201703L)
+ #define EA_COMPILER_CPP17_ENABLED 1
+ #elif defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L) // C++17+
+ #define EA_COMPILER_CPP17_ENABLED 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_CPP20_ENABLED
+ //
+ // Defined as 1 if the compiler has its available C++20 support enabled, else undefined.
+ // This does not mean that all of C++20 or any particular feature of C++20 is supported
+ // by the compiler. It means that whatever C++20 support the compiler has is enabled.
+ //
+ // We cannot use (__cplusplus >= 202003L) alone because some compiler vendors have
+ // decided to not define __cplusplus like thus until they have fully completed their
+ // C++20 support.
+ #if !defined(EA_COMPILER_CPP20_ENABLED) && defined(__cplusplus)
+ // TODO(rparoin): enable once a C++20 value for the __cplusplus macro has been published
+ // #if (__cplusplus >= 202003L)
+ // #define EA_COMPILER_CPP20_ENABLED 1
+ // #elif defined(_MSVC_LANG) && (_MSVC_LANG >= 202003L) // C++20+
+ // #define EA_COMPILER_CPP20_ENABLED 1
+ // #endif
+ #endif
+
+
+
+ #if defined(__ARMCC_VERSION)
+ // Note that this refers to the ARM RVCT compiler (armcc or armcpp), but there
+ // are other compilers that target ARM processors, such as GCC and Microsoft VC++.
+ // If you want to detect compiling for the ARM processor, check for EA_PROCESSOR_ARM
+ // being defined.
+ // This compiler is also identified by defined(__CC_ARM) || defined(__ARMCC__).
+ #define EA_COMPILER_RVCT 1
+ #define EA_COMPILER_ARM 1
+ #define EA_COMPILER_VERSION __ARMCC_VERSION
+ #define EA_COMPILER_NAME "RVCT"
+ //#define EA_COMPILER_STRING (defined below)
+
+ // Clang's GCC-compatible driver.
+ #elif defined(__clang__) && !defined(_MSC_VER)
+ #define EA_COMPILER_CLANG 1
+ #define EA_COMPILER_VERSION (__clang_major__ * 100 + __clang_minor__)
+ #define EA_COMPILER_NAME "clang"
+ #define EA_COMPILER_STRING EA_COMPILER_NAME __clang_version__
+
+ // GCC (a.k.a. GNUC)
+ #elif defined(__GNUC__) // GCC compilers exist for many platforms.
+ #define EA_COMPILER_GNUC 1
+ #define EA_COMPILER_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+ #define EA_COMPILER_NAME "GCC"
+ #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( __GNUC__ ) "." INTERNAL_STRINGIZE( __GNUC_MINOR__ )
+
+ #if (__GNUC__ == 2) && (__GNUC_MINOR__ < 95) // If GCC < 2.95...
+ #define EA_COMPILER_NO_MEMBER_TEMPLATES 1
+ #endif
+ #if (__GNUC__ == 2) && (__GNUC_MINOR__ <= 97) // If GCC <= 2.97...
+ #define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS 1
+ #endif
+ #if (__GNUC__ == 3) && ((__GNUC_MINOR__ == 1) || (__GNUC_MINOR__ == 2)) // If GCC 3.1 or 3.2 (but not pre 3.1 or post 3.2)...
+ #define EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS 1
+ #endif
+
+ // Borland C++
+ #elif defined(__BORLANDC__)
+ #define EA_COMPILER_BORLANDC 1
+ #define EA_COMPILER_VERSION __BORLANDC__
+ #define EA_COMPILER_NAME "Borland C"
+ //#define EA_COMPILER_STRING (defined below)
+
+ #if (__BORLANDC__ <= 0x0550) // If Borland C++ Builder 4 and 5...
+ #define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS 1
+ #endif
+ #if (__BORLANDC__ >= 0x561) && (__BORLANDC__ < 0x600)
+ #define EA_COMPILER_NO_MEMBER_FUNCTION_SPECIALIZATION 1
+ #endif
+
+
+ // Intel C++
+ // The Intel Windows compiler masquerades as VC++ and defines _MSC_VER.
+ // The Intel compiler is based on the EDG compiler front-end.
+ #elif defined(__ICL) || defined(__ICC)
+ #define EA_COMPILER_INTEL 1
+
+ // Should we enable the following? We probably should do so since enabling it does a lot more good than harm
+ // for users. The Intel Windows compiler does a pretty good job of emulating VC++ and so the user would likely
+ // have to handle few special cases where the Intel compiler doesn't emulate VC++ correctly.
+ #if defined(_MSC_VER)
+ #define EA_COMPILER_MSVC 1
+ #define EA_COMPILER_MICROSOFT 1
+ #endif
+
+ // Should we enable the following? This isn't as clear because as of this writing we don't know if the Intel
+ // compiler truly emulates GCC well enough that enabling this does more good than harm.
+ #if defined(__GNUC__)
+ #define EA_COMPILER_GNUC 1
+ #endif
+
+ #if defined(__ICL)
+ #define EA_COMPILER_VERSION __ICL
+ #elif defined(__ICC)
+ #define EA_COMPILER_VERSION __ICC
+ #endif
+ #define EA_COMPILER_NAME "Intel C++"
+ #if defined(_MSC_VER)
+ #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( EA_COMPILER_VERSION ) ", EDG version " INTERNAL_STRINGIZE( __EDG_VERSION__ ) ", VC++ version " INTERNAL_STRINGIZE( _MSC_VER )
+ #elif defined(__GNUC__)
+ #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( EA_COMPILER_VERSION ) ", EDG version " INTERNAL_STRINGIZE( __EDG_VERSION__ ) ", GCC version " INTERNAL_STRINGIZE( __GNUC__ )
+ #else
+ #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( EA_COMPILER_VERSION ) ", EDG version " INTERNAL_STRINGIZE( __EDG_VERSION__ )
+ #endif
+
+
+ #elif defined(_MSC_VER)
+ #define EA_COMPILER_MSVC 1
+ #define EA_COMPILER_MICROSOFT 1
+ #define EA_COMPILER_VERSION _MSC_VER
+ #define EA_COMPILER_NAME "Microsoft Visual C++"
+ //#define EA_COMPILER_STRING (defined below)
+
+ #if defined(__clang__)
+ // Clang's MSVC-compatible driver.
+ #define EA_COMPILER_CLANG_CL 1
+ #endif
+
+ #define EA_STANDARD_LIBRARY_MSVC 1
+ #define EA_STANDARD_LIBRARY_MICROSOFT 1
+
+ #if (_MSC_VER <= 1200) // If VC6.x and earlier...
+ #if (_MSC_VER < 1200)
+ #define EA_COMPILER_MSVCOLD 1
+ #else
+ #define EA_COMPILER_MSVC6 1
+ #endif
+
+ #if (_MSC_VER < 1200) // If VC5.x or earlier...
+ #define EA_COMPILER_NO_TEMPLATE_SPECIALIZATION 1
+ #endif
+ #define EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS 1 // The compiler compiles this OK, but executes it wrong. Fixed in VC7.0
+ #define EA_COMPILER_NO_VOID_RETURNS 1 // The compiler fails to compile such cases. Fixed in VC7.0
+ #define EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE 1 // The compiler fails to compile such cases. Fixed in VC7.0
+ #define EA_COMPILER_NO_DEDUCED_TYPENAME 1 // The compiler fails to compile such cases. Fixed in VC7.0
+ #define EA_COMPILER_NO_STATIC_CONSTANTS 1 // The compiler fails to compile such cases. Fixed in VC7.0
+ #define EA_COMPILER_NO_COVARIANT_RETURN_TYPE 1 // The compiler fails to compile such cases. Fixed in VC7.1
+ #define EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP 1 // The compiler compiles this OK, but executes it wrong. Fixed in VC7.1
+ #define EA_COMPILER_NO_TEMPLATE_TEMPLATES 1 // The compiler fails to compile such cases. Fixed in VC7.1
+ #define EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION 1 // The compiler fails to compile such cases. Fixed in VC7.1
+ #define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS 1 // The compiler fails to compile such cases. Fixed in VC7.1
+ //#define EA_COMPILER_NO_MEMBER_TEMPLATES 1 // VC6.x supports member templates properly 95% of the time. So do we flag the remaining 5%?
+ //#define EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION 1 // VC6.x supports member templates properly 95% of the time. So do we flag the remaining 5%?
+
+ #elif (_MSC_VER <= 1300) // If VC7.0 and earlier...
+ #define EA_COMPILER_MSVC7 1
+
+ #define EA_COMPILER_NO_COVARIANT_RETURN_TYPE 1 // The compiler fails to compile such cases. Fixed in VC7.1
+ #define EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP 1 // The compiler compiles this OK, but executes it wrong. Fixed in VC7.1
+ #define EA_COMPILER_NO_TEMPLATE_TEMPLATES 1 // The compiler fails to compile such cases. Fixed in VC7.1
+ #define EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION 1 // The compiler fails to compile such cases. Fixed in VC7.1
+ #define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS 1 // The compiler fails to compile such cases. Fixed in VC7.1
+ #define EA_COMPILER_NO_MEMBER_FUNCTION_SPECIALIZATION 1 // This is the case only for VC7.0 and not VC6 or VC7.1+. Fixed in VC7.1
+ //#define EA_COMPILER_NO_MEMBER_TEMPLATES 1 // VC7.0 supports member templates properly 95% of the time. So do we flag the remaining 5%?
+
+ #elif (_MSC_VER < 1400) // VS2003 _MSC_VER of 1300 means VC7 (VS2003)
+ // The VC7.1 and later compiler is fairly close to the C++ standard
+ // and thus has no compiler limitations that we are concerned about.
+ #define EA_COMPILER_MSVC7_2003 1
+ #define EA_COMPILER_MSVC7_1 1
+
+ #elif (_MSC_VER < 1500) // VS2005 _MSC_VER of 1400 means VC8 (VS2005)
+ #define EA_COMPILER_MSVC8_2005 1
+ #define EA_COMPILER_MSVC8_0 1
+
+ #elif (_MSC_VER < 1600) // VS2008. _MSC_VER of 1500 means VC9 (VS2008)
+ #define EA_COMPILER_MSVC9_2008 1
+ #define EA_COMPILER_MSVC9_0 1
+
+ #elif (_MSC_VER < 1700) // VS2010 _MSC_VER of 1600 means VC10 (VS2010)
+ #define EA_COMPILER_MSVC_2010 1
+ #define EA_COMPILER_MSVC10_0 1
+
+ #elif (_MSC_VER < 1800) // VS2012 _MSC_VER of 1700 means VS2011/VS2012
+ #define EA_COMPILER_MSVC_2011 1 // Microsoft changed the name to VS2012 before shipping, despite referring to it as VS2011 up to just a few weeks before shipping.
+ #define EA_COMPILER_MSVC11_0 1
+ #define EA_COMPILER_MSVC_2012 1
+ #define EA_COMPILER_MSVC12_0 1
+
+ #elif (_MSC_VER < 1900) // VS2013 _MSC_VER of 1800 means VS2013
+ #define EA_COMPILER_MSVC_2013 1
+ #define EA_COMPILER_MSVC13_0 1
+
+ #elif (_MSC_VER < 1910) // VS2015 _MSC_VER of 1900 means VS2015
+ #define EA_COMPILER_MSVC_2015 1
+ #define EA_COMPILER_MSVC14_0 1
+
+ #elif (_MSC_VER < 1911) // VS2017 _MSC_VER of 1910 means VS2017
+ #define EA_COMPILER_MSVC_2017 1
+ #define EA_COMPILER_MSVC15_0 1
+
+ #endif
+
+
+ // IBM
+ #elif defined(__xlC__)
+ #define EA_COMPILER_IBM 1
+ #define EA_COMPILER_NAME "IBM XL C"
+ #define EA_COMPILER_VERSION __xlC__
+ #define EA_COMPILER_STRING "IBM XL C compiler, version " INTERNAL_STRINGIZE( __xlC__ )
+
+ // Unknown
+ #else // Else the compiler is unknown
+
+ #define EA_COMPILER_VERSION 0
+ #define EA_COMPILER_NAME "Unknown"
+
+ #endif
+
+ #ifndef EA_COMPILER_STRING
+ #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE(EA_COMPILER_VERSION)
+ #endif
+
+
+ // Deprecated definitions
+ // For backwards compatibility, should be supported for at least the life of EABase v2.0.x.
+ #ifndef EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ #define EA_COMPILER_PARTIAL_TEMPLATE_SPECIALIZATION 1
+ #endif
+ #ifndef EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ #define EA_COMPILER_TEMPLATE_SPECIALIZATION 1
+ #endif
+ #ifndef EA_COMPILER_NO_MEMBER_TEMPLATES
+ #define EA_COMPILER_MEMBER_TEMPLATES 1
+ #endif
+ #ifndef EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ #define EA_COMPILER_MEMBER_TEMPLATE_SPECIALIZATION 1
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // EA_COMPILER_VA_COPY_REQUIRED
+ //
+ // Defines whether va_copy must be used to copy or save va_list objects between uses.
+ // Some compilers on some platforms implement va_list whereby its contents
+ // are destroyed upon usage, even if passed by value to another function.
+ // With these compilers you can use va_copy to save and restore a va_list.
+ // Known compiler/platforms that destroy va_list contents upon usage include:
+ // CodeWarrior on PowerPC
+ // GCC on x86-64
+ // However, va_copy is part of the C99 standard and not part of earlier C and
+ // C++ standards. So not all compilers support it. VC++ doesn't support va_copy,
+ // but it turns out that VC++ doesn't usually need it on the platforms it supports,
+ // and va_copy can usually be implemented via memcpy(va_list, va_list) with VC++.
+ ///////////////////////////////////////////////////////////////////////////////
+
+ #ifndef EA_COMPILER_VA_COPY_REQUIRED
+ #if ((defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)) && (!defined(__i386__) || defined(__x86_64__)) && !defined(__ppc__) && !defined(__PPC__) && !defined(__PPC64__)
+ #define EA_COMPILER_VA_COPY_REQUIRED 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_RTTI
+ //
+ // If EA_COMPILER_NO_RTTI is defined, then RTTI (run-time type information)
+ // is not available (possibly due to being disabled by the user).
+ //
+ #if defined(__EDG_VERSION__) && !defined(__RTTI)
+ #define EA_COMPILER_NO_RTTI 1
+ #elif defined(__clang__) && !EA_COMPILER_HAS_FEATURE(cxx_rtti)
+ #define EA_COMPILER_NO_RTTI 1
+ #elif defined(__IBMCPP__) && !defined(__RTTI_ALL__)
+ #define EA_COMPILER_NO_RTTI 1
+ #elif defined(__GXX_ABI_VERSION) && !defined(__GXX_RTTI)
+ #define EA_COMPILER_NO_RTTI 1
+ #elif defined(_MSC_VER) && !defined(_CPPRTTI)
+ #define EA_COMPILER_NO_RTTI 1
+ #elif defined(__ARMCC_VERSION) && defined(__TARGET_CPU_MPCORE) && !defined(__RTTI)
+ #define EA_COMPILER_NO_RTTI 1
+ #endif
+
+
+
+ // EA_COMPILER_NO_EXCEPTIONS / EA_COMPILER_NO_UNWIND
+ //
+ // If EA_COMPILER_NO_EXCEPTIONS is defined, then the compiler is
+ // configured to not recognize C++ exception-handling statements
+ // such as try/catch/throw. Thus, when EA_COMPILER_NO_EXCEPTIONS is
+ // defined, code that attempts to use exception handling statements
+ // will usually cause a compilation error. If is often desirable
+ // for projects to disable exception handling because exception
+ // handling causes extra code and/or data generation which might
+ // not be needed, especially if it is known that exceptions won't
+ // be happening. When writing code that is to be portable between
+ // systems of which some enable exception handling while others
+ // don't, check for EA_COMPILER_NO_EXCEPTIONS being defined.
+ //
+ #if !defined(EA_COMPILER_NO_EXCEPTIONS) && !defined(EA_COMPILER_NO_UNWIND)
+ #if defined(EA_COMPILER_GNUC) && defined(_NO_EX) // GCC on some platforms defines _NO_EX when exceptions are disabled.
+ #define EA_COMPILER_NO_EXCEPTIONS 1
+
+ #elif (defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_INTEL) || defined(EA_COMPILER_RVCT)) && !defined(__EXCEPTIONS) // GCC and most EDG-based compilers define __EXCEPTIONS when exception handling is enabled.
+ #define EA_COMPILER_NO_EXCEPTIONS 1
+
+ #elif (defined(EA_COMPILER_MSVC)) && !defined(_CPPUNWIND)
+ #define EA_COMPILER_NO_UNWIND 1
+
+ #endif // EA_COMPILER_NO_EXCEPTIONS / EA_COMPILER_NO_UNWIND
+ #endif // !defined(EA_COMPILER_NO_EXCEPTIONS) && !defined(EA_COMPILER_NO_UNWIND)
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_VC_WARNINGS / EA_RESTORE_ALL_VC_WARNINGS
+ //
+ // Disable and re-enable all warning(s) within code.
+ //
+ // Example usage:
+ // EA_DISABLE_ALL_VC_WARNINGS()
+ // <code>
+ // EA_RESTORE_ALL_VC_WARNINGS()
+ //
+ //This is duplicated from EABase's eacompilertraits.h
+ #ifndef EA_DISABLE_ALL_VC_WARNINGS
+ #if defined(_MSC_VER)
+ #define EA_DISABLE_ALL_VC_WARNINGS() \
+ __pragma(warning(push, 0)) \
+ __pragma(warning(disable: 4244 4265 4267 4350 4472 4509 4548 4623 4710 4985 6320 4755 4625 4626 4702)) // Some warnings need to be explicitly called out.
+ #else
+ #define EA_DISABLE_ALL_VC_WARNINGS()
+ #endif
+ #endif
+
+ //This is duplicated from EABase's eacompilertraits.h
+ #ifndef EA_RESTORE_ALL_VC_WARNINGS
+ #if defined(_MSC_VER)
+ #define EA_RESTORE_ALL_VC_WARNINGS() \
+ __pragma(warning(pop))
+ #else
+ #define EA_RESTORE_ALL_VC_WARNINGS()
+ #endif
+ #endif
+
+ // Dinkumware
+ //This is duplicated from EABase's eahave.h
+ #if !defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && !defined(EA_NO_HAVE_DINKUMWARE_CPP_LIBRARY)
+ #if defined(__cplusplus)
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <cstddef> // Need to trigger the compilation of yvals.h without directly using <yvals.h> because it might not exist.
+ EA_RESTORE_ALL_VC_WARNINGS()
+ #endif
+
+ #if defined(__cplusplus) && defined(_CPPLIB_VER) /* If using the Dinkumware Standard library... */
+ #define EA_HAVE_DINKUMWARE_CPP_LIBRARY 1
+ #else
+ #define EA_NO_HAVE_DINKUMWARE_CPP_LIBRARY 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_ALIGNED_NEW
+ //
+ //
+ #if !defined(EA_COMPILER_NO_ALIGNED_NEW)
+ #if defined(_HAS_ALIGNED_NEW) && _HAS_ALIGNED_NEW // VS2017 15.5 Preview
+ // supported.
+ #elif defined(EA_COMPILER_CPP17_ENABLED)
+ // supported.
+ #else
+ #define EA_COMPILER_NO_ALIGNED_NEW 1
+ #endif
+ #endif
+
+ // EA_COMPILER_NO_NEW_THROW_SPEC / EA_THROW_SPEC_NEW / EA_THROW_SPEC_DELETE
+ //
+ // If defined then the compiler's version of operator new is not decorated
+ // with a throw specification. This is useful for us to know because we
+ // often want to write our own overloaded operator new implementations.
+ // We need such operator new overrides to be declared identically to the
+ // way the compiler is defining operator new itself.
+ //
+ // Example usage:
+ // void* operator new(std::size_t) EA_THROW_SPEC_NEW(std::bad_alloc);
+ // void* operator new[](std::size_t) EA_THROW_SPEC_NEW(std::bad_alloc);
+ // void* operator new(std::size_t, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE();
+ // void* operator new[](std::size_t, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE();
+ // void operator delete(void*) EA_THROW_SPEC_DELETE_NONE();
+ // void operator delete[](void*) EA_THROW_SPEC_DELETE_NONE();
+ // void operator delete(void*, const std::nothrow_t&) EA_THROW_SPEC_DELETE_NONE();
+ // void operator delete[](void*, const std::nothrow_t&) EA_THROW_SPEC_DELETE_NONE();
+ //
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY)
+ #if defined(_MSC_VER) && (_MSC_VER >= 1912) // VS2017 15.3+
+ #define EA_THROW_SPEC_NEW(x) noexcept(false)
+ #define EA_THROW_SPEC_NEW_NONE() noexcept
+ #define EA_THROW_SPEC_DELETE_NONE() noexcept
+
+ #elif defined(_MSC_VER) && (_MSC_VER >= 1910) // VS2017+
+ #define EA_THROW_SPEC_NEW(x) throw(x)
+ #define EA_THROW_SPEC_NEW_NONE() throw()
+ #define EA_THROW_SPEC_DELETE_NONE() throw()
+
+ #else
+ #if defined(EA_PLATFORM_SONY)
+ #define EA_THROW_SPEC_NEW(X) _THROWS(X)
+ #elif defined(_MSC_VER)
+ // Disabled warning "nonstandard extension used: 'throw (...)'" as this warning is a W4 warning which is usually off by default
+ // and doesn't convey any important information but will still complain when building with /Wall (which most teams do)
+ #define EA_THROW_SPEC_NEW(X) __pragma(warning(push)) __pragma(warning(disable: 4987)) _THROWS(X) __pragma(warning(pop))
+ #else
+ #define EA_THROW_SPEC_NEW(X) _THROW1(X)
+ #endif
+ #define EA_THROW_SPEC_NEW_NONE() _THROW0()
+ #define EA_THROW_SPEC_DELETE_NONE() _THROW0()
+
+ #endif
+ #elif defined(EA_COMPILER_NO_EXCEPTIONS) && !defined(EA_COMPILER_RVCT) && !defined(EA_PLATFORM_LINUX) && !defined(EA_PLATFORM_APPLE) && !defined(CS_UNDEFINED_STRING)
+ #define EA_COMPILER_NO_NEW_THROW_SPEC 1
+
+ #define EA_THROW_SPEC_NEW(x)
+ #define EA_THROW_SPEC_NEW_NONE()
+ #define EA_THROW_SPEC_DELETE_NONE()
+ #else
+ #define EA_THROW_SPEC_NEW(x) throw(x)
+ #define EA_THROW_SPEC_NEW_NONE() throw()
+ #define EA_THROW_SPEC_DELETE_NONE() throw()
+ #endif
+
+
+ // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ //
+ // If defined, then the compiler doesn't provide a Standard C++ library.
+ //
+ #if defined(EA_PLATFORM_ANDROID)
+ // Disabled because EA's eaconfig/android_config/android_sdk packages currently
+ // don't support linking STL libraries. Perhaps we can figure out what linker arguments
+ // are needed for an app so we can manually specify them and then re-enable this code.
+ //#include <android/api-level.h>
+ //
+ //#if (__ANDROID_API__ < 9) // Earlier versions of Android provide no std C++ STL implementation.
+ #define EA_COMPILER_NO_STANDARD_CPP_LIBRARY 1
+ //#endif
+ #endif
+
+
+ // EA_COMPILER_NO_STATIC_VARIABLE_INIT
+ //
+ // If defined, it means that global or static C++ variables will be
+ // constructed. Not all compiler/platorm combinations support this.
+ // User code that needs to be portable must avoid having C++ variables
+ // that construct before main.
+ //
+ //#if defined(EA_PLATFORM_MOBILE)
+ // #define EA_COMPILER_NO_STATIC_VARIABLE_INIT 1
+ //#endif
+
+
+ // EA_COMPILER_NO_STATIC_FUNCTION_INIT
+ //
+ // If defined, it means that functions marked as startup functions
+ // (e.g. __attribute__((constructor)) in GCC) are supported. It may
+ // be that some compiler/platform combinations don't support this.
+ //
+ //#if defined(XXX) // So far, all compiler/platforms we use support this.
+ // #define EA_COMPILER_NO_STATIC_VARIABLE_INIT 1
+ //#endif
+
+ // EA_COMPILER_NO_VARIADIC_MACROS
+ //
+ // If defined, the compiler doesn't support C99/C++11 variadic macros.
+ // With a variadic macro, you can do this:
+ // #define MY_PRINTF(format, ...) printf(format, __VA_ARGS__)
+ //
+ #if !defined(EA_COMPILER_NO_VARIADIC_MACROS)
+ #if defined(_MSC_VER) && (_MSC_VER < 1500) // If earlier than VS2008..
+ #define EA_COMPILER_NO_VARIADIC_MACROS 1
+ #elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__)) < 401 // If earlier than GCC 4.1..
+ #define EA_COMPILER_NO_VARIADIC_MACROS 1
+ #elif defined(EA_COMPILER_EDG) // Includes other compilers
+ // variadic macros are supported
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_RVALUE_REFERENCES
+ //
+ // If defined, the compiler doesn't fully support C++11 rvalue reference semantics.
+ // This applies to the compiler only and not the Standard Library in use with the compiler,
+ // which is required by the Standard to have some support itself.
+ //
+ #if !defined(EA_COMPILER_NO_RVALUE_REFERENCES)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_VER >= 1600) // VS2010+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // EDG 4.3+.
+ // supported. Earlier EDG supported a subset of rvalue references. Implicit move constructors and assignment operators aren't supported until EDG 4.5.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && EA_COMPILER_HAS_FEATURE(cxx_rvalue_references)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_RVALUE_REFERENCES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_EXTERN_TEMPLATE
+ //
+ // If defined, the compiler doesn't support C++11 extern template.
+ // With extern templates, you can do this:
+ // extern template void DoSomething(KnownType u);
+ //
+ #if !defined(EA_COMPILER_NO_EXTERN_TEMPLATE)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_VER >= 1700) // VS2012+...
+ // Extern template is supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && defined(__apple_build_version__) && (EA_COMPILER_VERSION >= 401)
+ // Extern template is supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && !defined(__apple_build_version__) // Clang other than Apple's Clang
+ // Extern template is supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+
+ // Extern template is supported.
+ #else
+ #define EA_COMPILER_NO_EXTERN_TEMPLATE 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_RANGE_BASED_FOR_LOOP
+ //
+ // If defined, the compiler doesn't support C++11 range-based for loops.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2009/n2930.html
+ // You must #include <iterator> for range-based for loops to work.
+ // Example usage:
+ // #include <iterator>
+ // #include <vector>
+ // std::vector<float> floatVector;
+ // for(float& f : floatVector)
+ // f += 1.0;
+ //
+ #if !defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && (defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700)) // VS2012+...
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && (defined(__clang__) && (EA_COMPILER_VERSION >= 300)) // Clang 3.x+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && (defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006)) // GCC 4.6+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_RANGE_BASED_FOR_LOOP 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_CONSTEXPR
+ //
+ // Refers to C++11 = constexpr (const expression) declarations.
+ //
+ #if !defined(EA_COMPILER_NO_CONSTEXPR)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && (defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1900)) // VS2015+... Not present in VC++ up to and including VS2013.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 406) // EDG 4.6+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && EA_COMPILER_HAS_FEATURE(cxx_constexpr)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1900) // VS 2015+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_CONSTEXPR 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_CONSTEXPR_IF
+ //
+ // Refers to C++17 = constexpr if(const expression) conditionals.
+ //
+ #if !defined(EA_COMPILER_NO_CONSTEXPR_IF)
+ #if defined(EA_COMPILER_CPP17_ENABLED) && (defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1911)) // VS2017 15.3+
+ // supported.
+ #elif defined(EA_COMPILER_CPP17_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 309) // Clang 3.9+
+ // supported.
+ #elif defined(EA_COMPILER_CPP17_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 7000) // GCC 7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_CONSTEXPR_IF 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_OVERRIDE
+ //
+ // Refers to the C++11 override specifier.
+ //
+ #ifndef EA_COMPILER_NO_OVERRIDE
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION > 1600) // VC++ > VS2010, even without C++11 support. VS2010 does support override, however will generate warnings due to the keyword being 'non-standard'
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_OVERRIDE 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_INHERITANCE_FINAL
+ //
+ // Refers to the C++11 final specifier.
+ //
+ #ifndef EA_COMPILER_NO_INHERITANCE_FINAL
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1500) // VS2008+, even without C++11 support.
+ // supported, though you need to use EA_INHERITANCE_FINAL for it to work with VS versions prior to 2012.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+
+ // supported
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported
+ #else
+ #define EA_COMPILER_NO_INHERITANCE_FINAL 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_AUTO
+ //
+ // Refers to C++11 auto.
+ //
+ #if !defined(EA_COMPILER_NO_AUTO)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported with the exception of the usage of braced initializer lists as of EDG 4.3.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_AUTO 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NULLPTR
+ //
+ // Refers to C++11 nullptr (which is a built in type). std::nullptr_t is defined in C++11 <cstddef>.
+ // Note that <EABase/nullptr.h> implements a portable nullptr implementation.
+ //
+ #if !defined(EA_COMPILER_NO_NULLPTR)
+ #if (defined(_MSC_VER) && (_MSC_VER >= 1600)) && defined(EA_COMPILER_CPP11_ENABLED)
+ // supported
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) && defined(EA_COMPILER_CPP11_ENABLED)
+ // supported
+ #elif defined(__clang__) && defined(EA_COMPILER_CPP11_ENABLED)
+ // supported
+ #elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) && defined(EA_COMPILER_CPP11_ENABLED)
+ // supported
+ #else
+ #define EA_COMPILER_NO_NULLPTR 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_DECLTYPE
+ //
+ // Refers to C++11 decltype.
+ //
+ #if !defined(EA_COMPILER_NO_DECLTYPE)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported, though VS2010 doesn't support the spec completely as specified in the final standard.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4003) // GCC 4.3+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_DECLTYPE 1
+ #endif
+ #endif
+
+
+
+ // EA_COMPILER_NO_DEFAULTED_FUNCTIONS
+ // EA_COMPILER_NO_DELETED_FUNCTIONS
+ //
+ // Refers to C++11 = default and = delete function declarations.
+ //
+ #if !defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+
+ // supported, but as of VS2013 it isn't supported for defaulted move constructors and move assignment operators.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported, but as of EDG 4.3 it isn't supported for defaulted move constructors and move assignment operators until EDG 4.5.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) // Clang 3.0+, including Apple's Clang
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ // VC++ doesn't support it as of VS2012.
+ #define EA_COMPILER_NO_DEFAULTED_FUNCTIONS 1
+ #endif
+ #endif
+
+ #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+
+ // supported, but as of VS2013 it isn't supported for defaulted move constructors and move assignment operators.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ // VC++ doesn't support it as of VS2012.
+ #define EA_COMPILER_NO_DELETED_FUNCTIONS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_LAMBDA_EXPRESSIONS
+ //
+ // Refers to C++11 lambda expressions.
+ //
+ #if !defined(EA_COMPILER_NO_LAMBDA_EXPRESSIONS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported, though VS2010 doesn't support the spec completely as specified in the final standard.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported. However, converting lambdas to function pointers is not supported until EDG 4.5.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_LAMBDA_EXPRESSIONS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_TRAILING_RETURN_TYPES
+ //
+ // Refers to C++11 trailing-return-type. Also sometimes referred to as "incomplete return type".
+ //
+ #if !defined(EA_COMPILER_NO_TRAILING_RETURN_TYPES)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported, though VS2010 doesn't support the spec completely as specified in the final standard.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+.
+ // supported. However, use of "this" in trailing return types is not supported untiil EDG 4.4
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_TRAILING_RETURN_TYPES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_STRONGLY_TYPED_ENUMS
+ //
+ // Refers to C++11 strongly typed enums, which includes enum classes and sized enums. Doesn't include forward-declared enums.
+ //
+ #if !defined(EA_COMPILER_NO_STRONGLY_TYPED_ENUMS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700) // VS2012+
+ // supported. A subset of this is actually supported by VS2010.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 400) // EDG 4.0+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_STRONGLY_TYPED_ENUMS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_FORWARD_DECLARED_ENUMS
+ //
+ // Refers to C++11 forward declared enums.
+ //
+ #if !defined(EA_COMPILER_NO_FORWARD_DECLARED_ENUMS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700) // VS2012+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported. EDG 4.3 supports basic forward-declared enums, but not forward-declared strongly typed enums.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_FORWARD_DECLARED_ENUMS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_VARIADIC_TEMPLATES
+ //
+ // Refers to C++11 variadic templates.
+ //
+ #if !defined(EA_COMPILER_NO_VARIADIC_TEMPLATES)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_FULL_VER == 170051025) // VS2012 November Preview for Windows only.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // EDG 4.3+.
+ // supported, though 4.1 has partial support for variadic templates.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported, though GCC 4.3 has partial support for variadic templates.
+ #else
+ #define EA_COMPILER_NO_VARIADIC_TEMPLATES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_TEMPLATE_ALIASES
+ //
+ // Refers to C++11 alias templates.
+ // Example alias template usage:
+ // template <typename T>
+ // using Dictionary = eastl::map<eastl::string, T>;
+ //
+ // Dictionary<int> StringIntDictionary;
+ //
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+.
+ // supported, though 4.1 has partial support for variadic templates.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported, though GCC 4.3 has partial support for variadic templates.
+ #else
+ #define EA_COMPILER_NO_TEMPLATE_ALIASES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_VARIABLE_TEMPLATES
+ //
+ // Refers to C++14 variable templates.
+ // Example variable template usage:
+ // template<class T>
+ // constexpr T pi = T(3.1415926535897932385);
+ //
+ #if !defined(EA_COMPILER_NO_VARIABLE_TEMPLATES)
+ #if defined(_MSC_VER) && (_MSC_FULL_VER >= 190023918) // VS2015 Update 2 and above.
+ // supported.
+ #elif defined(EA_COMPILER_CPP14_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 304) && !defined(__apple_build_version__) // Clang 3.4+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP14_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 5000) // GCC 5+
+ // supported.
+ #elif !defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_COMPILER_NO_VARIABLE_TEMPLATES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_INLINE_VARIABLES
+ //
+ // Refers to C++17 inline variables that allows the definition of variables in header files
+ //
+ // Example usage:
+ // struct Foo
+ // {
+ // static inline constexpr int kConstant = 42; // no out of class definition
+ // };
+ //
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4424.pdf
+ // http://en.cppreference.com/w/cpp/language/inline
+ //
+ #if !defined(EA_COMPILER_NO_INLINE_VARIABLES)
+ #define EA_COMPILER_NO_INLINE_VARIABLES 1
+ #endif
+
+
+ // EA_COMPILER_NO_INITIALIZER_LISTS
+ //
+ // Refers to C++11 initializer lists.
+ // This refers to the compiler support for this and not the Standard Library support (std::initializer_list).
+ //
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_FULL_VER == 170051025) // VS2012 November Preview for Windows only.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported, though GCC 4.3 has partial support for it.
+ #else
+ #define EA_COMPILER_NO_INITIALIZER_LISTS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NORETURN
+ //
+ // Refers to C++11 declaration attribute: noreturn.
+ // http://en.cppreference.com/w/cpp/language/attributes
+ // http://blog.aaronballman.com/2011/09/understanding-attributes/
+ //
+ #if !defined(EA_COMPILER_NO_NORETURN)
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1300) // VS2003+
+ // supported via __declspec(noreturn). You need to use that or EA_NORETURN. VC++ up to VS2013 doesn't support any C++11 attribute types.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_NORETURN 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_CARRIES_DEPENDENCY
+ //
+ // Refers to C++11 declaration attribute: carries_dependency.
+ // http://en.cppreference.com/w/cpp/language/attributes
+ // http://blog.aaronballman.com/2011/09/understanding-attributes/
+ //
+ #if !defined(EA_COMPILER_NO_CARRIES_DEPENDENCY)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+.
+ // supported; stricter than other compilers in its usage.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ // Currently GNUC doesn't appear to support this attribute.
+ //#elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+
+ // // supported.
+ #else
+ #define EA_COMPILER_NO_CARRIES_DEPENDENCY 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_FALLTHROUGH
+ //
+ // Refers to C++17 declaration attribute: fallthrough.
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ #if !defined(EA_COMPILER_NO_FALLTHROUGH)
+ #if defined(EA_COMPILER_CPP17_ENABLED)
+ // supported.
+ #else
+ #define EA_COMPILER_NO_FALLTHROUGH 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NODISCARD
+ //
+ // Refers to C++17 declaration attribute: nodiscard.
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ #if !defined(EA_COMPILER_NO_NODISCARD)
+ #if defined(EA_COMPILER_CPP17_ENABLED)
+ // supported.
+ #else
+ #define EA_COMPILER_NO_NODISCARD 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_MAYBE_UNUSED
+ //
+ // Refers to C++17 declaration attribute: maybe_unused.
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ #if !defined(EA_COMPILER_NO_MAYBE_UNUSED)
+ #if defined(EA_COMPILER_CPP17_ENABLED)
+ // supported.
+ #elif defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1912) // VS2017 15.3+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_MAYBE_UNUSED 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_STRUCTURED_BINDING
+ //
+ // Indicates if target compiler supports the C++17 "structured binding" language feature.
+ // https://en.cppreference.com/w/cpp/language/structured_binding
+ //
+ //
+ #if !defined(EA_COMPILER_NO_STRUCTURED_BINDING)
+ #if defined(EA_COMPILER_CPP17_ENABLED)
+ // supported.
+ #elif defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1912) // VS2017 15.3+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_STRUCTURED_BINDING 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_DESIGNATED_INITIALIZERS
+ //
+ // Indicates the target compiler supports the C++20 "designated initializer" language feature.
+ // https://en.cppreference.com/w/cpp/language/aggregate_initialization
+ //
+ // Example:
+ // struct A { int x; int y; };
+ // A a = { .y = 42, .x = 1 };
+ //
+ #if !defined(EA_COMPILER_NO_DESIGNATED_INITIALIZERS)
+ #if defined(EA_COMPILER_CPP20_ENABLED)
+ // supported.
+ #else
+ #define EA_COMPILER_NO_DESIGNATED_INITIALIZERS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS
+ //
+ // Refers to C++11 declaration attribute: carries_dependency.
+ // http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm
+ //
+ #if !defined(EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS
+ //
+ // Defines if the compiler supports >> (as opposed to > >) in template
+ // declarations such as typedef eastl::list<eastl::list<int>> ListList;
+ //
+ #if !defined(EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4003) // GCC 4.3+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_ALIGNOF
+ //
+ // Refers specifically to C++11 alignof and not old compiler extensions such as __alignof__().
+ // However, EABase provides a portable EA_ALIGN_OF which works for all compilers.
+ //
+ #if !defined(EA_COMPILER_NO_ALIGNOF)
+ // Not supported by VC++ as of VS2013, though EA_ALIGN_OF is supported on all coompilers as an alternative.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_ALIGNOF 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_ALIGNAS
+ //
+ // Refers to C++11 alignas.
+ //
+ #if !defined(EA_COMPILER_NO_ALIGNAS)
+ // Not supported by VC++ as of VS2013.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_ALIGNAS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_DELEGATING_CONSTRUCTORS
+ //
+ // Refers to C++11 constructor delegation.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf
+ // https://www.ibm.com/developerworks/mydeveloperworks/blogs/5894415f-be62-4bc0-81c5-3956e82276f3/entry/c_0x_delegating_constructors
+ //
+ #if !defined(EA_COMPILER_NO_DELEGATING_CONSTRUCTORS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 407) // EDG 4.7+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_DELEGATING_CONSTRUCTORS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_INHERITING_CONSTRUCTORS
+ //
+ // Refers to C++11 constructor inheritance via 'using'.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm
+ //
+ #if !defined(EA_COMPILER_NO_INHERITING_CONSTRUCTORS)
+ // Not supported by VC++ as of VS2013.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && EA_COMPILER_HAS_FEATURE(cxx_inheriting_constructors) // Clang
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_INHERITING_CONSTRUCTORS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_USER_DEFINED_LITERALS
+ //
+ // http://en.cppreference.com/w/cpp/language/user_literal
+ // http://stackoverflow.com/questions/237804/what-new-capabilities-do-user-defined-literals-add-to-c
+ //
+ #if !defined(EA_COMPILER_NO_USER_DEFINED_LITERALS)
+ // Not supported by VC++ as of VS2013.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_USER_DEFINED_LITERALS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_STANDARD_LAYOUT_TYPES
+ // a.k.a. POD relaxation
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm
+ //
+ #if !defined(EA_COMPILER_NO_STANDARD_LAYOUT_TYPES)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700) // VS2012+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_STANDARD_LAYOUT_TYPES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_EXTENDED_SIZEOF
+ //
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html
+ // Allows you to do this: sizeof(SomeClass::mSomeMember)
+ //
+ #if !defined(EA_COMPILER_NO_EXTENDED_SIZEOF)
+ // Not supported by VC++ as of VS2013.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ // Versions of EDG prior to 4.5 only support extended sizeof in non-member functions. Full support was added in 4.5
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_EXTENDED_SIZEOF 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_INLINE_NAMESPACES
+ //
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm
+ // http://blog.aaronballman.com/2011/07/inline-namespaces/
+ //
+ #if !defined(EA_COMPILER_NO_INLINE_NAMESPACES)
+ // Not supported by VC++ as of VS2013.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_INLINE_NAMESPACES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_UNRESTRICTED_UNIONS
+ //
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf
+ //
+ #if !defined(EA_COMPILER_NO_UNRESTRICTED_UNIONS)
+ // Not supported by VC++ as of VS2013.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 406) // EDG 4.6+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_UNRESTRICTED_UNIONS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS
+ //
+ // http://en.wikipedia.org/wiki/C%2B%2B11#Explicit_conversion_operators
+ //
+ #if !defined(EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_FULL_VER == 170051025) // VS2012 November Preview for Windows only.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 404) // EDG 4.4+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS
+ //
+ // The compiler does not support default template arguments for function templates.
+ // http://stackoverflow.com/questions/2447458/default-template-arguments-for-function-templates
+ //
+ #if !defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // EDG 4.4+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4003) // GCC 4.3+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS
+ //
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm
+ // http://stackoverflow.com/questions/5751977/local-type-as-template-arguments-in-c
+ //
+ #if !defined(EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported.
+ #if (EA_COMPILER_VERSION < 1700) // VS2010 generates a warning, but the C++ language now allows it.
+ #pragma warning(disable: 4836) // nonstandard extension used: local types or unnamed types cannot be used as template arguments.
+ #endif
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NOEXCEPT
+ //
+ // C++11 noexcept
+ // http://en.cppreference.com/w/cpp/language/attributes
+ // http://en.cppreference.com/w/cpp/language/noexcept
+ //
+ #if !defined(EA_COMPILER_NO_NOEXCEPT)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1900) // VS2014+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_NOEXCEPT 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_RAW_LITERALS
+ //
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm
+ // http://en.wikipedia.org/wiki/C%2B%2B11#New_string_literals
+ //
+ #if !defined(EA_COMPILER_NO_RAW_LITERALS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 407) // EDG 4.7+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_RAW_LITERALS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_UNICODE_STRING_LITERALS
+ //
+ // http://en.wikipedia.org/wiki/C%2B%2B11#New_string_literals
+ //
+ #if !defined(EA_COMPILER_NO_UNICODE_STRING_LITERALS)
+ // Not supported by VC++ as of VS2013.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 407) // EDG 4.7+.
+ // supported. It's not clear if it's v4.4 or v4.7 that adds this support.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 407) // EDG 4.7+.
+ // supported. It's not clear if it's v4.4 or v4.7 that adds this support.
+ #else
+ #define EA_COMPILER_NO_UNICODE_STRING_LITERALS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NEW_CHARACTER_TYPES
+ //
+ // Refers to char16_t and char32_t as true native types (and not something simply typedef'd from uint16_t and uint32_t).
+ // http://en.cppreference.com/w/cpp/language/types
+ //
+ #if !defined(EA_COMPILER_NO_NEW_CHARACTER_TYPES)
+ #if defined(EA_COMPILER_NO_UNICODE_STRING_LITERALS) // Some compilers have had support for char16_t prior to support for u"", but it's not useful to have the former without the latter.
+ #define EA_COMPILER_NO_NEW_CHARACTER_TYPES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS
+ //
+ // C++ 11 relaxed \u\U sequences in strings.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html
+ //
+ #if !defined(EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS)
+ // VC++ up till at least VS2013 supports \u and \U but supports them wrong with respect to the C++11 Standard.
+
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX
+ //
+ // http://en.wikipedia.org/wiki/C%2B%2B11#Uniform_initialization
+ //
+ #if !defined(EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS
+ //
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf
+ //
+ #if !defined(EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_THREAD_LOCAL
+ //
+ // Refers specifically to C++ thread_local, which is like compiler __thread implementations except
+ // that it also supports non-trivial classes (e.g. with ctors). EA_COMPILER_NO_THREAD_LOCAL refers
+ // specifically to full C++11 thread_local support. The EAThread package provides a wrapper for
+ // __thread via EA_THREAD_LOCAL (which unfortunately sounds like C++ thread_local).
+ //
+ // https://en.cppreference.com/w/cpp/keyword/thread_local
+ //
+ #if !defined(EA_COMPILER_NO_THREAD_LOCAL)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && EA_COMPILER_HAS_FEATURE(cxx_thread_local)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1900) // VS2015+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_THREAD_LOCAL 1
+ #endif
+ #endif
+
+
+#endif // INCLUDED_eacompiler_H
+
+
+
+
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/config/eacompilertraits.h b/EASTL/test/packages/EABase/include/Common/EABase/config/eacompilertraits.h
new file mode 100644
index 0000000..1d8bcb4
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/config/eacompilertraits.h
@@ -0,0 +1,2561 @@
+/*-----------------------------------------------------------------------------
+ * config/eacompilertraits.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *-----------------------------------------------------------------------------
+ * Currently supported defines include:
+ * EA_PREPROCESSOR_JOIN
+ *
+ * EA_COMPILER_IS_ANSIC
+ * EA_COMPILER_IS_C99
+ * EA_COMPILER_IS_C11
+ * EA_COMPILER_HAS_C99_TYPES
+ * EA_COMPILER_IS_CPLUSPLUS
+ * EA_COMPILER_MANAGED_CPP
+ * EA_COMPILER_INTMAX_SIZE
+ * EA_OFFSETOF
+ * EA_SIZEOF_MEMBER
+ *
+ * EA_ALIGN_OF()
+ * EA_ALIGN_MAX_STATIC / EA_ALIGN_MAX_AUTOMATIC
+ * EA_ALIGN() / EA_PREFIX_ALIGN() / EA_POSTFIX_ALIGN()
+ * EA_ALIGNED()
+ * EA_PACKED()
+ *
+ * EA_LIKELY()
+ * EA_UNLIKELY()
+ * EA_INIT_PRIORITY()
+ * EA_MAY_ALIAS()
+ * EA_ASSUME()
+ * EA_ANALYSIS_ASSUME()
+ * EA_PURE
+ * EA_WEAK
+ * EA_UNUSED()
+ * EA_EMPTY()
+ *
+ * EA_WCHAR_T_NON_NATIVE
+ * EA_WCHAR_SIZE = <n bytes>
+ *
+ * EA_RESTRICT
+ * EA_DEPRECATED / EA_PREFIX_DEPRECATED / EA_POSTFIX_DEPRECATED
+ * EA_FORCE_INLINE / EA_PREFIX_FORCE_INLINE / EA_POSTFIX_FORCE_INLINE
+ * EA_NO_INLINE / EA_PREFIX_NO_INLINE / EA_POSTFIX_NO_INLINE
+ * EA_NO_VTABLE / EA_CLASS_NO_VTABLE / EA_STRUCT_NO_VTABLE
+ * EA_PASCAL
+ * EA_PASCAL_FUNC()
+ * EA_SSE = [0 | 1]
+ * EA_IMPORT
+ * EA_EXPORT
+ * EA_PRAGMA_ONCE_SUPPORTED
+ * EA_ONCE
+ * EA_OVERRIDE
+ * EA_INHERITANCE_FINAL
+ * EA_SEALED
+ * EA_ABSTRACT
+ * EA_CONSTEXPR / EA_CONSTEXPR_OR_CONST
+ * EA_CONSTEXPR_IF
+ * EA_EXTERN_TEMPLATE
+ * EA_NOEXCEPT
+ * EA_NORETURN
+ * EA_CARRIES_DEPENDENCY
+ * EA_NON_COPYABLE / struct EANonCopyable
+ * EA_OPTIMIZE_OFF / EA_OPTIMIZE_ON
+ * EA_SIGNED_RIGHT_SHIFT_IS_UNSIGNED
+ *
+ * EA_DISABLE_VC_WARNING / EA_RESTORE_VC_WARNING / EA_DISABLE_ALL_VC_WARNINGS / EA_RESTORE_ALL_VC_WARNINGS
+ * EA_DISABLE_GCC_WARNING / EA_RESTORE_GCC_WARNING
+ * EA_DISABLE_CLANG_WARNING / EA_RESTORE_CLANG_WARNING
+ * EA_DISABLE_SN_WARNING / EA_RESTORE_SN_WARNING / EA_DISABLE_ALL_SN_WARNINGS / EA_RESTORE_ALL_SN_WARNINGS
+ * EA_DISABLE_GHS_WARNING / EA_RESTORE_GHS_WARNING
+ * EA_DISABLE_EDG_WARNING / EA_RESTORE_EDG_WARNING
+ * EA_DISABLE_CW_WARNING / EA_RESTORE_CW_WARNING
+ *
+ * EA_DISABLE_DEFAULT_CTOR
+ * EA_DISABLE_COPY_CTOR
+ * EA_DISABLE_MOVE_CTOR
+ * EA_DISABLE_ASSIGNMENT_OPERATOR
+ * EA_DISABLE_MOVE_OPERATOR
+ *
+ * Todo:
+ * Find a way to reliably detect wchar_t size at preprocessor time and
+ * implement it below for EA_WCHAR_SIZE.
+ *
+ * Todo:
+ * Find out how to support EA_PASCAL and EA_PASCAL_FUNC for systems in
+ * which it hasn't yet been found out for.
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eacompilertraits_H
+#define INCLUDED_eacompilertraits_H
+
+ #include <EABase/config/eaplatform.h>
+ #include <EABase/config/eacompiler.h>
+
+
+ // Metrowerks uses #defines in its core C header files to define
+ // the kind of information we need below (e.g. C99 compatibility)
+
+
+
+ // Determine if this compiler is ANSI C compliant and if it is C99 compliant.
+ #if defined(__STDC__)
+ #define EA_COMPILER_IS_ANSIC 1 // The compiler claims to be ANSI C
+
+ // Is the compiler a C99 compiler or equivalent?
+ // From ISO/IEC 9899:1999:
+ // 6.10.8 Predefined macro names
+ // __STDC_VERSION__ The integer constant 199901L. (150)
+ //
+ // 150) This macro was not specified in ISO/IEC 9899:1990 and was
+ // specified as 199409L in ISO/IEC 9899/AMD1:1995. The intention
+ // is that this will remain an integer constant of type long int
+ // that is increased with each revision of this International Standard.
+ //
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
+ #define EA_COMPILER_IS_C99 1
+ #endif
+
+ // Is the compiler a C11 compiler?
+ // From ISO/IEC 9899:2011:
+ // Page 176, 6.10.8.1 (Predefined macro names) :
+ // __STDC_VERSION__ The integer constant 201112L. (178)
+ //
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+ #define EA_COMPILER_IS_C11 1
+ #endif
+ #endif
+
+ // Some compilers (e.g. GCC) define __USE_ISOC99 if they are not
+ // strictly C99 compilers (or are simply C++ compilers) but are set
+ // to use C99 functionality. Metrowerks defines _MSL_C99 as 1 in
+ // this case, but 0 otherwise.
+ #if (defined(__USE_ISOC99) || (defined(_MSL_C99) && (_MSL_C99 == 1))) && !defined(EA_COMPILER_IS_C99)
+ #define EA_COMPILER_IS_C99 1
+ #endif
+
+ // Metrowerks defines C99 types (e.g. intptr_t) instrinsically when in C99 mode (-lang C99 on the command line).
+ #if (defined(_MSL_C99) && (_MSL_C99 == 1))
+ #define EA_COMPILER_HAS_C99_TYPES 1
+ #endif
+
+ #if defined(__GNUC__)
+ #if (((__GNUC__ * 100) + __GNUC_MINOR__) >= 302) // Also, GCC defines _HAS_C9X.
+ #define EA_COMPILER_HAS_C99_TYPES 1 // The compiler is not necessarily a C99 compiler, but it defines C99 types.
+
+ #ifndef __STDC_LIMIT_MACROS
+ #define __STDC_LIMIT_MACROS 1
+ #endif
+
+ #ifndef __STDC_CONSTANT_MACROS
+ #define __STDC_CONSTANT_MACROS 1 // This tells the GCC compiler that we want it to use its native C99 types.
+ #endif
+ #endif
+ #endif
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600)
+ #define EA_COMPILER_HAS_C99_TYPES 1
+ #endif
+
+ #ifdef __cplusplus
+ #define EA_COMPILER_IS_CPLUSPLUS 1
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_PREPROCESSOR_JOIN
+ //
+ // This macro joins the two arguments together, even when one of
+ // the arguments is itself a macro (see 16.3.1 in C++98 standard).
+ // This is often used to create a unique name with __LINE__.
+ //
+ // For example, this declaration:
+ // char EA_PREPROCESSOR_JOIN(unique_, __LINE__);
+ // expands to this:
+ // char unique_73;
+ //
+ // Note that all versions of MSVC++ up to at least version 7.1
+ // fail to properly compile macros that use __LINE__ in them
+ // when the "program database for edit and continue" option
+ // is enabled. The result is that __LINE__ gets converted to
+ // something like __LINE__(Var+37).
+ //
+ #ifndef EA_PREPROCESSOR_JOIN
+ #define EA_PREPROCESSOR_JOIN(a, b) EA_PREPROCESSOR_JOIN1(a, b)
+ #define EA_PREPROCESSOR_JOIN1(a, b) EA_PREPROCESSOR_JOIN2(a, b)
+ #define EA_PREPROCESSOR_JOIN2(a, b) a##b
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_STRINGIFY
+ //
+ // Example usage:
+ // printf("Line: %s", EA_STRINGIFY(__LINE__));
+ //
+ #ifndef EA_STRINGIFY
+ #define EA_STRINGIFY(x) EA_STRINGIFYIMPL(x)
+ #define EA_STRINGIFYIMPL(x) #x
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_IDENTITY
+ //
+ #ifndef EA_IDENTITY
+ #define EA_IDENTITY(x) x
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_COMPILER_MANAGED_CPP
+ // Defined if this is being compiled with Managed C++ extensions
+ #ifdef EA_COMPILER_MSVC
+ #if EA_COMPILER_VERSION >= 1300
+ #ifdef _MANAGED
+ #define EA_COMPILER_MANAGED_CPP 1
+ #endif
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_COMPILER_INTMAX_SIZE
+ //
+ // This is related to the concept of intmax_t uintmax_t, but is available
+ // in preprocessor form as opposed to compile-time form. At compile-time
+ // you can use intmax_t and uintmax_t to use the actual types.
+ //
+ #if defined(__GNUC__) && defined(__x86_64__)
+ #define EA_COMPILER_INTMAX_SIZE 16 // intmax_t is __int128_t (GCC extension) and is 16 bytes.
+ #else
+ #define EA_COMPILER_INTMAX_SIZE 8 // intmax_t is int64_t and is 8 bytes.
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_LPAREN / EA_RPAREN / EA_COMMA / EA_SEMI
+ //
+ // These are used for using special characters in macro-using expressions.
+ // Note that this macro intentionally uses (), as in some cases it can't
+ // work unless it does.
+ //
+ // Example usage:
+ // int x = SOME_MACRO(SomeTemplate<int EA_COMMA() int EACOMMA() char>);
+ //
+ #ifndef EA_LPAREN
+ #define EA_LPAREN() (
+ #endif
+ #ifndef EA_RPAREN
+ #define EA_RPAREN() )
+ #endif
+ #ifndef EA_COMMA
+ #define EA_COMMA() ,
+ #endif
+ #ifndef EA_SEMI
+ #define EA_SEMI() ;
+ #endif
+
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_OFFSETOF
+ // Implements a portable version of the non-standard offsetof macro.
+ //
+ // The offsetof macro is guaranteed to only work with POD types. However, we wish to use
+ // it for non-POD types but where we know that offsetof will still work for the cases
+ // in which we use it. GCC unilaterally gives a warning when using offsetof with a non-POD,
+ // even if the given usage happens to work. So we make a workaround version of offsetof
+ // here for GCC which has the same effect but tricks the compiler into not issuing the warning.
+ // The 65536 does the compiler fooling; the reinterpret_cast prevents the possibility of
+ // an overloaded operator& for the class getting in the way.
+ //
+ // Example usage:
+ // struct A{ int x; int y; };
+ // size_t n = EA_OFFSETOF(A, y);
+ //
+ #if defined(__GNUC__) // We can't use GCC 4's __builtin_offsetof because it mistakenly complains about non-PODs that are really PODs.
+ #define EA_OFFSETOF(struct_, member_) ((size_t)(((uintptr_t)&reinterpret_cast<const volatile char&>((((struct_*)65536)->member_))) - 65536))
+ #else
+ #define EA_OFFSETOF(struct_, member_) offsetof(struct_, member_)
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_SIZEOF_MEMBER
+ // Implements a portable way to determine the size of a member.
+ //
+ // The EA_SIZEOF_MEMBER simply returns the size of a member within a class or struct; member
+ // access rules still apply. We offer two approaches depending on the compiler's support for non-static member
+ // initializers although most C++11 compilers support this.
+ //
+ // Example usage:
+ // struct A{ int x; int y; };
+ // size_t n = EA_SIZEOF_MEMBER(A, y);
+ //
+ #ifndef EA_COMPILER_NO_EXTENDED_SIZEOF
+ #define EA_SIZEOF_MEMBER(struct_, member_) (sizeof(struct_::member_))
+ #else
+ #define EA_SIZEOF_MEMBER(struct_, member_) (sizeof(((struct_*)0)->member_))
+ #endif
+
+ // ------------------------------------------------------------------------
+ // alignment expressions
+ //
+ // Here we define
+ // EA_ALIGN_OF(type) // Returns size_t.
+ // EA_ALIGN_MAX_STATIC // The max align value that the compiler will respect for EA_ALIGN for static data (global and static variables). Some compilers allow high values, some allow no more than 8. EA_ALIGN_MIN is assumed to be 1.
+ // EA_ALIGN_MAX_AUTOMATIC // The max align value for automatic variables (variables declared as local to a function).
+ // EA_ALIGN(n) // Used as a prefix. n is byte alignment, with being a power of two. Most of the time you can use this and avoid using EA_PREFIX_ALIGN/EA_POSTFIX_ALIGN.
+ // EA_ALIGNED(t, v, n) // Type, variable, alignment. Used to align an instance. You should need this only for unusual compilers.
+ // EA_PACKED // Specifies that the given structure be packed (and not have its members aligned).
+ //
+ // Also we define the following for rare cases that it's needed.
+ // EA_PREFIX_ALIGN(n) // n is byte alignment, with being a power of two. You should need this only for unusual compilers.
+ // EA_POSTFIX_ALIGN(n) // Valid values for n are 1, 2, 4, 8, etc. You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // size_t x = EA_ALIGN_OF(int); Non-aligned equivalents. Meaning
+ // EA_PREFIX_ALIGN(8) int x = 5; int x = 5; Align x on 8 for compilers that require prefix attributes. Can just use EA_ALIGN instead.
+ // EA_ALIGN(8) int x; int x; Align x on 8 for compilers that allow prefix attributes.
+ // int x EA_POSTFIX_ALIGN(8); int x; Align x on 8 for compilers that require postfix attributes.
+ // int x EA_POSTFIX_ALIGN(8) = 5; int x = 5; Align x on 8 for compilers that require postfix attributes.
+ // int x EA_POSTFIX_ALIGN(8)(5); int x(5); Align x on 8 for compilers that require postfix attributes.
+ // struct EA_PREFIX_ALIGN(8) X { int x; } EA_POSTFIX_ALIGN(8); struct X { int x; }; Define X as a struct which is aligned on 8 when used.
+ // EA_ALIGNED(int, x, 8) = 5; int x = 5; Align x on 8.
+ // EA_ALIGNED(int, x, 16)(5); int x(5); Align x on 16.
+ // EA_ALIGNED(int, x[3], 16); int x[3]; Align x array on 16.
+ // EA_ALIGNED(int, x[3], 16) = { 1, 2, 3 }; int x[3] = { 1, 2, 3 }; Align x array on 16.
+ // int x[3] EA_PACKED; int x[3]; Pack the 3 ints of the x array. GCC doesn't seem to support packing of int arrays.
+ // struct EA_ALIGN(32) X { int x; int y; }; struct X { int x; }; Define A as a struct which is aligned on 32 when used.
+ // EA_ALIGN(32) struct X { int x; int y; } Z; struct X { int x; } Z; Define A as a struct, and align the instance Z on 32.
+ // struct X { int x EA_PACKED; int y EA_PACKED; }; struct X { int x; int y; }; Pack the x and y members of struct X.
+ // struct X { int x; int y; } EA_PACKED; struct X { int x; int y; }; Pack the members of struct X.
+ // typedef EA_ALIGNED(int, int16, 16); int16 n16; typedef int int16; int16 n16; Define int16 as an int which is aligned on 16.
+ // typedef EA_ALIGNED(X, X16, 16); X16 x16; typedef X X16; X16 x16; Define X16 as an X which is aligned on 16.
+
+ #if !defined(EA_ALIGN_MAX) // If the user hasn't globally set an alternative value...
+ #if defined(EA_PROCESSOR_ARM) // ARM compilers in general tend to limit automatic variables to 8 or less.
+ #define EA_ALIGN_MAX_STATIC 1048576
+ #define EA_ALIGN_MAX_AUTOMATIC 1 // Typically they support only built-in natural aligment types (both arm-eabi and apple-abi).
+ #elif defined(EA_PLATFORM_APPLE)
+ #define EA_ALIGN_MAX_STATIC 1048576
+ #define EA_ALIGN_MAX_AUTOMATIC 16
+ #else
+ #define EA_ALIGN_MAX_STATIC 1048576 // Arbitrarily high value. What is the actual max?
+ #define EA_ALIGN_MAX_AUTOMATIC 1048576
+ #endif
+ #endif
+
+ // EDG intends to be compatible with GCC but has a bug whereby it
+ // fails to support calling a constructor in an aligned declaration when
+ // using postfix alignment attributes. Prefix works for alignment, but does not align
+ // the size like postfix does. Prefix also fails on templates. So gcc style post fix
+ // is still used, but the user will need to use EA_POSTFIX_ALIGN before the constructor parameters.
+ #if defined(__GNUC__) && (__GNUC__ < 3)
+ #define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+ #define EA_ALIGN(n)
+ #define EA_PREFIX_ALIGN(n)
+ #define EA_POSTFIX_ALIGN(n) __attribute__((aligned(n)))
+ #define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+ #define EA_PACKED __attribute__((packed))
+
+ // GCC 3.x+, IBM, and clang support prefix attributes.
+ #elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__xlC__) || defined(__clang__)
+ #define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+ #define EA_ALIGN(n) __attribute__((aligned(n)))
+ #define EA_PREFIX_ALIGN(n)
+ #define EA_POSTFIX_ALIGN(n) __attribute__((aligned(n)))
+ #define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+ #define EA_PACKED __attribute__((packed))
+
+ // Metrowerks supports prefix attributes.
+ // Metrowerks does not support packed alignment attributes.
+ #elif defined(EA_COMPILER_INTEL) || defined(CS_UNDEFINED_STRING) || (defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1300))
+ #define EA_ALIGN_OF(type) ((size_t)__alignof(type))
+ #define EA_ALIGN(n) __declspec(align(n))
+ #define EA_PREFIX_ALIGN(n) EA_ALIGN(n)
+ #define EA_POSTFIX_ALIGN(n)
+ #define EA_ALIGNED(variable_type, variable, n) EA_ALIGN(n) variable_type variable
+ #define EA_PACKED // See EA_PRAGMA_PACK_VC for an alternative.
+
+ // Arm brand compiler
+ #elif defined(EA_COMPILER_ARM)
+ #define EA_ALIGN_OF(type) ((size_t)__ALIGNOF__(type))
+ #define EA_ALIGN(n) __align(n)
+ #define EA_PREFIX_ALIGN(n) __align(n)
+ #define EA_POSTFIX_ALIGN(n)
+ #define EA_ALIGNED(variable_type, variable, n) __align(n) variable_type variable
+ #define EA_PACKED __packed
+
+ #else // Unusual compilers
+ // There is nothing we can do about some of these. This is not as bad a problem as it seems.
+ // If the given platform/compiler doesn't support alignment specifications, then it's somewhat
+ // likely that alignment doesn't matter for that platform. Otherwise they would have defined
+ // functionality to manipulate alignment.
+ #define EA_ALIGN(n)
+ #define EA_PREFIX_ALIGN(n)
+ #define EA_POSTFIX_ALIGN(n)
+ #define EA_ALIGNED(variable_type, variable, n) variable_type variable
+ #define EA_PACKED
+
+ #ifdef __cplusplus
+ template <typename T> struct EAAlignOf1 { enum { s = sizeof (T), value = s ^ (s & (s - 1)) }; };
+ template <typename T> struct EAAlignOf2;
+ template <int size_diff> struct helper { template <typename T> struct Val { enum { value = size_diff }; }; };
+ template <> struct helper<0> { template <typename T> struct Val { enum { value = EAAlignOf2<T>::value }; }; };
+ template <typename T> struct EAAlignOf2 { struct Big { T x; char c; };
+ enum { diff = sizeof (Big) - sizeof (T), value = helper<diff>::template Val<Big>::value }; };
+ template <typename T> struct EAAlignof3 { enum { x = EAAlignOf2<T>::value, y = EAAlignOf1<T>::value, value = x < y ? x : y }; };
+ #define EA_ALIGN_OF(type) ((size_t)EAAlignof3<type>::value)
+
+ #else
+ // C implementation of EA_ALIGN_OF
+ // This implementation works for most cases, but doesn't directly work
+ // for types such as function pointer declarations. To work with those
+ // types you need to typedef the type and then use the typedef in EA_ALIGN_OF.
+ #define EA_ALIGN_OF(type) ((size_t)offsetof(struct { char c; type m; }, m))
+ #endif
+ #endif
+
+ // EA_PRAGMA_PACK_VC
+ //
+ // Wraps #pragma pack in a way that allows for cleaner code.
+ //
+ // Example usage:
+ // EA_PRAGMA_PACK_VC(push, 1)
+ // struct X{ char c; int i; };
+ // EA_PRAGMA_PACK_VC(pop)
+ //
+ #if !defined(EA_PRAGMA_PACK_VC)
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_PRAGMA_PACK_VC(...) __pragma(pack(__VA_ARGS__))
+ #elif !defined(EA_COMPILER_NO_VARIADIC_MACROS)
+ #define EA_PRAGMA_PACK_VC(...)
+ #else
+ // No support. However, all compilers of significance to us support variadic macros.
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_LIKELY / EA_UNLIKELY
+ //
+ // Defined as a macro which gives a hint to the compiler for branch
+ // prediction. GCC gives you the ability to manually give a hint to
+ // the compiler about the result of a comparison, though it's often
+ // best to compile shipping code with profiling feedback under both
+ // GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there
+ // are times when you feel very sure that a boolean expression will
+ // usually evaluate to either true or false and can help the compiler
+ // by using an explicity directive...
+ //
+ // Example usage:
+ // if(EA_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0.
+ // { ... }
+ //
+ // Example usage:
+ // if(EA_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0.
+ // { ... }
+ //
+ #ifndef EA_LIKELY
+ #if (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)
+ #if defined(__cplusplus)
+ #define EA_LIKELY(x) __builtin_expect(!!(x), true)
+ #define EA_UNLIKELY(x) __builtin_expect(!!(x), false)
+ #else
+ #define EA_LIKELY(x) __builtin_expect(!!(x), 1)
+ #define EA_UNLIKELY(x) __builtin_expect(!!(x), 0)
+ #endif
+ #else
+ #define EA_LIKELY(x) (x)
+ #define EA_UNLIKELY(x) (x)
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_HAS_INCLUDE_AVAILABLE
+ //
+ // Used to guard against the EA_HAS_INCLUDE() macro on compilers that do not
+ // support said feature.
+ //
+ // Example usage:
+ //
+ // #if EA_HAS_INCLUDE_AVAILABLE
+ // #if EA_HAS_INCLUDE("myinclude.h")
+ // #include "myinclude.h"
+ // #endif
+ // #endif
+ #if !defined(EA_HAS_INCLUDE_AVAILABLE)
+ #if EA_COMPILER_CPP17_ENABLED || EA_COMPILER_CLANG || EA_COMPILER_GNUC
+ #define EA_HAS_INCLUDE_AVAILABLE 1
+ #else
+ #define EA_HAS_INCLUDE_AVAILABLE 0
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_HAS_INCLUDE
+ //
+ // May be used in #if and #elif expressions to test for the existence
+ // of the header referenced in the operand. If possible it evaluates to a
+ // non-zero value and zero otherwise. The operand is the same form as the file
+ // in a #include directive.
+ //
+ // Example usage:
+ //
+ // #if EA_HAS_INCLUDE("myinclude.h")
+ // #include "myinclude.h"
+ // #endif
+ //
+ // #if EA_HAS_INCLUDE(<myinclude.h>)
+ // #include <myinclude.h>
+ // #endif
+
+ #if !defined(EA_HAS_INCLUDE)
+ #if EA_COMPILER_CPP17_ENABLED
+ #define EA_HAS_INCLUDE(x) __has_include(x)
+ #elif EA_COMPILER_CLANG
+ #define EA_HAS_INCLUDE(x) __has_include(x)
+ #elif EA_COMPILER_GNUC
+ #define EA_HAS_INCLUDE(x) __has_include(x)
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_PRIORITY_AVAILABLE
+ //
+ // This value is either not defined, or defined to 1.
+ // Defines if the GCC attribute init_priority is supported by the compiler.
+ //
+ #if !defined(EA_INIT_PRIORITY_AVAILABLE)
+ #if defined(__GNUC__) && !defined(__EDG__) // EDG typically #defines __GNUC__ but doesn't implement init_priority.
+ #define EA_INIT_PRIORITY_AVAILABLE 1
+ #elif defined(__clang__)
+ #define EA_INIT_PRIORITY_AVAILABLE 1 // Clang implements init_priority
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_PRIORITY
+ //
+ // This is simply a wrapper for the GCC init_priority attribute that allows
+ // multiplatform code to be easier to read. This attribute doesn't apply
+ // to VC++ because VC++ uses file-level pragmas to control init ordering.
+ //
+ // Example usage:
+ // SomeClass gSomeClass EA_INIT_PRIORITY(2000);
+ //
+ #if !defined(EA_INIT_PRIORITY)
+ #if defined(EA_INIT_PRIORITY_AVAILABLE)
+ #define EA_INIT_PRIORITY(x) __attribute__ ((init_priority (x)))
+ #else
+ #define EA_INIT_PRIORITY(x)
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_SEG_AVAILABLE
+ //
+ //
+ #if !defined(EA_INIT_SEG_AVAILABLE)
+ #if defined(_MSC_VER)
+ #define EA_INIT_SEG_AVAILABLE 1
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_SEG
+ //
+ // Specifies a keyword or code section that affects the order in which startup code is executed.
+ //
+ // https://docs.microsoft.com/en-us/cpp/preprocessor/init-seg?view=vs-2019
+ //
+ // Example:
+ // EA_INIT_SEG(compiler) MyType gMyTypeGlobal;
+ // EA_INIT_SEG("my_section") MyOtherType gMyOtherTypeGlobal;
+ //
+ #if !defined(EA_INIT_SEG)
+ #if defined(EA_INIT_SEG_AVAILABLE)
+ #define EA_INIT_SEG(x) \
+ __pragma(warning(push)) __pragma(warning(disable : 4074)) __pragma(warning(disable : 4075)) __pragma(init_seg(x)) \
+ __pragma(warning(pop))
+ #else
+ #define EA_INIT_SEG(x)
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_MAY_ALIAS_AVAILABLE
+ //
+ // Defined as 0, 1, or 2.
+ // Defines if the GCC attribute may_alias is supported by the compiler.
+ // Consists of a value 0 (unsupported, shouldn't be used), 1 (some support),
+ // or 2 (full proper support).
+ //
+ #ifndef EA_MAY_ALIAS_AVAILABLE
+ #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303)
+ #if !defined(__EDG__) // define it as 1 while defining GCC's support as 2.
+ #define EA_MAY_ALIAS_AVAILABLE 2
+ #else
+ #define EA_MAY_ALIAS_AVAILABLE 0
+ #endif
+ #else
+ #define EA_MAY_ALIAS_AVAILABLE 0
+ #endif
+ #endif
+
+
+ // EA_MAY_ALIAS
+ //
+ // Defined as a macro that wraps the GCC may_alias attribute. This attribute
+ // has no significance for VC++ because VC++ doesn't support the concept of
+ // strict aliasing. Users should avoid writing code that breaks strict
+ // aliasing rules; EA_MAY_ALIAS is for cases with no alternative.
+ //
+ // Example usage:
+ // void* EA_MAY_ALIAS gPtr = NULL;
+ //
+ // Example usage:
+ // typedef void* EA_MAY_ALIAS pvoid_may_alias;
+ // pvoid_may_alias gPtr = NULL;
+ //
+ #if EA_MAY_ALIAS_AVAILABLE
+ #define EA_MAY_ALIAS __attribute__((__may_alias__))
+ #else
+ #define EA_MAY_ALIAS
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_ASSUME
+ //
+ // This acts the same as the VC++ __assume directive and is implemented
+ // simply as a wrapper around it to allow portable usage of it and to take
+ // advantage of it if and when it appears in other compilers.
+ //
+ // Example usage:
+ // void Function(int a) {
+ // switch(a) {
+ // case 1:
+ // DoSomething(1);
+ // break;
+ // case 2:
+ // DoSomething(-1);
+ // break;
+ // default:
+ // EA_ASSUME(0); // This tells the optimizer that the default cannot be reached.
+ // }
+ // }
+ //
+ #ifndef EA_ASSUME
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300) // If VC7.0 and later
+ #define EA_ASSUME(x) __assume(x)
+ #else
+ #define EA_ASSUME(x)
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_ANALYSIS_ASSUME
+ //
+ // This acts the same as the VC++ __analysis_assume directive and is implemented
+ // simply as a wrapper around it to allow portable usage of it and to take
+ // advantage of it if and when it appears in other compilers.
+ //
+ // Example usage:
+ // char Function(char* p) {
+ // EA_ANALYSIS_ASSUME(p != NULL);
+ // return *p;
+ // }
+ //
+ #ifndef EA_ANALYSIS_ASSUME
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300) // If VC7.0 and later
+ #define EA_ANALYSIS_ASSUME(x) __analysis_assume(!!(x)) // !! because that allows for convertible-to-bool in addition to bool.
+ #else
+ #define EA_ANALYSIS_ASSUME(x)
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_VC_WARNING / EA_RESTORE_VC_WARNING
+ //
+ // Disable and re-enable warning(s) within code.
+ // This is simply a wrapper for VC++ #pragma warning(disable: nnnn) for the
+ // purpose of making code easier to read due to avoiding nested compiler ifdefs
+ // directly in code.
+ //
+ // Example usage:
+ // EA_DISABLE_VC_WARNING(4127 3244)
+ // <code>
+ // EA_RESTORE_VC_WARNING()
+ //
+ #ifndef EA_DISABLE_VC_WARNING
+ #if defined(_MSC_VER)
+ #define EA_DISABLE_VC_WARNING(w) \
+ __pragma(warning(push)) \
+ __pragma(warning(disable:w))
+ #else
+ #define EA_DISABLE_VC_WARNING(w)
+ #endif
+ #endif
+
+ #ifndef EA_RESTORE_VC_WARNING
+ #if defined(_MSC_VER)
+ #define EA_RESTORE_VC_WARNING() \
+ __pragma(warning(pop))
+ #else
+ #define EA_RESTORE_VC_WARNING()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_ENABLE_VC_WARNING_AS_ERROR / EA_DISABLE_VC_WARNING_AS_ERROR
+ //
+ // Disable and re-enable treating a warning as error within code.
+ // This is simply a wrapper for VC++ #pragma warning(error: nnnn) for the
+ // purpose of making code easier to read due to avoiding nested compiler ifdefs
+ // directly in code.
+ //
+ // Example usage:
+ // EA_ENABLE_VC_WARNING_AS_ERROR(4996)
+ // <code>
+ // EA_DISABLE_VC_WARNING_AS_ERROR()
+ //
+ #ifndef EA_ENABLE_VC_WARNING_AS_ERROR
+ #if defined(_MSC_VER)
+ #define EA_ENABLE_VC_WARNING_AS_ERROR(w) \
+ __pragma(warning(push)) \
+ __pragma(warning(error:w))
+ #else
+ #define EA_ENABLE_VC_WARNING_AS_ERROR(w)
+ #endif
+ #endif
+
+ #ifndef EA_DISABLE_VC_WARNING_AS_ERROR
+ #if defined(_MSC_VER)
+ #define EA_DISABLE_VC_WARNING_AS_ERROR() \
+ __pragma(warning(pop))
+ #else
+ #define EA_DISABLE_VC_WARNING_AS_ERROR()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_GCC_WARNING / EA_RESTORE_GCC_WARNING
+ //
+ // Example usage:
+ // // Only one warning can be ignored per statement, due to how GCC works.
+ // EA_DISABLE_GCC_WARNING(-Wuninitialized)
+ // EA_DISABLE_GCC_WARNING(-Wunused)
+ // <code>
+ // EA_RESTORE_GCC_WARNING()
+ // EA_RESTORE_GCC_WARNING()
+ //
+ #ifndef EA_DISABLE_GCC_WARNING
+ #if defined(EA_COMPILER_GNUC)
+ #define EAGCCWHELP0(x) #x
+ #define EAGCCWHELP1(x) EAGCCWHELP0(GCC diagnostic ignored x)
+ #define EAGCCWHELP2(x) EAGCCWHELP1(#x)
+ #endif
+
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) // Can't test directly for __GNUC__ because some compilers lie.
+ #define EA_DISABLE_GCC_WARNING(w) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma(EAGCCWHELP2(w))
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004)
+ #define EA_DISABLE_GCC_WARNING(w) \
+ _Pragma(EAGCCWHELP2(w))
+ #else
+ #define EA_DISABLE_GCC_WARNING(w)
+ #endif
+ #endif
+
+ #ifndef EA_RESTORE_GCC_WARNING
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)
+ #define EA_RESTORE_GCC_WARNING() \
+ _Pragma("GCC diagnostic pop")
+ #else
+ #define EA_RESTORE_GCC_WARNING()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_GCC_WARNINGS / EA_RESTORE_ALL_GCC_WARNINGS
+ //
+ // This isn't possible except via using _Pragma("GCC system_header"), though
+ // that has some limitations in how it works. Another means is to manually
+ // disable individual warnings within a GCC diagnostic push statement.
+ // GCC doesn't have as many warnings as VC++ and EDG and so this may be feasible.
+ // ------------------------------------------------------------------------
+
+
+ // ------------------------------------------------------------------------
+ // EA_ENABLE_GCC_WARNING_AS_ERROR / EA_DISABLE_GCC_WARNING_AS_ERROR
+ //
+ // Example usage:
+ // // Only one warning can be treated as an error per statement, due to how GCC works.
+ // EA_ENABLE_GCC_WARNING_AS_ERROR(-Wuninitialized)
+ // EA_ENABLE_GCC_WARNING_AS_ERROR(-Wunused)
+ // <code>
+ // EA_DISABLE_GCC_WARNING_AS_ERROR()
+ // EA_DISABLE_GCC_WARNING_AS_ERROR()
+ //
+ #ifndef EA_ENABLE_GCC_WARNING_AS_ERROR
+ #if defined(EA_COMPILER_GNUC)
+ #define EAGCCWERRORHELP0(x) #x
+ #define EAGCCWERRORHELP1(x) EAGCCWERRORHELP0(GCC diagnostic error x)
+ #define EAGCCWERRORHELP2(x) EAGCCWERRORHELP1(#x)
+ #endif
+
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) // Can't test directly for __GNUC__ because some compilers lie.
+ #define EA_ENABLE_GCC_WARNING_AS_ERROR(w) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma(EAGCCWERRORHELP2(w))
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004)
+ #define EA_DISABLE_GCC_WARNING(w) \
+ _Pragma(EAGCCWERRORHELP2(w))
+ #else
+ #define EA_DISABLE_GCC_WARNING(w)
+ #endif
+ #endif
+
+ #ifndef EA_DISABLE_GCC_WARNING_AS_ERROR
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)
+ #define EA_DISABLE_GCC_WARNING_AS_ERROR() \
+ _Pragma("GCC diagnostic pop")
+ #else
+ #define EA_DISABLE_GCC_WARNING_AS_ERROR()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_CLANG_WARNING / EA_RESTORE_CLANG_WARNING
+ //
+ // Example usage:
+ // // Only one warning can be ignored per statement, due to how clang works.
+ // EA_DISABLE_CLANG_WARNING(-Wuninitialized)
+ // EA_DISABLE_CLANG_WARNING(-Wunused)
+ // <code>
+ // EA_RESTORE_CLANG_WARNING()
+ // EA_RESTORE_CLANG_WARNING()
+ //
+ #ifndef EA_DISABLE_CLANG_WARNING
+ #if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ #define EACLANGWHELP0(x) #x
+ #define EACLANGWHELP1(x) EACLANGWHELP0(clang diagnostic ignored x)
+ #define EACLANGWHELP2(x) EACLANGWHELP1(#x)
+
+ #define EA_DISABLE_CLANG_WARNING(w) \
+ _Pragma("clang diagnostic push") \
+ _Pragma(EACLANGWHELP2(-Wunknown-warning-option))\
+ _Pragma(EACLANGWHELP2(w))
+ #else
+ #define EA_DISABLE_CLANG_WARNING(w)
+ #endif
+ #endif
+
+ #ifndef EA_RESTORE_CLANG_WARNING
+ #if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ #define EA_RESTORE_CLANG_WARNING() \
+ _Pragma("clang diagnostic pop")
+ #else
+ #define EA_RESTORE_CLANG_WARNING()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_CLANG_WARNINGS / EA_RESTORE_ALL_CLANG_WARNINGS
+ //
+ // The situation for clang is the same as for GCC. See above.
+ // ------------------------------------------------------------------------
+
+
+ // ------------------------------------------------------------------------
+ // EA_ENABLE_CLANG_WARNING_AS_ERROR / EA_DISABLE_CLANG_WARNING_AS_ERROR
+ //
+ // Example usage:
+ // // Only one warning can be treated as an error per statement, due to how clang works.
+ // EA_ENABLE_CLANG_WARNING_AS_ERROR(-Wuninitialized)
+ // EA_ENABLE_CLANG_WARNING_AS_ERROR(-Wunused)
+ // <code>
+ // EA_DISABLE_CLANG_WARNING_AS_ERROR()
+ // EA_DISABLE_CLANG_WARNING_AS_ERROR()
+ //
+ #ifndef EA_ENABLE_CLANG_WARNING_AS_ERROR
+ #if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ #define EACLANGWERRORHELP0(x) #x
+ #define EACLANGWERRORHELP1(x) EACLANGWERRORHELP0(clang diagnostic error x)
+ #define EACLANGWERRORHELP2(x) EACLANGWERRORHELP1(#x)
+
+ #define EA_ENABLE_CLANG_WARNING_AS_ERROR(w) \
+ _Pragma("clang diagnostic push") \
+ _Pragma(EACLANGWERRORHELP2(w))
+ #else
+ #define EA_DISABLE_CLANG_WARNING(w)
+ #endif
+ #endif
+
+ #ifndef EA_DISABLE_CLANG_WARNING_AS_ERROR
+ #if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ #define EA_DISABLE_CLANG_WARNING_AS_ERROR() \
+ _Pragma("clang diagnostic pop")
+ #else
+ #define EA_DISABLE_CLANG_WARNING_AS_ERROR()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_SN_WARNING / EA_RESTORE_SN_WARNING
+ //
+ // Note that we define this macro specifically for the SN compiler instead of
+ // having a generic one for EDG-based compilers. The reason for this is that
+ // while SN is indeed based on EDG, SN has different warning value mappings
+ // and thus warning 1234 for SN is not the same as 1234 for all other EDG compilers.
+ //
+ // Example usage:
+ // // Currently we are limited to one warning per line.
+ // EA_DISABLE_SN_WARNING(1787)
+ // EA_DISABLE_SN_WARNING(552)
+ // <code>
+ // EA_RESTORE_SN_WARNING()
+ // EA_RESTORE_SN_WARNING()
+ //
+ #ifndef EA_DISABLE_SN_WARNING
+ #define EA_DISABLE_SN_WARNING(w)
+ #endif
+
+ #ifndef EA_RESTORE_SN_WARNING
+ #define EA_RESTORE_SN_WARNING()
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_SN_WARNINGS / EA_RESTORE_ALL_SN_WARNINGS
+ //
+ // Example usage:
+ // EA_DISABLE_ALL_SN_WARNINGS()
+ // <code>
+ // EA_RESTORE_ALL_SN_WARNINGS()
+ //
+ #ifndef EA_DISABLE_ALL_SN_WARNINGS
+ #define EA_DISABLE_ALL_SN_WARNINGS()
+ #endif
+
+ #ifndef EA_RESTORE_ALL_SN_WARNINGS
+ #define EA_RESTORE_ALL_SN_WARNINGS()
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_GHS_WARNING / EA_RESTORE_GHS_WARNING
+ //
+ // Disable warnings from the Green Hills compiler.
+ //
+ // Example usage:
+ // EA_DISABLE_GHS_WARNING(193)
+ // EA_DISABLE_GHS_WARNING(236, 5323)
+ // <code>
+ // EA_RESTORE_GHS_WARNING()
+ // EA_RESTORE_GHS_WARNING()
+ //
+ #ifndef EA_DISABLE_GHS_WARNING
+ #define EA_DISABLE_GHS_WARNING(w)
+ #endif
+
+ #ifndef EA_RESTORE_GHS_WARNING
+ #define EA_RESTORE_GHS_WARNING()
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_GHS_WARNINGS / EA_RESTORE_ALL_GHS_WARNINGS
+ //
+ // #ifndef EA_DISABLE_ALL_GHS_WARNINGS
+ // #if defined(EA_COMPILER_GREEN_HILLS)
+ // #define EA_DISABLE_ALL_GHS_WARNINGS(w) \_
+ // _Pragma("_________")
+ // #else
+ // #define EA_DISABLE_ALL_GHS_WARNINGS(w)
+ // #endif
+ // #endif
+ //
+ // #ifndef EA_RESTORE_ALL_GHS_WARNINGS
+ // #if defined(EA_COMPILER_GREEN_HILLS)
+ // #define EA_RESTORE_ALL_GHS_WARNINGS() \_
+ // _Pragma("_________")
+ // #else
+ // #define EA_RESTORE_ALL_GHS_WARNINGS()
+ // #endif
+ // #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_EDG_WARNING / EA_RESTORE_EDG_WARNING
+ //
+ // Example usage:
+ // // Currently we are limited to one warning per line.
+ // EA_DISABLE_EDG_WARNING(193)
+ // EA_DISABLE_EDG_WARNING(236)
+ // <code>
+ // EA_RESTORE_EDG_WARNING()
+ // EA_RESTORE_EDG_WARNING()
+ //
+ #ifndef EA_DISABLE_EDG_WARNING
+ // EDG-based compilers are inconsistent in how the implement warning pragmas.
+ #if defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_INTEL) && !defined(EA_COMPILER_RVCT)
+ #define EAEDGWHELP0(x) #x
+ #define EAEDGWHELP1(x) EAEDGWHELP0(diag_suppress x)
+
+ #define EA_DISABLE_EDG_WARNING(w) \
+ _Pragma("control %push diag") \
+ _Pragma(EAEDGWHELP1(w))
+ #else
+ #define EA_DISABLE_EDG_WARNING(w)
+ #endif
+ #endif
+
+ #ifndef EA_RESTORE_EDG_WARNING
+ #if defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_INTEL) && !defined(EA_COMPILER_RVCT)
+ #define EA_RESTORE_EDG_WARNING() \
+ _Pragma("control %pop diag")
+ #else
+ #define EA_RESTORE_EDG_WARNING()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_EDG_WARNINGS / EA_RESTORE_ALL_EDG_WARNINGS
+ //
+ //#ifndef EA_DISABLE_ALL_EDG_WARNINGS
+ // #if defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_SN)
+ // #define EA_DISABLE_ALL_EDG_WARNINGS(w) \_
+ // _Pragma("_________")
+ // #else
+ // #define EA_DISABLE_ALL_EDG_WARNINGS(w)
+ // #endif
+ //#endif
+ //
+ //#ifndef EA_RESTORE_ALL_EDG_WARNINGS
+ // #if defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_SN)
+ // #define EA_RESTORE_ALL_EDG_WARNINGS() \_
+ // _Pragma("_________")
+ // #else
+ // #define EA_RESTORE_ALL_EDG_WARNINGS()
+ // #endif
+ //#endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_CW_WARNING / EA_RESTORE_CW_WARNING
+ //
+ // Note that this macro can only control warnings via numbers and not by
+ // names. The reason for this is that the compiler's syntax for such
+ // warnings is not the same as for numbers.
+ //
+ // Example usage:
+ // // Currently we are limited to one warning per line and must also specify the warning in the restore macro.
+ // EA_DISABLE_CW_WARNING(10317)
+ // EA_DISABLE_CW_WARNING(10324)
+ // <code>
+ // EA_RESTORE_CW_WARNING(10317)
+ // EA_RESTORE_CW_WARNING(10324)
+ //
+ #ifndef EA_DISABLE_CW_WARNING
+ #define EA_DISABLE_CW_WARNING(w)
+ #endif
+
+ #ifndef EA_RESTORE_CW_WARNING
+
+ #define EA_RESTORE_CW_WARNING(w)
+
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_CW_WARNINGS / EA_RESTORE_ALL_CW_WARNINGS
+ //
+ #ifndef EA_DISABLE_ALL_CW_WARNINGS
+ #define EA_DISABLE_ALL_CW_WARNINGS()
+
+ #endif
+
+ #ifndef EA_RESTORE_ALL_CW_WARNINGS
+ #define EA_RESTORE_ALL_CW_WARNINGS()
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_PURE
+ //
+ // This acts the same as the GCC __attribute__ ((pure)) directive and is
+ // implemented simply as a wrapper around it to allow portable usage of
+ // it and to take advantage of it if and when it appears in other compilers.
+ //
+ // A "pure" function is one that has no effects except its return value and
+ // its return value is a function of only the function's parameters or
+ // non-volatile global variables. Any parameter or global variable access
+ // must be read-only. Loop optimization and subexpression elimination can be
+ // applied to such functions. A common example is strlen(): Given identical
+ // inputs, the function's return value (its only effect) is invariant across
+ // multiple invocations and thus can be pulled out of a loop and called but once.
+ //
+ // Example usage:
+ // EA_PURE void Function();
+ //
+ #ifndef EA_PURE
+ #if defined(EA_COMPILER_GNUC)
+ #define EA_PURE __attribute__((pure))
+ #elif defined(EA_COMPILER_ARM) // Arm brand compiler for ARM CPU
+ #define EA_PURE __pure
+ #else
+ #define EA_PURE
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_WEAK
+ // EA_WEAK_SUPPORTED -- defined as 0 or 1.
+ //
+ // GCC
+ // The weak attribute causes the declaration to be emitted as a weak
+ // symbol rather than a global. This is primarily useful in defining
+ // library functions which can be overridden in user code, though it
+ // can also be used with non-function declarations.
+ //
+ // VC++
+ // At link time, if multiple definitions of a COMDAT are seen, the linker
+ // picks one and discards the rest. If the linker option /OPT:REF
+ // is selected, then COMDAT elimination will occur to remove all the
+ // unreferenced data items in the linker output.
+ //
+ // Example usage:
+ // EA_WEAK void Function();
+ //
+ #ifndef EA_WEAK
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300) // If VC7.0 and later
+ #define EA_WEAK __declspec(selectany)
+ #define EA_WEAK_SUPPORTED 1
+ #elif defined(_MSC_VER) || (defined(__GNUC__) && defined(__CYGWIN__))
+ #define EA_WEAK
+ #define EA_WEAK_SUPPORTED 0
+ #elif defined(EA_COMPILER_ARM) // Arm brand compiler for ARM CPU
+ #define EA_WEAK __weak
+ #define EA_WEAK_SUPPORTED 1
+ #else // GCC and IBM compilers, others.
+ #define EA_WEAK __attribute__((weak))
+ #define EA_WEAK_SUPPORTED 1
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_UNUSED
+ //
+ // Makes compiler warnings about unused variables go away.
+ //
+ // Example usage:
+ // void Function(int x)
+ // {
+ // int y;
+ // EA_UNUSED(x);
+ // EA_UNUSED(y);
+ // }
+ //
+ #ifndef EA_UNUSED
+ // The EDG solution below is pretty weak and needs to be augmented or replaced.
+ // It can't handle the C language, is limited to places where template declarations
+ // can be used, and requires the type x to be usable as a functions reference argument.
+ #if defined(__cplusplus) && defined(__EDG__)
+ template <typename T>
+ inline void EABaseUnused(T const volatile & x) { (void)x; }
+ #define EA_UNUSED(x) EABaseUnused(x)
+ #else
+ #define EA_UNUSED(x) (void)x
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_EMPTY
+ //
+ // Allows for a null statement, usually for the purpose of avoiding compiler warnings.
+ //
+ // Example usage:
+ // #ifdef EA_DEBUG
+ // #define MyDebugPrintf(x, y) printf(x, y)
+ // #else
+ // #define MyDebugPrintf(x, y) EA_EMPTY
+ // #endif
+ //
+ #ifndef EA_EMPTY
+ #define EA_EMPTY (void)0
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_CURRENT_FUNCTION
+ //
+ // Provides a consistent way to get the current function name as a macro
+ // like the __FILE__ and __LINE__ macros work. The C99 standard specifies
+ // that __func__ be provided by the compiler, but most compilers don't yet
+ // follow that convention. However, many compilers have an alternative.
+ //
+ // We also define EA_CURRENT_FUNCTION_SUPPORTED for when it is not possible
+ // to have EA_CURRENT_FUNCTION work as expected.
+ //
+ // Defined inside a function because otherwise the macro might not be
+ // defined and code below might not compile. This happens with some
+ // compilers.
+ //
+ #ifndef EA_CURRENT_FUNCTION
+ #if defined __GNUC__ || (defined __ICC && __ICC >= 600)
+ #define EA_CURRENT_FUNCTION __PRETTY_FUNCTION__
+ #elif defined(__FUNCSIG__)
+ #define EA_CURRENT_FUNCTION __FUNCSIG__
+ #elif (defined __INTEL_COMPILER && __INTEL_COMPILER >= 600) || (defined __IBMCPP__ && __IBMCPP__ >= 500) || (defined CS_UNDEFINED_STRING && CS_UNDEFINED_STRING >= 0x4200)
+ #define EA_CURRENT_FUNCTION __FUNCTION__
+ #elif defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901
+ #define EA_CURRENT_FUNCTION __func__
+ #else
+ #define EA_CURRENT_FUNCTION "(unknown function)"
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // wchar_t
+ // Here we define:
+ // EA_WCHAR_T_NON_NATIVE
+ // EA_WCHAR_SIZE = <sizeof(wchar_t)>
+ //
+ #ifndef EA_WCHAR_T_NON_NATIVE
+ // Compilers that always implement wchar_t as native include:
+ // COMEAU, new SN, and other EDG-based compilers.
+ // GCC
+ // Borland
+ // SunPro
+ // IBM Visual Age
+ #if defined(EA_COMPILER_INTEL)
+ #if (EA_COMPILER_VERSION < 700)
+ #define EA_WCHAR_T_NON_NATIVE 1
+ #else
+ #if (!defined(_WCHAR_T_DEFINED) && !defined(_WCHAR_T))
+ #define EA_WCHAR_T_NON_NATIVE 1
+ #endif
+ #endif
+ #elif defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_CLANG) && defined(EA_PLATFORM_WINDOWS))
+ #ifndef _NATIVE_WCHAR_T_DEFINED
+ #define EA_WCHAR_T_NON_NATIVE 1
+ #endif
+ #elif defined(__EDG_VERSION__) && (!defined(_WCHAR_T) && (__EDG_VERSION__ < 400)) // EDG prior to v4 uses _WCHAR_T to indicate if wchar_t is native. v4+ may define something else, but we're not currently aware of it.
+ #define EA_WCHAR_T_NON_NATIVE 1
+ #endif
+ #endif
+
+ #ifndef EA_WCHAR_SIZE // If the user hasn't specified that it is a given size...
+ #if defined(__WCHAR_MAX__) // GCC defines this for most platforms.
+ #if (__WCHAR_MAX__ == 2147483647) || (__WCHAR_MAX__ == 4294967295)
+ #define EA_WCHAR_SIZE 4
+ #elif (__WCHAR_MAX__ == 32767) || (__WCHAR_MAX__ == 65535)
+ #define EA_WCHAR_SIZE 2
+ #elif (__WCHAR_MAX__ == 127) || (__WCHAR_MAX__ == 255)
+ #define EA_WCHAR_SIZE 1
+ #else
+ #define EA_WCHAR_SIZE 4
+ #endif
+ #elif defined(WCHAR_MAX) // The SN and Arm compilers define this.
+ #if (WCHAR_MAX == 2147483647) || (WCHAR_MAX == 4294967295)
+ #define EA_WCHAR_SIZE 4
+ #elif (WCHAR_MAX == 32767) || (WCHAR_MAX == 65535)
+ #define EA_WCHAR_SIZE 2
+ #elif (WCHAR_MAX == 127) || (WCHAR_MAX == 255)
+ #define EA_WCHAR_SIZE 1
+ #else
+ #define EA_WCHAR_SIZE 4
+ #endif
+ #elif defined(__WCHAR_BIT) // Green Hills (and other versions of EDG?) uses this.
+ #if (__WCHAR_BIT == 16)
+ #define EA_WCHAR_SIZE 2
+ #elif (__WCHAR_BIT == 32)
+ #define EA_WCHAR_SIZE 4
+ #elif (__WCHAR_BIT == 8)
+ #define EA_WCHAR_SIZE 1
+ #else
+ #define EA_WCHAR_SIZE 4
+ #endif
+ #elif defined(_WCMAX) // The SN and Arm compilers define this.
+ #if (_WCMAX == 2147483647) || (_WCMAX == 4294967295)
+ #define EA_WCHAR_SIZE 4
+ #elif (_WCMAX == 32767) || (_WCMAX == 65535)
+ #define EA_WCHAR_SIZE 2
+ #elif (_WCMAX == 127) || (_WCMAX == 255)
+ #define EA_WCHAR_SIZE 1
+ #else
+ #define EA_WCHAR_SIZE 4
+ #endif
+ #elif defined(EA_PLATFORM_UNIX)
+ // It is standard on Unix to have wchar_t be int32_t or uint32_t.
+ // All versions of GNUC default to a 32 bit wchar_t, but EA has used
+ // the -fshort-wchar GCC command line option to force it to 16 bit.
+ // If you know that the compiler is set to use a wchar_t of other than
+ // the default, you need to manually define EA_WCHAR_SIZE for the build.
+ #define EA_WCHAR_SIZE 4
+ #else
+ // It is standard on Windows to have wchar_t be uint16_t. GCC
+ // defines wchar_t as int by default. Electronic Arts has
+ // standardized on wchar_t being an unsigned 16 bit value on all
+ // console platforms. Given that there is currently no known way to
+ // tell at preprocessor time what the size of wchar_t is, we declare
+ // it to be 2, as this is the Electronic Arts standard. If you have
+ // EA_WCHAR_SIZE != sizeof(wchar_t), then your code might not be
+ // broken, but it also won't work with wchar libraries and data from
+ // other parts of EA. Under GCC, you can force wchar_t to two bytes
+ // with the -fshort-wchar compiler argument.
+ #define EA_WCHAR_SIZE 2
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_RESTRICT
+ //
+ // The C99 standard defines a new keyword, restrict, which allows for the
+ // improvement of code generation regarding memory usage. Compilers can
+ // generate significantly faster code when you are able to use restrict.
+ //
+ // Example usage:
+ // void DoSomething(char* EA_RESTRICT p1, char* EA_RESTRICT p2);
+ //
+ #ifndef EA_RESTRICT
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+ #define EA_RESTRICT __restrict
+ #elif defined(EA_COMPILER_CLANG)
+ #define EA_RESTRICT __restrict
+ #elif defined(EA_COMPILER_GNUC) // Includes GCC and other compilers emulating GCC.
+ #define EA_RESTRICT __restrict // GCC defines 'restrict' (as opposed to __restrict) in C99 mode only.
+ #elif defined(EA_COMPILER_ARM)
+ #define EA_RESTRICT __restrict
+ #elif defined(EA_COMPILER_IS_C99)
+ #define EA_RESTRICT restrict
+ #else
+ // If the compiler didn't support restricted pointers, defining EA_RESTRICT
+ // away would result in compiling and running fine but you just wouldn't
+ // the same level of optimization. On the other hand, all the major compilers
+ // support restricted pointers.
+ #define EA_RESTRICT
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DEPRECATED // Used as a prefix.
+ // EA_PREFIX_DEPRECATED // You should need this only for unusual compilers.
+ // EA_POSTFIX_DEPRECATED // You should need this only for unusual compilers.
+ // EA_DEPRECATED_MESSAGE // Used as a prefix and provides a deprecation message.
+ //
+ // Example usage:
+ // EA_DEPRECATED void Function();
+ // EA_DEPRECATED_MESSAGE("Use 1.0v API instead") void Function();
+ //
+ // or for maximum portability:
+ // EA_PREFIX_DEPRECATED void Function() EA_POSTFIX_DEPRECATED;
+ //
+
+ #ifndef EA_DEPRECATED
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_DEPRECATED [[deprecated]]
+ #elif defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION > 1300) // If VC7 (VS2003) or later...
+ #define EA_DEPRECATED __declspec(deprecated)
+ #elif defined(EA_COMPILER_MSVC)
+ #define EA_DEPRECATED
+ #else
+ #define EA_DEPRECATED __attribute__((deprecated))
+ #endif
+ #endif
+
+ #ifndef EA_PREFIX_DEPRECATED
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_PREFIX_DEPRECATED [[deprecated]]
+ #define EA_POSTFIX_DEPRECATED
+ #elif defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION > 1300) // If VC7 (VS2003) or later...
+ #define EA_PREFIX_DEPRECATED __declspec(deprecated)
+ #define EA_POSTFIX_DEPRECATED
+ #elif defined(EA_COMPILER_MSVC)
+ #define EA_PREFIX_DEPRECATED
+ #define EA_POSTFIX_DEPRECATED
+ #else
+ #define EA_PREFIX_DEPRECATED
+ #define EA_POSTFIX_DEPRECATED __attribute__((deprecated))
+ #endif
+ #endif
+
+ #ifndef EA_DEPRECATED_MESSAGE
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_DEPRECATED_MESSAGE(msg) [[deprecated(#msg)]]
+ #else
+ // Compiler does not support depreaction messages, explicitly drop the msg but still mark the function as deprecated
+ #define EA_DEPRECATED_MESSAGE(msg) EA_DEPRECATED
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FORCE_INLINE // Used as a prefix.
+ // EA_PREFIX_FORCE_INLINE // You should need this only for unusual compilers.
+ // EA_POSTFIX_FORCE_INLINE // You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // EA_FORCE_INLINE void Foo(); // Implementation elsewhere.
+ // EA_PREFIX_FORCE_INLINE void Foo() EA_POSTFIX_FORCE_INLINE; // Implementation elsewhere.
+ //
+ // Note that when the prefix version of this function is used, it replaces
+ // the regular C++ 'inline' statement. Thus you should not use both the
+ // C++ inline statement and this macro with the same function declaration.
+ //
+ // To force inline usage under GCC 3.1+, you use this:
+ // inline void Foo() __attribute__((always_inline));
+ // or
+ // inline __attribute__((always_inline)) void Foo();
+ //
+ // The CodeWarrior compiler doesn't have the concept of forcing inlining per function.
+ //
+ #ifndef EA_FORCE_INLINE
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_FORCE_INLINE __forceinline
+ #elif defined(EA_COMPILER_GNUC) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 301) || defined(EA_COMPILER_CLANG)
+ #if defined(__cplusplus)
+ #define EA_FORCE_INLINE inline __attribute__((always_inline))
+ #else
+ #define EA_FORCE_INLINE __inline__ __attribute__((always_inline))
+ #endif
+ #else
+ #if defined(__cplusplus)
+ #define EA_FORCE_INLINE inline
+ #else
+ #define EA_FORCE_INLINE __inline
+ #endif
+ #endif
+ #endif
+
+ #if defined(EA_COMPILER_GNUC) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 301) || defined(EA_COMPILER_CLANG)
+ #define EA_PREFIX_FORCE_INLINE inline
+ #define EA_POSTFIX_FORCE_INLINE __attribute__((always_inline))
+ #else
+ #define EA_PREFIX_FORCE_INLINE inline
+ #define EA_POSTFIX_FORCE_INLINE
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FORCE_INLINE_LAMBDA
+ //
+ // EA_FORCE_INLINE_LAMBDA is used to force inline a call to a lambda when possible.
+ // Force inlining a lambda can be useful to reduce overhead in situations where a lambda may
+ // may only be called once, or inlining allows the compiler to apply other optimizations that wouldn't
+ // otherwise be possible.
+ //
+ // The ability to force inline a lambda is currently only available on a subset of compilers.
+ //
+ // Example usage:
+ //
+ // auto lambdaFunction = []() EA_FORCE_INLINE_LAMBDA
+ // {
+ // };
+ //
+ #ifndef EA_FORCE_INLINE_LAMBDA
+ #if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+ #define EA_FORCE_INLINE_LAMBDA __attribute__((always_inline))
+ #else
+ #define EA_FORCE_INLINE_LAMBDA
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NO_INLINE // Used as a prefix.
+ // EA_PREFIX_NO_INLINE // You should need this only for unusual compilers.
+ // EA_POSTFIX_NO_INLINE // You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // EA_NO_INLINE void Foo(); // Implementation elsewhere.
+ // EA_PREFIX_NO_INLINE void Foo() EA_POSTFIX_NO_INLINE; // Implementation elsewhere.
+ //
+ // That this declaration is incompatbile with C++ 'inline' and any
+ // variant of EA_FORCE_INLINE.
+ //
+ // To disable inline usage under VC++ priof to VS2005, you need to use this:
+ // #pragma inline_depth(0) // Disable inlining.
+ // void Foo() { ... }
+ // #pragma inline_depth() // Restore to default.
+ //
+ // Since there is no easy way to disable inlining on a function-by-function
+ // basis in VC++ prior to VS2005, the best strategy is to write platform-specific
+ // #ifdefs in the code or to disable inlining for a given module and enable
+ // functions individually with EA_FORCE_INLINE.
+ //
+ #ifndef EA_NO_INLINE
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+ #define EA_NO_INLINE __declspec(noinline)
+ #elif defined(EA_COMPILER_MSVC)
+ #define EA_NO_INLINE
+ #else
+ #define EA_NO_INLINE __attribute__((noinline))
+ #endif
+ #endif
+
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+ #define EA_PREFIX_NO_INLINE __declspec(noinline)
+ #define EA_POSTFIX_NO_INLINE
+ #elif defined(EA_COMPILER_MSVC)
+ #define EA_PREFIX_NO_INLINE
+ #define EA_POSTFIX_NO_INLINE
+ #else
+ #define EA_PREFIX_NO_INLINE
+ #define EA_POSTFIX_NO_INLINE __attribute__((noinline))
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NO_VTABLE
+ //
+ // Example usage:
+ // class EA_NO_VTABLE X {
+ // virtual void InterfaceFunction();
+ // };
+ //
+ // EA_CLASS_NO_VTABLE(X) {
+ // virtual void InterfaceFunction();
+ // };
+ //
+ #ifdef EA_COMPILER_MSVC
+ #define EA_NO_VTABLE __declspec(novtable)
+ #define EA_CLASS_NO_VTABLE(x) class __declspec(novtable) x
+ #define EA_STRUCT_NO_VTABLE(x) struct __declspec(novtable) x
+ #else
+ #define EA_NO_VTABLE
+ #define EA_CLASS_NO_VTABLE(x) class x
+ #define EA_STRUCT_NO_VTABLE(x) struct x
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_PASCAL
+ //
+ // Also known on PC platforms as stdcall.
+ // This convention causes the compiler to assume that the called function
+ // will pop off the stack space used to pass arguments, unless it takes a
+ // variable number of arguments.
+ //
+ // Example usage:
+ // this:
+ // void DoNothing(int x);
+ // void DoNothing(int x){}
+ // would be written as this:
+ // void EA_PASCAL_FUNC(DoNothing(int x));
+ // void EA_PASCAL_FUNC(DoNothing(int x)){}
+ //
+ #ifndef EA_PASCAL
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_PASCAL __stdcall
+ #elif defined(EA_COMPILER_GNUC) && defined(EA_PROCESSOR_X86)
+ #define EA_PASCAL __attribute__((stdcall))
+ #else
+ // Some compilers simply don't support pascal calling convention.
+ // As a result, there isn't an issue here, since the specification of
+ // pascal calling convention is for the purpose of disambiguating the
+ // calling convention that is applied.
+ #define EA_PASCAL
+ #endif
+ #endif
+
+ #ifndef EA_PASCAL_FUNC
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_PASCAL_FUNC(funcname_and_paramlist) __stdcall funcname_and_paramlist
+ #elif defined(EA_COMPILER_GNUC) && defined(EA_PROCESSOR_X86)
+ #define EA_PASCAL_FUNC(funcname_and_paramlist) __attribute__((stdcall)) funcname_and_paramlist
+ #else
+ #define EA_PASCAL_FUNC(funcname_and_paramlist) funcname_and_paramlist
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_SSE
+ // Visual C Processor Packs define _MSC_FULL_VER and are needed for SSE
+ // Intel C also has SSE support.
+ // EA_SSE is used to select FPU or SSE versions in hw_select.inl
+ //
+ // EA_SSE defines the level of SSE support:
+ // 0 indicates no SSE support
+ // 1 indicates SSE1 is supported
+ // 2 indicates SSE2 is supported
+ // 3 indicates SSE3 (or greater) is supported
+ //
+ // Note: SSE support beyond SSE3 can't be properly represented as a single
+ // version number. Instead users should use specific SSE defines (e.g.
+ // EA_SSE4_2) to detect what specific support is available. EA_SSE being
+ // equal to 3 really only indicates that SSE3 or greater is supported.
+ #ifndef EA_SSE
+ #if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+ #if defined(__SSE3__)
+ #define EA_SSE 3
+ #elif defined(__SSE2__)
+ #define EA_SSE 2
+ #elif defined(__SSE__) && __SSE__
+ #define EA_SSE 1
+ #else
+ #define EA_SSE 0
+ #endif
+ #elif (defined(EA_SSE3) && EA_SSE3) || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSE 3
+ #elif defined(EA_SSE2) && EA_SSE2
+ #define EA_SSE 2
+ #elif defined(EA_PROCESSOR_X86) && defined(_MSC_FULL_VER) && !defined(__NOSSE__) && defined(_M_IX86_FP)
+ #define EA_SSE _M_IX86_FP
+ #elif defined(EA_PROCESSOR_X86) && defined(EA_COMPILER_INTEL) && !defined(__NOSSE__)
+ #define EA_SSE 1
+ #elif defined(EA_PROCESSOR_X86_64)
+ // All x64 processors support SSE2 or higher
+ #define EA_SSE 2
+ #else
+ #define EA_SSE 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // We define separate defines for SSE support beyond SSE1. These defines
+ // are particularly useful for detecting SSE4.x features since there isn't
+ // a single concept of SSE4.
+ //
+ // The following SSE defines are always defined. 0 indicates the
+ // feature/level of SSE is not supported, and 1 indicates support is
+ // available.
+ #ifndef EA_SSE2
+ #if EA_SSE >= 2
+ #define EA_SSE2 1
+ #else
+ #define EA_SSE2 0
+ #endif
+ #endif
+ #ifndef EA_SSE3
+ #if EA_SSE >= 3
+ #define EA_SSE3 1
+ #else
+ #define EA_SSE3 0
+ #endif
+ #endif
+ #ifndef EA_SSSE3
+ #if defined __SSSE3__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSSE3 1
+ #else
+ #define EA_SSSE3 0
+ #endif
+ #endif
+ #ifndef EA_SSE4_1
+ #if defined __SSE4_1__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSE4_1 1
+ #else
+ #define EA_SSE4_1 0
+ #endif
+ #endif
+ #ifndef EA_SSE4_2
+ #if defined __SSE4_2__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSE4_2 1
+ #else
+ #define EA_SSE4_2 0
+ #endif
+ #endif
+ #ifndef EA_SSE4A
+ #if defined __SSE4A__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSE4A 1
+ #else
+ #define EA_SSE4A 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_AVX
+ // EA_AVX may be used to determine if Advanced Vector Extensions are available for the target architecture
+ //
+ // EA_AVX defines the level of AVX support:
+ // 0 indicates no AVX support
+ // 1 indicates AVX1 is supported
+ // 2 indicates AVX2 is supported
+ #ifndef EA_AVX
+ #if defined __AVX2__
+ #define EA_AVX 2
+ #elif defined __AVX__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_AVX 1
+ #else
+ #define EA_AVX 0
+ #endif
+ #endif
+ #ifndef EA_AVX2
+ #if EA_AVX >= 2
+ #define EA_AVX2 1
+ #else
+ #define EA_AVX2 0
+ #endif
+ #endif
+
+ // EA_FP16C may be used to determine the existence of float <-> half conversion operations on an x86 CPU.
+ // (For example to determine if _mm_cvtph_ps or _mm_cvtps_ph could be used.)
+ #ifndef EA_FP16C
+ #if defined __F16C__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_FP16C 1
+ #else
+ #define EA_FP16C 0
+ #endif
+ #endif
+
+ // EA_FP128 may be used to determine if __float128 is a supported type for use. This type is enabled by a GCC extension (_GLIBCXX_USE_FLOAT128)
+ // but has support by some implementations of clang (__FLOAT128__)
+ // PS4 does not support __float128 as of SDK 5.500 https://ps4.siedev.net/resources/documents/SDK/5.500/CPU_Compiler_ABI-Overview/0003.html
+ #ifndef EA_FP128
+ #if (defined __FLOAT128__ || defined _GLIBCXX_USE_FLOAT128) && !defined(EA_PLATFORM_SONY)
+ #define EA_FP128 1
+ #else
+ #define EA_FP128 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_ABM
+ // EA_ABM may be used to determine if Advanced Bit Manipulation sets are available for the target architecture (POPCNT, LZCNT)
+ //
+ #ifndef EA_ABM
+ #if defined(__ABM__) || defined(EA_PLATFORM_XBOXONE) || defined(EA_PLATFORM_SONY) || defined(CS_UNDEFINED_STRING)
+ #define EA_ABM 1
+ #else
+ #define EA_ABM 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_NEON
+ // EA_NEON may be used to determine if NEON is supported.
+ #ifndef EA_NEON
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
+ #define EA_NEON 1
+ #else
+ #define EA_NEON 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_BMI
+ // EA_BMI may be used to determine if Bit Manipulation Instruction sets are available for the target architecture
+ //
+ // EA_BMI defines the level of BMI support:
+ // 0 indicates no BMI support
+ // 1 indicates BMI1 is supported
+ // 2 indicates BMI2 is supported
+ #ifndef EA_BMI
+ #if defined(__BMI2__)
+ #define EA_BMI 2
+ #elif defined(__BMI__) || defined(EA_PLATFORM_XBOXONE) || defined(CS_UNDEFINED_STRING)
+ #define EA_BMI 1
+ #else
+ #define EA_BMI 0
+ #endif
+ #endif
+ #ifndef EA_BMI2
+ #if EA_BMI >= 2
+ #define EA_BMI2 1
+ #else
+ #define EA_BMI2 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_FMA3
+ // EA_FMA3 may be used to determine if Fused Multiply Add operations are available for the target architecture
+ // __FMA__ is defined only by GCC, Clang, and ICC; MSVC only defines __AVX__ and __AVX2__
+ // FMA3 was introduced alongside AVX2 on Intel Haswell
+ // All AMD processors support FMA3 if AVX2 is also supported
+ //
+ // EA_FMA3 defines the level of FMA3 support:
+ // 0 indicates no FMA3 support
+ // 1 indicates FMA3 is supported
+ #ifndef EA_FMA3
+ #if defined(__FMA__) || EA_AVX2 >= 1
+ #define EA_FMA3 1
+ #else
+ #define EA_FMA3 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_TBM
+ // EA_TBM may be used to determine if Trailing Bit Manipulation instructions are available for the target architecture
+ #ifndef EA_TBM
+ #if defined(__TBM__)
+ #define EA_TBM 1
+ #else
+ #define EA_TBM 0
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_IMPORT
+ // import declaration specification
+ // specifies that the declared symbol is imported from another dynamic library.
+ #ifndef EA_IMPORT
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_IMPORT __declspec(dllimport)
+ #else
+ #define EA_IMPORT
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_EXPORT
+ // export declaration specification
+ // specifies that the declared symbol is exported from the current dynamic library.
+ // this is not the same as the C++ export keyword. The C++ export keyword has been
+ // removed from the language as of C++11.
+ #ifndef EA_EXPORT
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_EXPORT __declspec(dllexport)
+ #else
+ #define EA_EXPORT
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_PRAGMA_ONCE_SUPPORTED
+ //
+ // This is a wrapper for the #pragma once preprocessor directive.
+ // It allows for some compilers (in particular VC++) to implement signifcantly
+ // faster include file preprocessing. #pragma once can be used to replace
+ // header include guards or to augment them. However, #pragma once isn't
+ // necessarily supported by all compilers and isn't guaranteed to be so in
+ // the future, so using #pragma once to replace traditional include guards
+ // is not strictly portable. Note that a direct #define for #pragma once is
+ // impossible with VC++, due to limitations, but can be done with other
+ // compilers/preprocessors via _Pragma("once").
+ //
+ // Example usage (which includes traditional header guards for portability):
+ // #ifndef SOMEPACKAGE_SOMEHEADER_H
+ // #define SOMEPACKAGE_SOMEHEADER_H
+ //
+ // #if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ // #pragma once
+ // #endif
+ //
+ // <user code>
+ //
+ // #endif
+ //
+ #if defined(_MSC_VER) || defined(__GNUC__) || defined(__EDG__) || defined(__APPLE__)
+ #define EA_PRAGMA_ONCE_SUPPORTED 1
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_ONCE
+ //
+ // Example usage (which includes traditional header guards for portability):
+ // #ifndef SOMEPACKAGE_SOMEHEADER_H
+ // #define SOMEPACKAGE_SOMEHEADER_H
+ //
+ // EA_ONCE()
+ //
+ // <user code>
+ //
+ // #endif
+ //
+ #if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #if defined(_MSC_VER)
+ #define EA_ONCE() __pragma(once)
+ #else
+ #define EA_ONCE() // _Pragma("once") It turns out that _Pragma("once") isn't supported by many compilers.
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_OVERRIDE
+ //
+ // C++11 override
+ // See http://msdn.microsoft.com/en-us/library/jj678987.aspx for more information.
+ // You can use EA_FINAL_OVERRIDE to combine usage of EA_OVERRIDE and EA_INHERITANCE_FINAL in a single statement.
+ //
+ // Example usage:
+ // struct B { virtual void f(int); };
+ // struct D : B { void f(int) EA_OVERRIDE; };
+ //
+ #ifndef EA_OVERRIDE
+ #if defined(EA_COMPILER_NO_OVERRIDE)
+ #define EA_OVERRIDE
+ #else
+ #define EA_OVERRIDE override
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INHERITANCE_FINAL
+ //
+ // Portably wraps the C++11 final specifier.
+ // See http://msdn.microsoft.com/en-us/library/jj678985.aspx for more information.
+ // You can use EA_FINAL_OVERRIDE to combine usage of EA_OVERRIDE and EA_INHERITANCE_FINAL in a single statement.
+ // This is not called EA_FINAL because that term is used within EA to denote debug/release/final builds.
+ //
+ // Example usage:
+ // struct B { virtual void f() EA_INHERITANCE_FINAL; };
+ //
+ #ifndef EA_INHERITANCE_FINAL
+ #if defined(EA_COMPILER_NO_INHERITANCE_FINAL)
+ #define EA_INHERITANCE_FINAL
+ #elif (defined(_MSC_VER) && (EA_COMPILER_VERSION < 1700)) // Pre-VS2012
+ #define EA_INHERITANCE_FINAL sealed
+ #else
+ #define EA_INHERITANCE_FINAL final
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FINAL_OVERRIDE
+ //
+ // Portably wraps the C++11 override final specifiers combined.
+ //
+ // Example usage:
+ // struct A { virtual void f(); };
+ // struct B : public A { virtual void f() EA_FINAL_OVERRIDE; };
+ //
+ #ifndef EA_FINAL_OVERRIDE
+ #define EA_FINAL_OVERRIDE EA_OVERRIDE EA_INHERITANCE_FINAL
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_SEALED
+ //
+ // This is deprecated, as the C++11 Standard has final (EA_INHERITANCE_FINAL) instead.
+ // See http://msdn.microsoft.com/en-us/library/0w2w91tf.aspx for more information.
+ // Example usage:
+ // struct B { virtual void f() EA_SEALED; };
+ //
+ #ifndef EA_SEALED
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // VS2005 (VC8) and later
+ #define EA_SEALED sealed
+ #else
+ #define EA_SEALED
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_ABSTRACT
+ //
+ // This is a Microsoft language extension.
+ // See http://msdn.microsoft.com/en-us/library/b0z6b513.aspx for more information.
+ // Example usage:
+ // struct X EA_ABSTRACT { virtual void f(){} };
+ //
+ #ifndef EA_ABSTRACT
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // VS2005 (VC8) and later
+ #define EA_ABSTRACT abstract
+ #else
+ #define EA_ABSTRACT
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_CONSTEXPR
+ // EA_CONSTEXPR_OR_CONST
+ //
+ // Portable wrapper for C++11's 'constexpr' support.
+ //
+ // See http://www.cprogramming.com/c++11/c++11-compile-time-processing-with-constexpr.html for more information.
+ // Example usage:
+ // EA_CONSTEXPR int GetValue() { return 37; }
+ // EA_CONSTEXPR_OR_CONST double gValue = std::sin(kTwoPi);
+ //
+ #if !defined(EA_CONSTEXPR)
+ #if defined(EA_COMPILER_NO_CONSTEXPR)
+ #define EA_CONSTEXPR
+ #else
+ #define EA_CONSTEXPR constexpr
+ #endif
+ #endif
+
+ #if !defined(EA_CONSTEXPR_OR_CONST)
+ #if defined(EA_COMPILER_NO_CONSTEXPR)
+ #define EA_CONSTEXPR_OR_CONST const
+ #else
+ #define EA_CONSTEXPR_OR_CONST constexpr
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_CONSTEXPR_IF
+ //
+ // Portable wrapper for C++17's 'constexpr if' support.
+ //
+ // https://en.cppreference.com/w/cpp/language/if
+ //
+ // Example usage:
+ //
+ // EA_CONSTEXPR_IF(eastl::is_copy_constructible_v<T>)
+ // { ... }
+ //
+ #if !defined(EA_CONSTEXPR_IF)
+ #if defined(EA_COMPILER_NO_CONSTEXPR_IF)
+ #define EA_CONSTEXPR_IF(predicate) if ((predicate))
+ #else
+ #define EA_CONSTEXPR_IF(predicate) if constexpr ((predicate))
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_EXTERN_TEMPLATE
+ //
+ // Portable wrapper for C++11's 'extern template' support.
+ //
+ // Example usage:
+ // EA_EXTERN_TEMPLATE(class basic_string<char>);
+ //
+ #if !defined(EA_EXTERN_TEMPLATE)
+ #if defined(EA_COMPILER_NO_EXTERN_TEMPLATE)
+ #define EA_EXTERN_TEMPLATE(declaration)
+ #else
+ #define EA_EXTERN_TEMPLATE(declaration) extern template declaration
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NOEXCEPT
+ // EA_NOEXCEPT_IF(predicate)
+ // EA_NOEXCEPT_EXPR(expression)
+ //
+ // Portable wrapper for C++11 noexcept
+ // http://en.cppreference.com/w/cpp/language/noexcept
+ // http://en.cppreference.com/w/cpp/language/noexcept_spec
+ //
+ // Example usage:
+ // EA_NOEXCEPT
+ // EA_NOEXCEPT_IF(predicate)
+ // EA_NOEXCEPT_EXPR(expression)
+ //
+ // This function never throws an exception.
+ // void DoNothing() EA_NOEXCEPT
+ // { }
+ //
+ // This function throws an exception of T::T() throws an exception.
+ // template <class T>
+ // void DoNothing() EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(T()))
+ // { T t; }
+ //
+ #if !defined(EA_NOEXCEPT)
+ #if defined(EA_COMPILER_NO_NOEXCEPT)
+ #define EA_NOEXCEPT
+ #define EA_NOEXCEPT_IF(predicate)
+ #define EA_NOEXCEPT_EXPR(expression) false
+ #else
+ #define EA_NOEXCEPT noexcept
+ #define EA_NOEXCEPT_IF(predicate) noexcept((predicate))
+ #define EA_NOEXCEPT_EXPR(expression) noexcept((expression))
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NORETURN
+ //
+ // Wraps the C++11 noreturn attribute. See EA_COMPILER_NO_NORETURN
+ // http://en.cppreference.com/w/cpp/language/attributes
+ // http://msdn.microsoft.com/en-us/library/k6ktzx3s%28v=vs.80%29.aspx
+ // http://blog.aaronballman.com/2011/09/understanding-attributes/
+ //
+ // Example usage:
+ // EA_NORETURN void SomeFunction()
+ // { throw "error"; }
+ //
+ #if !defined(EA_NORETURN)
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1300) // VS2003 (VC7) and later
+ #define EA_NORETURN __declspec(noreturn)
+ #elif defined(EA_COMPILER_NO_NORETURN)
+ #define EA_NORETURN
+ #else
+ #define EA_NORETURN [[noreturn]]
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_CARRIES_DEPENDENCY
+ //
+ // Wraps the C++11 carries_dependency attribute
+ // http://en.cppreference.com/w/cpp/language/attributes
+ // http://blog.aaronballman.com/2011/09/understanding-attributes/
+ //
+ // Example usage:
+ // EA_CARRIES_DEPENDENCY int* SomeFunction()
+ // { return &mX; }
+ //
+ //
+ #if !defined(EA_CARRIES_DEPENDENCY)
+ #if defined(EA_COMPILER_NO_CARRIES_DEPENDENCY)
+ #define EA_CARRIES_DEPENDENCY
+ #else
+ #define EA_CARRIES_DEPENDENCY [[carries_dependency]]
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FALLTHROUGH
+ //
+ // [[fallthrough] is a C++17 standard attribute that appears in switch
+ // statements to indicate that the fallthrough from the previous case in the
+ // switch statement is intentially and not a bug.
+ //
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ // Example usage:
+ // void f(int n)
+ // {
+ // switch(n)
+ // {
+ // case 1:
+ // DoCase1();
+ // // Compiler may generate a warning for fallthrough behaviour
+ //
+ // case 2:
+ // DoCase2();
+ //
+ // EA_FALLTHROUGH;
+ // case 3:
+ // DoCase3();
+ // }
+ // }
+ //
+ #if !defined(EA_FALLTHROUGH)
+ #if defined(EA_COMPILER_NO_FALLTHROUGH)
+ #define EA_FALLTHROUGH
+ #else
+ #define EA_FALLTHROUGH [[fallthrough]]
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_NODISCARD
+ //
+ // [[nodiscard]] is a C++17 standard attribute that can be applied to a
+ // function declaration, enum, or class declaration. If a any of the list
+ // previously are returned from a function (without the user explicitly
+ // casting to void) the addition of the [[nodiscard]] attribute encourages
+ // the compiler to generate a warning about the user discarding the return
+ // value. This is a useful practice to encourage client code to check API
+ // error codes.
+ //
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ // Example usage:
+ //
+ // EA_NODISCARD int baz() { return 42; }
+ //
+ // void foo()
+ // {
+ // baz(); // warning: ignoring return value of function declared with 'nodiscard' attribute
+ // }
+ //
+ #if !defined(EA_NODISCARD)
+ #if defined(EA_COMPILER_NO_NODISCARD)
+ #define EA_NODISCARD
+ #else
+ #define EA_NODISCARD [[nodiscard]]
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_MAYBE_UNUSED
+ //
+ // [[maybe_unused]] is a C++17 standard attribute that suppresses warnings
+ // on unused entities that are declared as maybe_unused.
+ //
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ // Example usage:
+ // void foo(EA_MAYBE_UNUSED int i)
+ // {
+ // assert(i == 42); // warning suppressed when asserts disabled.
+ // }
+ //
+ #if !defined(EA_MAYBE_UNUSED)
+ #if defined(EA_COMPILER_NO_MAYBE_UNUSED)
+ #define EA_MAYBE_UNUSED
+ #else
+ #define EA_MAYBE_UNUSED [[maybe_unused]]
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NO_UBSAN
+ //
+ // The LLVM/Clang undefined behaviour sanitizer will not analyse a function tagged with the following attribute.
+ //
+ // https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#disabling-instrumentation-with-attribute-no-sanitize-undefined
+ //
+ // Example usage:
+ // EA_NO_UBSAN int SomeFunction() { ... }
+ //
+ #ifndef EA_NO_UBSAN
+ #if defined(EA_COMPILER_CLANG)
+ #define EA_NO_UBSAN __attribute__((no_sanitize("undefined")))
+ #else
+ #define EA_NO_UBSAN
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NO_ASAN
+ //
+ // The LLVM/Clang address sanitizer will not analyse a function tagged with the following attribute.
+ //
+ // https://clang.llvm.org/docs/AddressSanitizer.html#disabling-instrumentation-with-attribute-no-sanitize-address
+ //
+ // Example usage:
+ // EA_NO_ASAN int SomeFunction() { ... }
+ //
+ #ifndef EA_NO_ASAN
+ #if defined(EA_COMPILER_CLANG)
+ #define EA_NO_ASAN __attribute__((no_sanitize("address")))
+ #else
+ #define EA_NO_ASAN
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_ASAN_ENABLED
+ //
+ // Defined as 0 or 1. It's value depends on the compile environment.
+ // Specifies whether the code is being built with Clang's Address Sanitizer.
+ //
+ #if defined(__has_feature)
+ #if __has_feature(address_sanitizer)
+ #define EA_ASAN_ENABLED 1
+ #else
+ #define EA_ASAN_ENABLED 0
+ #endif
+ #else
+ #define EA_ASAN_ENABLED 0
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NON_COPYABLE
+ //
+ // This macro defines as a class as not being copy-constructable
+ // or assignable. This is useful for preventing class instances
+ // from being passed to functions by value, is useful for preventing
+ // compiler warnings by some compilers about the inability to
+ // auto-generate a copy constructor and assignment, and is useful
+ // for simply declaring in the interface that copy semantics are
+ // not supported by the class. Your class needs to have at least a
+ // default constructor when using this macro.
+ //
+ // Beware that this class works by declaring a private: section of
+ // the class in the case of compilers that don't support C++11 deleted
+ // functions.
+ //
+ // Note: With some pre-C++11 compilers (e.g. Green Hills), you may need
+ // to manually define an instances of the hidden functions, even
+ // though they are not used.
+ //
+ // Example usage:
+ // class Widget {
+ // Widget();
+ // . . .
+ // EA_NON_COPYABLE(Widget)
+ // };
+ //
+ #if !defined(EA_NON_COPYABLE)
+ #if defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ #define EA_NON_COPYABLE(EAClass_) \
+ private: \
+ EA_DISABLE_VC_WARNING(4822); /* local class member function does not have a body */ \
+ EAClass_(const EAClass_&); \
+ void operator=(const EAClass_&); \
+ EA_RESTORE_VC_WARNING();
+ #else
+ #define EA_NON_COPYABLE(EAClass_) \
+ EA_DISABLE_VC_WARNING(4822); /* local class member function does not have a body */ \
+ EAClass_(const EAClass_&) = delete; \
+ void operator=(const EAClass_&) = delete; \
+ EA_RESTORE_VC_WARNING();
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FUNCTION_DELETE
+ //
+ // Semi-portable way of specifying a deleted function which allows for
+ // cleaner code in class declarations.
+ //
+ // Example usage:
+ //
+ // class Example
+ // {
+ // private: // For portability with pre-C++11 compilers, make the function private.
+ // void foo() EA_FUNCTION_DELETE;
+ // };
+ //
+ // Note: EA_FUNCTION_DELETE'd functions should be private to prevent the
+ // functions from being called even when the compiler does not support
+ // deleted functions. Some compilers (e.g. Green Hills) that don't support
+ // C++11 deleted functions can require that you define the function,
+ // which you can do in the associated source file for the class.
+ //
+ #if defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ #define EA_FUNCTION_DELETE
+ #else
+ #define EA_FUNCTION_DELETE = delete
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_DEFAULT_CTOR
+ //
+ // Disables the compiler generated default constructor. This macro is
+ // provided to improve portability and clarify intent of code.
+ //
+ // Example usage:
+ //
+ // class Example
+ // {
+ // private:
+ // EA_DISABLE_DEFAULT_CTOR(Example);
+ // };
+ //
+ #define EA_DISABLE_DEFAULT_CTOR(ClassName) ClassName() EA_FUNCTION_DELETE
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_COPY_CTOR
+ //
+ // Disables the compiler generated copy constructor. This macro is
+ // provided to improve portability and clarify intent of code.
+ //
+ // Example usage:
+ //
+ // class Example
+ // {
+ // private:
+ // EA_DISABLE_COPY_CTOR(Example);
+ // };
+ //
+ #define EA_DISABLE_COPY_CTOR(ClassName) ClassName(const ClassName &) EA_FUNCTION_DELETE
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_MOVE_CTOR
+ //
+ // Disables the compiler generated move constructor. This macro is
+ // provided to improve portability and clarify intent of code.
+ //
+ // Example usage:
+ //
+ // class Example
+ // {
+ // private:
+ // EA_DISABLE_MOVE_CTOR(Example);
+ // };
+ //
+ #define EA_DISABLE_MOVE_CTOR(ClassName) ClassName(ClassName&&) EA_FUNCTION_DELETE
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ASSIGNMENT_OPERATOR
+ //
+ // Disables the compiler generated assignment operator. This macro is
+ // provided to improve portability and clarify intent of code.
+ //
+ // Example usage:
+ //
+ // class Example
+ // {
+ // private:
+ // EA_DISABLE_ASSIGNMENT_OPERATOR(Example);
+ // };
+ //
+ #define EA_DISABLE_ASSIGNMENT_OPERATOR(ClassName) ClassName & operator=(const ClassName &) EA_FUNCTION_DELETE
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_MOVE_OPERATOR
+ //
+ // Disables the compiler generated move operator. This macro is
+ // provided to improve portability and clarify intent of code.
+ //
+ // Example usage:
+ //
+ // class Example
+ // {
+ // private:
+ // EA_DISABLE_MOVE_OPERATOR(Example);
+ // };
+ //
+ #define EA_DISABLE_MOVE_OPERATOR(ClassName) ClassName & operator=(ClassName&&) EA_FUNCTION_DELETE
+
+ // ------------------------------------------------------------------------
+ // EANonCopyable
+ //
+ // Declares a class as not supporting copy construction or assignment.
+ // May be more reliable with some situations that EA_NON_COPYABLE alone,
+ // though it may result in more code generation.
+ //
+ // Note that VC++ will generate warning C4625 and C4626 if you use EANonCopyable
+ // and you are compiling with /W4 and /Wall. There is no resolution but
+ // to redelare EA_NON_COPYABLE in your subclass or disable the warnings with
+ // code like this:
+ // EA_DISABLE_VC_WARNING(4625 4626)
+ // ...
+ // EA_RESTORE_VC_WARNING()
+ //
+ // Example usage:
+ // struct Widget : EANonCopyable {
+ // . . .
+ // };
+ //
+ #ifdef __cplusplus
+ struct EANonCopyable
+ {
+ #if defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) || defined(__EDG__)
+ // EDG doesn't appear to behave properly for the case of defaulted constructors;
+ // it generates a mistaken warning about missing default constructors.
+ EANonCopyable() {} // Putting {} here has the downside that it allows a class to create itself,
+ ~EANonCopyable() {} // but avoids linker errors that can occur with some compilers (e.g. Green Hills).
+ #else
+ EANonCopyable() = default;
+ ~EANonCopyable() = default;
+ #endif
+
+ EA_NON_COPYABLE(EANonCopyable)
+ };
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_OPTIMIZE_OFF / EA_OPTIMIZE_ON
+ //
+ // Implements portable inline optimization enabling/disabling.
+ // Usage of these macros must be in order OFF then ON. This is
+ // because the OFF macro pushes a set of settings and the ON
+ // macro pops them. The nesting of OFF/ON sets (e.g. OFF, OFF, ON, ON)
+ // is not guaranteed to work on all platforms.
+ //
+ // This is often used to allow debugging of some code that's
+ // otherwise compiled with undebuggable optimizations. It's also
+ // useful for working around compiler code generation problems
+ // that occur in optimized builds.
+ //
+ // Some compilers (e.g. VC++) don't allow doing this within a function and
+ // so the usage must be outside a function, as with the example below.
+ // GCC on x86 appears to have some problem with argument passing when
+ // using EA_OPTIMIZE_OFF in optimized builds.
+ //
+ // Example usage:
+ // // Disable optimizations for SomeFunction.
+ // EA_OPTIMIZE_OFF()
+ // void SomeFunction()
+ // {
+ // ...
+ // }
+ // EA_OPTIMIZE_ON()
+ //
+ #if !defined(EA_OPTIMIZE_OFF)
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_OPTIMIZE_OFF() __pragma(optimize("", off))
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION > 4004) && (defined(__i386__) || defined(__x86_64__)) // GCC 4.4+ - Seems to work only on x86/Linux so far. However, GCC 4.4 itself appears broken and screws up parameter passing conventions.
+ #define EA_OPTIMIZE_OFF() \
+ _Pragma("GCC push_options") \
+ _Pragma("GCC optimize 0")
+ #elif defined(EA_COMPILER_CLANG) && (!defined(EA_PLATFORM_ANDROID) || (EA_COMPILER_VERSION >= 380))
+ #define EA_OPTIMIZE_OFF() \
+ EA_DISABLE_CLANG_WARNING(-Wunknown-pragmas) \
+ _Pragma("clang optimize off") \
+ EA_RESTORE_CLANG_WARNING()
+ #else
+ #define EA_OPTIMIZE_OFF()
+ #endif
+ #endif
+
+ #if !defined(EA_OPTIMIZE_ON)
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_OPTIMIZE_ON() __pragma(optimize("", on))
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION > 4004) && (defined(__i386__) || defined(__x86_64__)) // GCC 4.4+ - Seems to work only on x86/Linux so far. However, GCC 4.4 itself appears broken and screws up parameter passing conventions.
+ #define EA_OPTIMIZE_ON() _Pragma("GCC pop_options")
+ #elif defined(EA_COMPILER_CLANG) && (!defined(EA_PLATFORM_ANDROID) || (EA_COMPILER_VERSION >= 380))
+ #define EA_OPTIMIZE_ON() \
+ EA_DISABLE_CLANG_WARNING(-Wunknown-pragmas) \
+ _Pragma("clang optimize on") \
+ EA_RESTORE_CLANG_WARNING()
+ #else
+ #define EA_OPTIMIZE_ON()
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_SIGNED_RIGHT_SHIFT_IS_UNSIGNED
+ //
+ // Defined if right shifts of signed integers (i.e. arithmetic shifts) fail
+ // to propogate the high bit downward, and thus preserve sign. Most hardware
+ // and their corresponding compilers do this.
+ //
+ // <No current platform fails to propogate sign bits on right signed shifts>
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/config/eaplatform.h b/EASTL/test/packages/EABase/include/Common/EABase/config/eaplatform.h
new file mode 100644
index 0000000..37c1350
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/config/eaplatform.h
@@ -0,0 +1,738 @@
+/*-----------------------------------------------------------------------------
+ * config/eaplatform.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *-----------------------------------------------------------------------------
+ * Currently supported platform indentification defines include:
+ */
+#ifdef EA_PLATFORM_PS4 // ifdef for code stripping purposes
+// EA_PLATFORM_PS4 (EA_PLATFORM_KETTLE)
+#endif
+#ifdef EA_PLATFORM_XBOXONE // ifdef for code stripping purposes
+ // EA_PLATFORM_XBOXONE (EA_PLATFORM_CAPILANO)
+ // EA_PLATFORM_XBOXONE_XDK (EA_PLATFORM_CAPILANO_XDK), set by capilano_config package
+ // EA_PLATFORM_XBOXONE_ADK (EA_PLATFORM_CAPILANO_ADK), set by capilano_config package
+#endif
+// EA_PLATFORM_ANDROID
+// EA_PLATFORM_APPLE
+// EA_PLATFORM_IPHONE
+// EA_PLATFORM_IPHONE_SIMULATOR
+// EA_PLATFORM_OSX
+// EA_PLATFORM_LINUX
+// EA_PLATFORM_SAMSUNG_TV
+// EA_PLATFORM_WINDOWS
+// EA_PLATFORM_WIN32
+// EA_PLATFORM_WIN64
+// EA_PLATFORM_WINDOWS_PHONE
+// EA_PLATFORM_WINRT
+// EA_PLATFORM_SUN
+// EA_PLATFORM_LRB (Larrabee)
+// EA_PLATFORM_POSIX (pseudo-platform; may be defined along with another platform like EA_PLATFORM_LINUX, EA_PLATFORM_UNIX, EA_PLATFORM_QNX)
+// EA_PLATFORM_UNIX (pseudo-platform; may be defined along with another platform like EA_PLATFORM_LINUX)
+// EA_PLATFORM_CYGWIN (pseudo-platform; may be defined along with another platform like EA_PLATFORM_LINUX)
+// EA_PLATFORM_MINGW (pseudo-platform; may be defined along with another platform like EA_PLATFORM_WINDOWS)
+// EA_PLATFORM_MICROSOFT (pseudo-platform; may be defined along with another platform like EA_PLATFORM_WINDOWS)
+//
+// EA_ABI_ARM_LINUX (a.k.a. "eabi". for all platforms that use the CodeSourcery GNU/Linux toolchain, like Android)
+// EA_ABI_ARM_APPLE (similar to eabi but not identical)
+// EA_ABI_ARM64_APPLE (similar to eabi but not identical) https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html
+// EA_ABI_ARM_WINCE (similar to eabi but not identical)
+//
+// Other definitions emanated from this file inclue:
+// EA_PLATFORM_NAME = <string>
+// EA_PLATFORM_DESCRIPTION = <string>
+// EA_PROCESSOR_XXX
+// EA_MISALIGNED_SUPPORT_LEVEL=0|1|2
+// EA_SYSTEM_LITTLE_ENDIAN | EA_SYSTEM_BIG_ENDIAN
+// EA_ASM_STYLE_ATT | EA_ASM_STYLE_INTEL | EA_ASM_STYLE_MOTOROLA
+// EA_PLATFORM_PTR_SIZE = <integer size in bytes>
+// EA_PLATFORM_WORD_SIZE = <integer size in bytes>
+// EA_CACHE_LINE_SIZE = <integer size in bytes>
+//---------------------------------------------------------------------------
+
+/*
+ EA_PLATFORM_MOBILE
+ EA_PLATFORM_MOBILE is a peer to EA_PLATORM_DESKTOP and EA_PLATFORM_CONSOLE. Their definition is qualitative rather
+ than quantitative, and refers to the general (usually weaker) capabilities of the machine. Mobile devices have a
+ similar set of weaknesses that are useful to generally categorize. The primary motivation is to avoid code that
+ tests for multiple mobile platforms on a line and needs to be updated every time we get a new one.
+ For example, mobile platforms tend to have weaker ARM processors, don't have full multiple processor support,
+ are hand-held, don't have mice (though may have touch screens or basic cursor controls), have writable solid
+ state permanent storage. Production user code shouldn't have too many expectations about the meaning of this define.
+
+ EA_PLATFORM_DESKTOP
+ This is similar to EA_PLATFORM_MOBILE in its qualitative nature and refers to platforms that are powerful.
+ For example, they nearly always have virtual memory, mapped memory, hundreds of GB of writable disk storage,
+ TCP/IP network connections, mice, keyboards, 512+ MB of RAM, multiprocessing, multiple display support.
+ Production user code shouldn't have too many expectations about the meaning of this define.
+
+ EA_PLATFORM_CONSOLE
+ This is similar to EA_PLATFORM_MOBILE in its qualitative nature and refers to platforms that are consoles.
+ This means platforms that are connected to TVs, are fairly powerful (especially graphics-wise), are tightly
+ controlled by vendors, tend not to have mapped memory, tend to have TCP/IP, don't have multiple process support
+ though they might have multiple CPUs, support TV output only. Production user code shouldn't have too many
+ expectations about the meaning of this define.
+
+*/
+
+
+#ifndef INCLUDED_eaplatform_H
+#define INCLUDED_eaplatform_H
+
+
+// Cygwin
+// This is a pseudo-platform which will be defined along with EA_PLATFORM_LINUX when
+// using the Cygwin build environment.
+#if defined(__CYGWIN__)
+ #define EA_PLATFORM_CYGWIN 1
+ #define EA_PLATFORM_DESKTOP 1
+#endif
+
+// MinGW
+// This is a pseudo-platform which will be defined along with EA_PLATFORM_WINDOWS when
+// using the MinGW Windows build environment.
+#if defined(__MINGW32__) || defined(__MINGW64__)
+ #define EA_PLATFORM_MINGW 1
+ #define EA_PLATFORM_DESKTOP 1
+#endif
+
+#if defined(EA_PLATFORM_PS4) || defined(__ORBIS__) || defined(EA_PLATFORM_KETTLE)
+ // PlayStation 4
+ // Orbis was Sony's code-name for the platform, which is now obsolete.
+ // Kettle was an EA-specific code-name for the platform, which is now obsolete.
+ #if defined(EA_PLATFORM_PS4)
+ #undef EA_PLATFORM_PS4
+ #endif
+ #define EA_PLATFORM_PS4 1
+
+ // Backward compatibility:
+ #if defined(EA_PLATFORM_KETTLE)
+ #undef EA_PLATFORM_KETTLE
+ #endif
+ // End backward compatbility
+
+ #define EA_PLATFORM_KETTLE 1
+ #define EA_PLATFORM_NAME "PS4"
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "PS4 on x64"
+ #define EA_PLATFORM_CONSOLE 1
+ #define EA_PLATFORM_SONY 1
+ #define EA_PLATFORM_POSIX 1
+ // #define EA_POSIX_THREADS_AVAILABLE 1 // POSIX threading API is available but discouraged. Sony indicated use of the scePthreads* API is preferred.
+ #define EA_PROCESSOR_X86_64 1
+ #if defined(__GNUC__) || defined(__clang__)
+ #define EA_ASM_STYLE_ATT 1
+ #endif
+
+#elif defined(EA_PLATFORM_XBOXONE) || defined(_DURANGO) || defined(_XBOX_ONE) || defined(EA_PLATFORM_CAPILANO) || defined(_GAMING_XBOX)
+ // XBox One
+ // Durango was Microsoft's code-name for the platform, which is now obsolete.
+ // Microsoft uses _DURANGO instead of some variation of _XBOX, though it's not natively defined by the compiler.
+ // Capilano was an EA-specific code-name for the platform, which is now obsolete.
+ #if defined(EA_PLATFORM_XBOXONE)
+ #undef EA_PLATFORM_XBOXONE
+ #endif
+ #define EA_PLATFORM_XBOXONE 1
+
+ // Backward compatibility:
+ #if defined(EA_PLATFORM_CAPILANO)
+ #undef EA_PLATFORM_CAPILANO
+ #endif
+ #define EA_PLATFORM_CAPILANO 1
+ #if defined(EA_PLATFORM_CAPILANO_XDK) && !defined(EA_PLATFORM_XBOXONE_XDK)
+ #define EA_PLATFORM_XBOXONE_XDK 1
+ #endif
+ #if defined(EA_PLATFORM_CAPILANO_ADK) && !defined(EA_PLATFORM_XBOXONE_ADK)
+ #define EA_PLATFORM_XBOXONE_ADK 1
+ #endif
+ // End backward compatibility
+
+ #if !defined(_DURANGO)
+ #define _DURANGO
+ #endif
+ #define EA_PLATFORM_NAME "XBox One"
+ //#define EA_PROCESSOR_X86 Currently our policy is that we don't define this, even though x64 is something of a superset of x86.
+ #define EA_PROCESSOR_X86_64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "XBox One on x64"
+ #define EA_ASM_STYLE_INTEL 1
+ #define EA_PLATFORM_CONSOLE 1
+ #define EA_PLATFORM_MICROSOFT 1
+
+ // WINAPI_FAMILY defines - mirrored from winapifamily.h
+ #define EA_WINAPI_FAMILY_APP 1000
+ #define EA_WINAPI_FAMILY_DESKTOP_APP 1001
+ #define EA_WINAPI_FAMILY_PHONE_APP 1002
+ #define EA_WINAPI_FAMILY_TV_APP 1003
+ #define EA_WINAPI_FAMILY_TV_TITLE 1004
+ #define EA_WINAPI_FAMILY_GAMES 1006
+
+ #if defined(WINAPI_FAMILY)
+ #include <winapifamily.h>
+ #if defined(WINAPI_FAMILY_TV_TITLE) && WINAPI_FAMILY == WINAPI_FAMILY_TV_TITLE
+ #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_TV_TITLE
+ #elif defined(WINAPI_FAMILY_DESKTOP_APP) && WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
+ #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_DESKTOP_APP
+ #elif defined(WINAPI_FAMILY_GAMES) && WINAPI_FAMILY == WINAPI_FAMILY_GAMES
+ #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_GAMES
+ #else
+ #error Unsupported WINAPI_FAMILY
+ #endif
+ #else
+ #error WINAPI_FAMILY should always be defined on Capilano.
+ #endif
+
+ // Macro to determine if a partition is enabled.
+ #define EA_WINAPI_FAMILY_PARTITION(Partition) (Partition)
+
+ #if EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_DESKTOP_APP
+ #define EA_WINAPI_PARTITION_CORE 1
+ #define EA_WINAPI_PARTITION_DESKTOP 1
+ #define EA_WINAPI_PARTITION_APP 1
+ #define EA_WINAPI_PARTITION_PC_APP 0
+ #define EA_WIANPI_PARTITION_PHONE 0
+ #define EA_WINAPI_PARTITION_TV_APP 0
+ #define EA_WINAPI_PARTITION_TV_TITLE 0
+ #define EA_WINAPI_PARTITION_GAMES 0
+ #elif EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_TV_TITLE
+ #define EA_WINAPI_PARTITION_CORE 1
+ #define EA_WINAPI_PARTITION_DESKTOP 0
+ #define EA_WINAPI_PARTITION_APP 0
+ #define EA_WINAPI_PARTITION_PC_APP 0
+ #define EA_WIANPI_PARTITION_PHONE 0
+ #define EA_WINAPI_PARTITION_TV_APP 0
+ #define EA_WINAPI_PARTITION_TV_TITLE 1
+ #define EA_WINAPI_PARTITION_GAMES 0
+ #elif EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_GAMES
+ #define EA_WINAPI_PARTITION_CORE 1
+ #define EA_WINAPI_PARTITION_DESKTOP 0
+ #define EA_WINAPI_PARTITION_APP 0
+ #define EA_WINAPI_PARTITION_PC_APP 0
+ #define EA_WIANPI_PARTITION_PHONE 0
+ #define EA_WINAPI_PARTITION_TV_APP 0
+ #define EA_WINAPI_PARTITION_TV_TITLE 0
+ #define EA_WINAPI_PARTITION_GAMES 1
+ #else
+ #error Unsupported WINAPI_FAMILY
+ #endif
+
+ #if EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_GAMES)
+ #define CS_UNDEFINED_STRING 1
+ #define CS_UNDEFINED_STRING 1
+ #endif
+
+ #if EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_TV_TITLE)
+ #define EA_PLATFORM_XBOXONE_XDK 1
+ #endif
+#elif defined(EA_PLATFORM_LRB) || defined(__LRB__) || (defined(__EDG__) && defined(__ICC) && defined(__x86_64__))
+ #undef EA_PLATFORM_LRB
+ #define EA_PLATFORM_LRB 1
+ #define EA_PLATFORM_NAME "Larrabee"
+ #define EA_PLATFORM_DESCRIPTION "Larrabee on LRB1"
+ #define EA_PROCESSOR_X86_64 1
+ #if defined(BYTE_ORDER) && (BYTE_ORDER == 4321)
+ #define EA_SYSTEM_BIG_ENDIAN 1
+ #else
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #endif
+ #define EA_PROCESSOR_LRB 1
+ #define EA_PROCESSOR_LRB1 1 // Larrabee version 1
+ #define EA_ASM_STYLE_ATT 1 // Both types of asm style
+ #define EA_ASM_STYLE_INTEL 1 // are supported.
+ #define EA_PLATFORM_DESKTOP 1
+
+// Android (Google phone OS)
+#elif defined(EA_PLATFORM_ANDROID) || defined(__ANDROID__)
+ #undef EA_PLATFORM_ANDROID
+ #define EA_PLATFORM_ANDROID 1
+ #define EA_PLATFORM_LINUX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_POSIX 1
+ #define EA_PLATFORM_NAME "Android"
+ #define EA_ASM_STYLE_ATT 1
+ #if defined(__arm__)
+ #define EA_ABI_ARM_LINUX 1 // a.k.a. "ARM eabi"
+ #define EA_PROCESSOR_ARM32 1
+ #define EA_PLATFORM_DESCRIPTION "Android on ARM"
+ #elif defined(__aarch64__)
+ #define EA_PROCESSOR_ARM64 1
+ #define EA_PLATFORM_DESCRIPTION "Android on ARM64"
+ #elif defined(__i386__)
+ #define EA_PROCESSOR_X86 1
+ #define EA_PLATFORM_DESCRIPTION "Android on x86"
+ #elif defined(__x86_64)
+ #define EA_PROCESSOR_X86_64 1
+ #define EA_PLATFORM_DESCRIPTION "Android on x64"
+ #else
+ #error Unknown processor
+ #endif
+ #if !defined(EA_SYSTEM_BIG_ENDIAN) && !defined(EA_SYSTEM_LITTLE_ENDIAN)
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #endif
+ #define EA_PLATFORM_MOBILE 1
+
+// Samsung SMART TV - a Linux-based smart TV
+#elif defined(EA_PLATFORM_SAMSUNG_TV)
+ #undef EA_PLATFORM_SAMSUNG_TV
+ #define EA_PLATFORM_SAMSUNG_TV 1
+ #define EA_PLATFORM_LINUX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_POSIX 1
+ #define EA_PLATFORM_NAME "SamsungTV"
+ #define EA_PLATFORM_DESCRIPTION "Samsung SMART TV on ARM"
+ #define EA_ASM_STYLE_ATT 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PROCESSOR_ARM32 1
+ #define EA_ABI_ARM_LINUX 1 // a.k.a. "ARM eabi"
+ #define EA_PROCESSOR_ARM7 1
+
+#elif defined(__APPLE__) && __APPLE__
+ #include <TargetConditionals.h>
+
+ // Apple family of operating systems.
+ #define EA_PLATFORM_APPLE
+ #define EA_PLATFORM_POSIX 1
+
+ // iPhone
+ // TARGET_OS_IPHONE will be undefined on an unknown compiler, and will be defined on gcc.
+ #if defined(EA_PLATFORM_IPHONE) || defined(__IPHONE__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || (defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR)
+ #undef EA_PLATFORM_IPHONE
+ #define EA_PLATFORM_IPHONE 1
+ #define EA_PLATFORM_NAME "iPhone"
+ #define EA_ASM_STYLE_ATT 1
+ #define EA_POSIX_THREADS_AVAILABLE 1
+ #if defined(__arm__)
+ #define EA_ABI_ARM_APPLE 1
+ #define EA_PROCESSOR_ARM32 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "iPhone on ARM"
+ #elif defined(__aarch64__) || defined(__AARCH64)
+ #define EA_ABI_ARM64_APPLE 1
+ #define EA_PROCESSOR_ARM64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "iPhone on ARM64"
+ #elif defined(__i386__)
+ #define EA_PLATFORM_IPHONE_SIMULATOR 1
+ #define EA_PROCESSOR_X86 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "iPhone simulator on x86"
+ #elif defined(__x86_64) || defined(__amd64)
+ #define EA_PROCESSOR_X86_64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "iPhone simulator on x64"
+ #else
+ #error Unknown processor
+ #endif
+ #define EA_PLATFORM_MOBILE 1
+
+ // Macintosh OSX
+ // TARGET_OS_MAC is defined by the Metrowerks and older AppleC compilers.
+ // Howerver, TARGET_OS_MAC is defined to be 1 in all cases.
+ // __i386__ and __intel__ are defined by the GCC compiler.
+ // __dest_os is defined by the Metrowerks compiler.
+ // __MACH__ is defined by the Metrowerks and GCC compilers.
+ // powerc and __powerc are defined by the Metrowerks and GCC compilers.
+ #elif defined(EA_PLATFORM_OSX) || defined(__MACH__) || (defined(__MSL__) && (__dest_os == __mac_os_x))
+ #undef EA_PLATFORM_OSX
+ #define EA_PLATFORM_OSX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_POSIX 1
+ //#define EA_PLATFORM_BSD 1 We don't currently define this. OSX has some BSD history but a lot of the API is different.
+ #define EA_PLATFORM_NAME "OSX"
+ #if defined(__i386__) || defined(__intel__)
+ #define EA_PROCESSOR_X86 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "OSX on x86"
+ #elif defined(__x86_64) || defined(__amd64)
+ #define EA_PROCESSOR_X86_64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "OSX on x64"
+ #elif defined(__arm__)
+ #define EA_ABI_ARM_APPLE 1
+ #define EA_PROCESSOR_ARM32 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "OSX on ARM"
+ #elif defined(__aarch64__) || defined(__AARCH64)
+ #define EA_ABI_ARM64_APPLE 1
+ #define EA_PROCESSOR_ARM64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "OSX on ARM64"
+ #elif defined(__POWERPC64__) || defined(__powerpc64__)
+ #define EA_PROCESSOR_POWERPC 1
+ #define EA_PROCESSOR_POWERPC_64 1
+ #define EA_SYSTEM_BIG_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "OSX on PowerPC 64"
+ #elif defined(__POWERPC__) || defined(__powerpc__)
+ #define EA_PROCESSOR_POWERPC 1
+ #define EA_PROCESSOR_POWERPC_32 1
+ #define EA_SYSTEM_BIG_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "OSX on PowerPC"
+ #else
+ #error Unknown processor
+ #endif
+ #if defined(__GNUC__)
+ #define EA_ASM_STYLE_ATT 1
+ #else
+ #define EA_ASM_STYLE_MOTOROLA 1
+ #endif
+ #define EA_PLATFORM_DESKTOP 1
+ #else
+ #error Unknown Apple Platform
+ #endif
+
+// Linux
+// __linux and __linux__ are defined by the GCC and Borland compiler.
+// __i386__ and __intel__ are defined by the GCC compiler.
+// __i386__ is defined by the Metrowerks compiler.
+// _M_IX86 is defined by the Borland compiler.
+// __sparc__ is defined by the GCC compiler.
+// __powerpc__ is defined by the GCC compiler.
+// __ARM_EABI__ is defined by GCC on an ARM v6l (Raspberry Pi 1)
+// __ARM_ARCH_7A__ is defined by GCC on an ARM v7l (Raspberry Pi 2)
+#elif defined(EA_PLATFORM_LINUX) || (defined(__linux) || defined(__linux__))
+ #undef EA_PLATFORM_LINUX
+ #define EA_PLATFORM_LINUX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_POSIX 1
+ #define EA_PLATFORM_NAME "Linux"
+ #if defined(__i386__) || defined(__intel__) || defined(_M_IX86)
+ #define EA_PROCESSOR_X86 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Linux on x86"
+ #elif defined(__ARM_ARCH_7A__) || defined(__ARM_EABI__)
+ #define EA_ABI_ARM_LINUX 1
+ #define EA_PROCESSOR_ARM32 1
+ #define EA_PLATFORM_DESCRIPTION "Linux on ARM 6/7 32-bits"
+ #elif defined(__aarch64__) || defined(__AARCH64)
+ #define EA_PROCESSOR_ARM64 1
+ #define EA_PLATFORM_DESCRIPTION "Linux on ARM64"
+ #elif defined(__x86_64__)
+ #define EA_PROCESSOR_X86_64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Linux on x64"
+ #elif defined(__powerpc64__)
+ #define EA_PROCESSOR_POWERPC 1
+ #define EA_PROCESSOR_POWERPC_64 1
+ #define EA_SYSTEM_BIG_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Linux on PowerPC 64"
+ #elif defined(__powerpc__)
+ #define EA_PROCESSOR_POWERPC 1
+ #define EA_PROCESSOR_POWERPC_32 1
+ #define EA_SYSTEM_BIG_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Linux on PowerPC"
+ #else
+ #error Unknown processor
+ #error Unknown endianness
+ #endif
+ #if defined(__GNUC__)
+ #define EA_ASM_STYLE_ATT 1
+ #endif
+ #define EA_PLATFORM_DESKTOP 1
+
+
+#elif defined(EA_PLATFORM_BSD) || (defined(__BSD__) || defined(__FreeBSD__))
+ #undef EA_PLATFORM_BSD
+ #define EA_PLATFORM_BSD 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_POSIX 1 // BSD's posix complaince is not identical to Linux's
+ #define EA_PLATFORM_NAME "BSD Unix"
+ #if defined(__i386__) || defined(__intel__)
+ #define EA_PROCESSOR_X86 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "BSD on x86"
+ #elif defined(__x86_64__)
+ #define EA_PROCESSOR_X86_64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "BSD on x64"
+ #elif defined(__powerpc64__)
+ #define EA_PROCESSOR_POWERPC 1
+ #define EA_PROCESSOR_POWERPC_64 1
+ #define EA_SYSTEM_BIG_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "BSD on PowerPC 64"
+ #elif defined(__powerpc__)
+ #define EA_PROCESSOR_POWERPC 1
+ #define EA_PROCESSOR_POWERPC_32 1
+ #define EA_SYSTEM_BIG_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "BSD on PowerPC"
+ #else
+ #error Unknown processor
+ #error Unknown endianness
+ #endif
+ #if !defined(EA_PLATFORM_FREEBSD) && defined(__FreeBSD__)
+ #define EA_PLATFORM_FREEBSD 1 // This is a variation of BSD.
+ #endif
+ #if defined(__GNUC__)
+ #define EA_ASM_STYLE_ATT 1
+ #endif
+ #define EA_PLATFORM_DESKTOP 1
+
+
+#elif defined(EA_PLATFORM_WINDOWS_PHONE)
+ #undef EA_PLATFORM_WINDOWS_PHONE
+ #define EA_PLATFORM_WINDOWS_PHONE 1
+ #define EA_PLATFORM_NAME "Windows Phone"
+ #if defined(_M_AMD64) || defined(_AMD64_) || defined(__x86_64__)
+ #define EA_PROCESSOR_X86_64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Windows Phone on x64"
+ #elif defined(_M_IX86) || defined(_X86_)
+ #define EA_PROCESSOR_X86 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Windows Phone on X86"
+ #elif defined(_M_ARM)
+ #define EA_ABI_ARM_WINCE 1
+ #define EA_PROCESSOR_ARM32 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Windows Phone on ARM"
+ #else //Possibly other Windows Phone variants
+ #error Unknown processor
+ #error Unknown endianness
+ #endif
+ #define EA_PLATFORM_MICROSOFT 1
+
+ // WINAPI_FAMILY defines - mirrored from winapifamily.h
+ #define EA_WINAPI_FAMILY_APP 1
+ #define EA_WINAPI_FAMILY_DESKTOP_APP 2
+ #define EA_WINAPI_FAMILY_PHONE_APP 3
+
+ #if defined(WINAPI_FAMILY)
+ #include <winapifamily.h>
+ #if WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP
+ #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_PHONE_APP
+ #else
+ #error Unsupported WINAPI_FAMILY for Windows Phone
+ #endif
+ #else
+ #error WINAPI_FAMILY should always be defined on Windows Phone.
+ #endif
+
+ // Macro to determine if a partition is enabled.
+ #define EA_WINAPI_FAMILY_PARTITION(Partition) (Partition)
+
+ // Enable the appropriate partitions for the current family
+ #if EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_PHONE_APP
+ # define EA_WINAPI_PARTITION_CORE 1
+ # define EA_WINAPI_PARTITION_PHONE 1
+ # define EA_WINAPI_PARTITION_APP 1
+ #else
+ # error Unsupported WINAPI_FAMILY for Windows Phone
+ #endif
+
+
+// Windows
+// _WIN32 is defined by the VC++, Intel and GCC compilers.
+// _WIN64 is defined by the VC++, Intel and GCC compilers.
+// __WIN32__ is defined by the Borland compiler.
+// __INTEL__ is defined by the Metrowerks compiler.
+// _M_IX86, _M_AMD64 and _M_IA64 are defined by the VC++, Intel, and Borland compilers.
+// _X86_, _AMD64_, and _IA64_ are defined by the Metrowerks compiler.
+// _M_ARM is defined by the VC++ compiler.
+#elif (defined(EA_PLATFORM_WINDOWS) || (defined(_WIN32) || defined(__WIN32__) || defined(_WIN64))) && !defined(CS_UNDEFINED_STRING)
+ #undef EA_PLATFORM_WINDOWS
+ #define EA_PLATFORM_WINDOWS 1
+ #define EA_PLATFORM_NAME "Windows"
+ #ifdef _WIN64 // VC++ defines both _WIN32 and _WIN64 when compiling for Win64.
+ #define EA_PLATFORM_WIN64 1
+ #else
+ #define EA_PLATFORM_WIN32 1
+ #endif
+ #if defined(_M_AMD64) || defined(_AMD64_) || defined(__x86_64__)
+ #define EA_PROCESSOR_X86_64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Windows on x64"
+ #elif defined(_M_IX86) || defined(_X86_)
+ #define EA_PROCESSOR_X86 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Windows on X86"
+ #elif defined(_M_IA64) || defined(_IA64_)
+ #define EA_PROCESSOR_IA64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Windows on IA-64"
+ #elif defined(_M_ARM)
+ #define EA_ABI_ARM_WINCE 1
+ #define EA_PROCESSOR_ARM32 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Windows on ARM"
+ #elif defined(_M_ARM64)
+ #define EA_PROCESSOR_ARM64 1
+ #define EA_SYSTEM_LITTLE_ENDIAN 1
+ #define EA_PLATFORM_DESCRIPTION "Windows on ARM64"
+ #else //Possibly other Windows CE variants
+ #error Unknown processor
+ #error Unknown endianness
+ #endif
+ #if defined(__GNUC__)
+ #define EA_ASM_STYLE_ATT 1
+ #elif defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL)
+ #define EA_ASM_STYLE_INTEL 1
+ #endif
+ #define EA_PLATFORM_DESKTOP 1
+ #define EA_PLATFORM_MICROSOFT 1
+
+ // WINAPI_FAMILY defines to support Windows 8 Metro Apps - mirroring winapifamily.h in the Windows 8 SDK
+ #define EA_WINAPI_FAMILY_APP 1000
+ #define EA_WINAPI_FAMILY_DESKTOP_APP 1001
+ #define EA_WINAPI_FAMILY_GAMES 1006
+
+ #if defined(WINAPI_FAMILY)
+ #if defined(_MSC_VER)
+ #pragma warning(push, 0)
+ #endif
+ #include <winapifamily.h>
+ #if defined(_MSC_VER)
+ #pragma warning(pop)
+ #endif
+ #if defined(WINAPI_FAMILY_DESKTOP_APP) && WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
+ #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_DESKTOP_APP
+ #elif defined(WINAPI_FAMILY_APP) && WINAPI_FAMILY == WINAPI_FAMILY_APP
+ #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_APP
+ #elif defined(WINAPI_FAMILY_GAMES) && WINAPI_FAMILY == WINAPI_FAMILY_GAMES
+ #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_GAMES
+ #else
+ #error Unsupported WINAPI_FAMILY
+ #endif
+ #else
+ #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_DESKTOP_APP
+ #endif
+
+ #define EA_WINAPI_PARTITION_DESKTOP 1
+ #define EA_WINAPI_PARTITION_APP 1
+ #define EA_WINAPI_PARTITION_GAMES (EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_GAMES)
+
+ #define EA_WINAPI_FAMILY_PARTITION(Partition) (Partition)
+
+ // EA_PLATFORM_WINRT
+ // This is a subset of Windows which is used for tablets and the "Metro" (restricted) Windows user interface.
+ // WinRT doesn't doesn't have access to the Windows "desktop" API, but WinRT can nevertheless run on
+ // desktop computers in addition to tablets. The Windows Phone API is a subset of WinRT and is not included
+ // in it due to it being only a part of the API.
+ #if defined(__cplusplus_winrt)
+ #define EA_PLATFORM_WINRT 1
+ #endif
+
+// Sun (Solaris)
+// __SUNPRO_CC is defined by the Sun compiler.
+// __sun is defined by the GCC compiler.
+// __i386 is defined by the Sun and GCC compilers.
+// __sparc is defined by the Sun and GCC compilers.
+#else
+ #error Unknown platform
+ #error Unknown processor
+ #error Unknown endianness
+#endif
+
+#ifndef EA_PROCESSOR_ARM
+ #if defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64) || defined(EA_PROCESSOR_ARM7)
+ #define EA_PROCESSOR_ARM
+ #endif
+#endif
+
+// EA_PLATFORM_PTR_SIZE
+// Platform pointer size; same as sizeof(void*).
+// This is not the same as sizeof(int), as int is usually 32 bits on
+// even 64 bit platforms.
+//
+// _WIN64 is defined by Win64 compilers, such as VC++.
+// _M_IA64 is defined by VC++ and Intel compilers for IA64 processors.
+// __LP64__ is defined by HP compilers for the LP64 standard.
+// _LP64 is defined by the GCC and Sun compilers for the LP64 standard.
+// __ia64__ is defined by the GCC compiler for IA64 processors.
+// __arch64__ is defined by the Sparc compiler for 64 bit processors.
+// __mips64__ is defined by the GCC compiler for MIPS processors.
+// __powerpc64__ is defined by the GCC compiler for PowerPC processors.
+// __64BIT__ is defined by the AIX compiler for 64 bit processors.
+// __sizeof_ptr is defined by the ARM compiler (armcc, armcpp).
+//
+#ifndef EA_PLATFORM_PTR_SIZE
+ #if defined(__WORDSIZE) // Defined by some variations of GCC.
+ #define EA_PLATFORM_PTR_SIZE ((__WORDSIZE) / 8)
+ #elif defined(_WIN64) || defined(__LP64__) || defined(_LP64) || defined(_M_IA64) || defined(__ia64__) || defined(__arch64__) || defined(__aarch64__) || defined(__mips64__) || defined(__64BIT__) || defined(__Ptr_Is_64)
+ #define EA_PLATFORM_PTR_SIZE 8
+ #elif defined(__CC_ARM) && (__sizeof_ptr == 8)
+ #define EA_PLATFORM_PTR_SIZE 8
+ #else
+ #define EA_PLATFORM_PTR_SIZE 4
+ #endif
+#endif
+
+
+
+// EA_PLATFORM_WORD_SIZE
+// This defines the size of a machine word. This will be the same as
+// the size of registers on the machine but not necessarily the same
+// as the size of pointers on the machine. A number of 64 bit platforms
+// have 64 bit registers but 32 bit pointers.
+//
+#ifndef EA_PLATFORM_WORD_SIZE
+ #define EA_PLATFORM_WORD_SIZE EA_PLATFORM_PTR_SIZE
+#endif
+
+// EA_PLATFORM_MIN_MALLOC_ALIGNMENT
+// This defines the minimal alignment that the platform's malloc
+// implementation will return. This should be used when writing custom
+// allocators to ensure that the alignment matches that of malloc
+#ifndef EA_PLATFORM_MIN_MALLOC_ALIGNMENT
+ #if defined(EA_PLATFORM_APPLE)
+ #define EA_PLATFORM_MIN_MALLOC_ALIGNMENT 16
+ #elif defined(EA_PLATFORM_ANDROID) && defined(EA_PROCESSOR_ARM)
+ #define EA_PLATFORM_MIN_MALLOC_ALIGNMENT 8
+ #elif defined(EA_PLATFORM_ANDROID) && defined(EA_PROCESSOR_X86_64)
+ #define EA_PLATFORM_MIN_MALLOC_ALIGNMENT 8
+ #else
+ #define EA_PLATFORM_MIN_MALLOC_ALIGNMENT (EA_PLATFORM_PTR_SIZE * 2)
+ #endif
+#endif
+
+
+// EA_MISALIGNED_SUPPORT_LEVEL
+// Specifies if the processor can read and write built-in types that aren't
+// naturally aligned.
+// 0 - not supported. Likely causes an exception.
+// 1 - supported but slow.
+// 2 - supported and fast.
+//
+#ifndef EA_MISALIGNED_SUPPORT_LEVEL
+ #if defined(EA_PROCESSOR_X86_64)
+ #define EA_MISALIGNED_SUPPORT_LEVEL 2
+ #else
+ #define EA_MISALIGNED_SUPPORT_LEVEL 0
+ #endif
+#endif
+
+// Macro to determine if a Windows API partition is enabled. Always false on non Microsoft platforms.
+#if !defined(EA_WINAPI_FAMILY_PARTITION)
+ #define EA_WINAPI_FAMILY_PARTITION(Partition) (0)
+#endif
+
+
+// EA_CACHE_LINE_SIZE
+// Specifies the cache line size broken down by compile target.
+// This the expected best guess values for the targets that we can make at compilation time.
+
+#ifndef EA_CACHE_LINE_SIZE
+ #if defined(EA_PROCESSOR_X86)
+ #define EA_CACHE_LINE_SIZE 32 // This is the minimum possible value.
+ #elif defined(EA_PROCESSOR_X86_64)
+ #define EA_CACHE_LINE_SIZE 64 // This is the minimum possible value
+ #elif defined(EA_PROCESSOR_ARM32)
+ #define EA_CACHE_LINE_SIZE 32 // This varies between implementations and is usually 32 or 64.
+ #elif defined(EA_PROCESSOR_ARM64)
+ #define EA_CACHE_LINE_SIZE 64 // Cache line Cortex-A8 (64 bytes) http://shervinemami.info/armAssembly.html however this remains to be mostly an assumption at this stage
+ #elif (EA_PLATFORM_WORD_SIZE == 4)
+ #define EA_CACHE_LINE_SIZE 32 // This is the minimum possible value
+ #else
+ #define EA_CACHE_LINE_SIZE 64 // This is the minimum possible value
+ #endif
+#endif
+
+
+#endif // INCLUDED_eaplatform_H
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/eabase.h b/EASTL/test/packages/EABase/include/Common/EABase/eabase.h
new file mode 100644
index 0000000..dab9e46
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/eabase.h
@@ -0,0 +1,1011 @@
+/*-----------------------------------------------------------------------------
+ * eabase.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eabase_H
+#define INCLUDED_eabase_H
+
+
+// Identify the compiler and declare the EA_COMPILER_xxxx defines
+#include <EABase/config/eacompiler.h>
+
+// Identify traits which this compiler supports, or does not support
+#include <EABase/config/eacompilertraits.h>
+
+// Identify the platform and declare the EA_xxxx defines
+#include <EABase/config/eaplatform.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+// Always include version.h for backwards compatibility.
+#include <EABase/version.h>
+
+// Define common SI unit macros
+#include <EABase/eaunits.h>
+
+
+// ------------------------------------------------------------------------
+// The C++ standard defines size_t as a built-in type. Some compilers are
+// not standards-compliant in this respect, so we need an additional include.
+// The case is similar with wchar_t under C++.
+
+#if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_MSVC) || defined(EA_WCHAR_T_NON_NATIVE) || defined(EA_PLATFORM_SONY)
+ #if defined(EA_COMPILER_MSVC)
+ #pragma warning(push, 0)
+ #pragma warning(disable: 4265 4365 4836 4574)
+ #endif
+ #include <stddef.h>
+ #if defined(EA_COMPILER_MSVC)
+ #pragma warning(pop)
+ #endif
+#endif
+
+// ------------------------------------------------------------------------
+// Include stddef.h on Apple's clang compiler to ensure the ptrdiff_t type
+// is defined.
+#if defined(EA_COMPILER_CLANG) && defined(EA_PLATFORM_APPLE)
+ #include <stddef.h>
+#endif
+
+// ------------------------------------------------------------------------
+// Include assert.h on C11 supported compilers so we may allow static_assert usage
+// http://en.cppreference.com/w/c/error/static_assert
+// C11 standard(ISO / IEC 9899:2011) :
+// 7.2/3 Diagnostics <assert.h>(p : 186)
+#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201100L
+ #include <assert.h>
+#endif
+
+
+// ------------------------------------------------------------------------
+// By default, GCC defines NULL as ((void*)0), which is the
+// C definition. This causes all sort of problems for C++ code, so it is
+// worked around by undefining NULL.
+
+#if defined(NULL)
+ #undef NULL
+#endif
+
+
+// ------------------------------------------------------------------------
+// Define the NULL pointer. This is normally defined in <stddef.h>, but we
+// don't want to force a global dependency on that header, so the definition
+// is duplicated here.
+
+#if defined(__cplusplus)
+ #define NULL 0
+#else
+ #define NULL ((void*)0)
+#endif
+
+
+// ------------------------------------------------------------------------
+// C98/99 Standard typedefs. From the ANSI ISO/IEC 9899 standards document
+// Most recent versions of the gcc-compiler come with these defined in
+// inttypes.h or stddef.h. Determining if they are predefined can be
+// tricky, so we expect some problems on non-standard compilers
+
+//#if (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && !defined(PRId64)
+// #error "<inttypes.h> was #included before eabase.h, but without __STDC_FORMAT_MACROS #defined. You must #include eabase.h or an equivalent before #including C99 headers, or you must define __STDC_FORMAT_MACRO before #including system headrs."
+//#endif
+
+// ------------------------------------------------------------------------
+// We need to test this after we potentially include stddef.h, otherwise we
+// would have put this into the compilertraits header.
+#if !defined(EA_COMPILER_HAS_INTTYPES) && (!defined(_MSC_VER) || (_MSC_VER > 1500)) && (defined(EA_COMPILER_IS_C99) || defined(INT8_MIN) || defined(EA_COMPILER_HAS_C99_TYPES) || defined(_SN_STDINT_H))
+ #define EA_COMPILER_HAS_INTTYPES
+#endif
+
+#ifdef EA_COMPILER_HAS_INTTYPES // If the compiler supports inttypes...
+ // ------------------------------------------------------------------------
+ // Include the stdint header to define and derive the required types.
+ // Additionally include inttypes.h as many compilers, including variations
+ // of GCC define things in inttypes.h that the C99 standard says goes
+ // in stdint.h.
+ //
+ // The C99 standard specifies that inttypes.h only define printf/scanf
+ // format macros if __STDC_FORMAT_MACROS is defined before #including
+ // inttypes.h. For consistency, we do that here.
+ #ifndef __STDC_FORMAT_MACROS
+ #define __STDC_FORMAT_MACROS
+ #endif
+ // The GCC PSP compiler defines standard int types (e.g. uint32_t) but not PRId8, etc.
+ // MSVC added support for inttypes.h header in VS2013.
+ #if !defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_MSVC) && EA_COMPILER_VERSION >= 1800)
+ #include <inttypes.h> // PRId8, SCNd8, etc.
+ #endif
+ #if defined(_MSC_VER)
+ #pragma warning(push, 0)
+ #endif
+ #include <stdint.h> // int32_t, INT64_C, UINT8_MAX, etc.
+ #include <math.h> // float_t, double_t, etc.
+ #include <float.h> // FLT_EVAL_METHOD.
+ #if defined(_MSC_VER)
+ #pragma warning(pop)
+ #endif
+
+ #if !defined(FLT_EVAL_METHOD) && (defined(__FLT_EVAL_METHOD__) || defined(_FEVAL)) // GCC 3.x defines __FLT_EVAL_METHOD__ instead of the C99 standard FLT_EVAL_METHOD.
+ #ifdef __FLT_EVAL_METHOD__
+ #define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+ #else
+ #define FLT_EVAL_METHOD _FEVAL
+ #endif
+ #endif
+
+ // MinGW GCC (up to at least v4.3.0-20080502) mistakenly neglects to define float_t and double_t.
+ // This appears to be an acknowledged bug as of March 2008 and is scheduled to be fixed.
+ // Similarly, Android uses a mix of custom standard library headers which prior to SDK API level 21
+ // don't define float_t and double_t.
+ #if defined(__MINGW32__) || (defined(EA_PLATFORM_ANDROID) && !(defined(EA_ANDROID_SDK_LEVEL) && EA_ANDROID_SDK_LEVEL >= 21))
+ #if defined(__FLT_EVAL_METHOD__)
+ #if(__FLT_EVAL_METHOD__== 0)
+ typedef float float_t;
+ typedef double double_t;
+ #elif(__FLT_EVAL_METHOD__ == 1)
+ typedef double float_t;
+ typedef double double_t;
+ #elif(__FLT_EVAL_METHOD__ == 2)
+ typedef long double float_t;
+ typedef long double double_t;
+ #endif
+ #else
+ typedef float float_t;
+ typedef double double_t;
+ #endif
+ #endif
+
+ // The CodeSourcery definitions of PRIxPTR and SCNxPTR are broken for 32 bit systems.
+ #if defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 4) && (defined(__have_long64) || defined(__have_longlong64))
+ #undef PRIdPTR
+ #define PRIdPTR "d"
+ #undef PRIiPTR
+ #define PRIiPTR "i"
+ #undef PRIoPTR
+ #define PRIoPTR "o"
+ #undef PRIuPTR
+ #define PRIuPTR "u"
+ #undef PRIxPTR
+ #define PRIxPTR "x"
+ #undef PRIXPTR
+ #define PRIXPTR "X"
+
+ #undef SCNdPTR
+ #define SCNdPTR "d"
+ #undef SCNiPTR
+ #define SCNiPTR "i"
+ #undef SCNoPTR
+ #define SCNoPTR "o"
+ #undef SCNuPTR
+ #define SCNuPTR "u"
+ #undef SCNxPTR
+ #define SCNxPTR "x"
+ #endif
+#else // else we must implement types ourselves.
+
+ #if !defined(__BIT_TYPES_DEFINED__) && !defined(__int8_t_defined)
+ typedef signed char int8_t; //< 8 bit signed integer
+ #endif
+ #if !defined( __int8_t_defined )
+ typedef signed short int16_t; //< 16 bit signed integer
+ typedef signed int int32_t; //< 32 bit signed integer. This works for both 32 bit and 64 bit platforms, as we assume the LP64 is followed.
+ #define __int8_t_defined
+ #endif
+ typedef unsigned char uint8_t; //< 8 bit unsigned integer
+ typedef unsigned short uint16_t; //< 16 bit unsigned integer
+ #if !defined( __uint32_t_defined )
+ typedef unsigned int uint32_t; //< 32 bit unsigned integer. This works for both 32 bit and 64 bit platforms, as we assume the LP64 is followed.
+ #define __uint32_t_defined
+ #endif
+
+ // According to the C98/99 standard, FLT_EVAL_METHOD defines control the
+ // width used for floating point _t types.
+ #if defined(_MSC_VER) && _MSC_VER >= 1800
+ // MSVC's math.h provides float_t, double_t under this condition.
+ #elif defined(FLT_EVAL_METHOD)
+ #if (FLT_EVAL_METHOD == 0)
+ typedef float float_t;
+ typedef double double_t;
+ #elif (FLT_EVAL_METHOD == 1)
+ typedef double float_t;
+ typedef double double_t;
+ #elif (FLT_EVAL_METHOD == 2)
+ typedef long double float_t;
+ typedef long double double_t;
+ #endif
+ #endif
+
+ #if defined(EA_COMPILER_MSVC)
+ typedef signed __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+
+ #else
+ typedef signed long long int64_t;
+ typedef unsigned long long uint64_t;
+ #endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// macros for declaring constants in a portable way.
+//
+// e.g. int64_t x = INT64_C(1234567812345678);
+// e.g. int64_t x = INT64_C(0x1111111122222222);
+// e.g. uint64_t x = UINT64_C(0x1111111122222222);
+//
+// Microsoft VC++'s definitions of INT8_C/UINT8_C/INT16_C/UINT16_C are like so:
+// #define INT8_C(x) (x)
+// #define INT16_C(x) (x)
+// #define UINT8_C(x) (x)
+// #define UINT16_C(x) (x)
+// To consider: undefine Microsoft's and use the casting versions below.
+// ------------------------------------------------------------------------
+
+#ifndef INT8_C_DEFINED // If the user hasn't already defined these...
+ #define INT8_C_DEFINED
+
+ #ifndef INT8_C
+ #define INT8_C(x) int8_t(x) // For the majority of compilers and platforms, long is 32 bits and long long is 64 bits.
+ #endif
+ #ifndef UINT8_C
+ #define UINT8_C(x) uint8_t(x)
+ #endif
+ #ifndef INT16_C
+ #define INT16_C(x) int16_t(x)
+ #endif
+ #ifndef UINT16_C
+ #define UINT16_C(x) uint16_t(x) // Possibly we should make this be uint16_t(x##u). Let's see how compilers react before changing this.
+ #endif
+ #ifndef INT32_C
+ #define INT32_C(x) x##L
+ #endif
+ #ifndef UINT32_C
+ #define UINT32_C(x) x##UL
+ #endif
+ #ifndef INT64_C
+ #define INT64_C(x) x##LL // The way to deal with this is to compare ULONG_MAX to 0xffffffff and if not equal, then remove the L.
+ #endif
+ #ifndef UINT64_C
+ #define UINT64_C(x) x##ULL // We need to follow a similar approach for LL.
+ #endif
+ #ifndef UINTMAX_C
+ #define UINTMAX_C(x) UINT64_C(x)
+ #endif
+#endif
+
+// ------------------------------------------------------------------------
+// type sizes
+#ifndef INT8_MAX_DEFINED // If the user hasn't already defined these...
+ #define INT8_MAX_DEFINED
+
+ // The value must be 2^(n-1)-1
+ #ifndef INT8_MAX
+ #define INT8_MAX 127
+ #endif
+ #ifndef INT16_MAX
+ #define INT16_MAX 32767
+ #endif
+ #ifndef INT32_MAX
+ #define INT32_MAX 2147483647
+ #endif
+ #ifndef INT64_MAX
+ #define INT64_MAX INT64_C(9223372036854775807)
+ #endif
+ #ifndef INTMAX_MAX
+ #define INTMAX_MAX INT64_MAX
+ #endif
+ #ifndef INTPTR_MAX
+ #if EA_PLATFORM_PTR_SIZE == 4
+ #define INTPTR_MAX INT32_MAX
+ #else
+ #define INTPTR_MAX INT64_MAX
+ #endif
+ #endif
+
+ // The value must be either -2^(n-1) or 1-2(n-1).
+ #ifndef INT8_MIN
+ #define INT8_MIN -128
+ #endif
+ #ifndef INT16_MIN
+ #define INT16_MIN -32768
+ #endif
+ #ifndef INT32_MIN
+ #define INT32_MIN (-INT32_MAX - 1) // -2147483648
+ #endif
+ #ifndef INT64_MIN
+ #define INT64_MIN (-INT64_MAX - 1) // -9223372036854775808
+ #endif
+ #ifndef INTMAX_MIN
+ #define INTMAX_MIN INT64_MIN
+ #endif
+ #ifndef INTPTR_MIN
+ #if EA_PLATFORM_PTR_SIZE == 4
+ #define INTPTR_MIN INT32_MIN
+ #else
+ #define INTPTR_MIN INT64_MIN
+ #endif
+ #endif
+
+ // The value must be 2^n-1
+ #ifndef UINT8_MAX
+ #define UINT8_MAX 0xffU // 255
+ #endif
+ #ifndef UINT16_MAX
+ #define UINT16_MAX 0xffffU // 65535
+ #endif
+ #ifndef UINT32_MAX
+ #define UINT32_MAX UINT32_C(0xffffffff) // 4294967295
+ #endif
+ #ifndef UINT64_MAX
+ #define UINT64_MAX UINT64_C(0xffffffffffffffff) // 18446744073709551615
+ #endif
+ #ifndef UINTMAX_MAX
+ #define UINTMAX_MAX UINT64_MAX
+ #endif
+ #ifndef UINTPTR_MAX
+ #if EA_PLATFORM_PTR_SIZE == 4
+ #define UINTPTR_MAX UINT32_MAX
+ #else
+ #define UINTPTR_MAX UINT64_MAX
+ #endif
+ #endif
+#endif
+
+#ifndef FLT_EVAL_METHOD
+ #define FLT_EVAL_METHOD 0
+ typedef float float_t;
+ typedef double double_t;
+#endif
+
+#if defined(EA_COMPILER_HAS_INTTYPES) && (!defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_MSVC) && EA_COMPILER_VERSION >= 1800))
+ #define EA_COMPILER_HAS_C99_FORMAT_MACROS
+#endif
+
+#ifndef EA_COMPILER_HAS_C99_FORMAT_MACROS
+ // ------------------------------------------------------------------------
+ // sized printf and scanf format specifiers
+ // See the C99 standard, section 7.8.1 -- Macros for format specifiers.
+ //
+ // The C99 standard specifies that inttypes.h only define printf/scanf
+ // format macros if __STDC_FORMAT_MACROS is defined before #including
+ // inttypes.h. For consistency, we define both __STDC_FORMAT_MACROS and
+ // the printf format specifiers here. We also skip the "least/most"
+ // variations of these specifiers, as we've decided to do so with
+ // basic types.
+ //
+ // For 64 bit systems, we assume the LP64 standard is followed
+ // (as opposed to ILP64, etc.) For 32 bit systems, we assume the
+ // ILP32 standard is followed. See:
+ // http://www.opengroup.org/public/tech/aspen/lp64_wp.htm
+ // for information about this. Thus, on both 32 and 64 bit platforms,
+ // %l refers to 32 bit data while %ll refers to 64 bit data.
+
+ #ifndef __STDC_FORMAT_MACROS
+ #define __STDC_FORMAT_MACROS
+ #endif
+
+ #if defined(EA_COMPILER_MSVC) // VC++ 7.1+ understands long long as a data type but doesn't accept %ll as a printf specifier.
+ #define EA_PRI_64_LENGTH_SPECIFIER "I64"
+ #define EA_SCN_64_LENGTH_SPECIFIER "I64"
+ #else
+ #define EA_PRI_64_LENGTH_SPECIFIER "ll"
+ #define EA_SCN_64_LENGTH_SPECIFIER "ll"
+ #endif // It turns out that some platforms use %q to represent a 64 bit value, but these are not relevant to us at this time.
+
+ // Printf format specifiers
+ #if defined(EA_COMPILER_IS_C99) || defined(EA_COMPILER_GNUC)
+ #define PRId8 "hhd"
+ #define PRIi8 "hhi"
+ #define PRIo8 "hho"
+ #define PRIu8 "hhu"
+ #define PRIx8 "hhx"
+ #define PRIX8 "hhX"
+ #else // VC++, Borland, etc. which have no way to specify 8 bit values other than %c.
+ #define PRId8 "c" // This may not work properly but it at least will not crash. Try using 16 bit versions instead.
+ #define PRIi8 "c" // "
+ #define PRIo8 "o" // "
+ #define PRIu8 "u" // "
+ #define PRIx8 "x" // "
+ #define PRIX8 "X" // "
+ #endif
+
+ #define PRId16 "hd"
+ #define PRIi16 "hi"
+ #define PRIo16 "ho"
+ #define PRIu16 "hu"
+ #define PRIx16 "hx"
+ #define PRIX16 "hX"
+
+ #define PRId32 "d" // This works for both 32 bit and 64 bit systems, as we assume LP64 conventions.
+ #define PRIi32 "i"
+ #define PRIo32 "o"
+ #define PRIu32 "u"
+ #define PRIx32 "x"
+ #define PRIX32 "X"
+
+ #define PRId64 EA_PRI_64_LENGTH_SPECIFIER "d"
+ #define PRIi64 EA_PRI_64_LENGTH_SPECIFIER "i"
+ #define PRIo64 EA_PRI_64_LENGTH_SPECIFIER "o"
+ #define PRIu64 EA_PRI_64_LENGTH_SPECIFIER "u"
+ #define PRIx64 EA_PRI_64_LENGTH_SPECIFIER "x"
+ #define PRIX64 EA_PRI_64_LENGTH_SPECIFIER "X"
+
+ #if (EA_PLATFORM_PTR_SIZE == 4)
+ #define PRIdPTR PRId32 // Usage of pointer values will generate warnings with
+ #define PRIiPTR PRIi32 // some compilers because they are defined in terms of
+ #define PRIoPTR PRIo32 // integers. However, you can't simply use "p" because
+ #define PRIuPTR PRIu32 // 'p' is interpreted in a specific and often different
+ #define PRIxPTR PRIx32 // way by the library.
+ #define PRIXPTR PRIX32
+ #elif (EA_PLATFORM_PTR_SIZE == 8)
+ #define PRIdPTR PRId64
+ #define PRIiPTR PRIi64
+ #define PRIoPTR PRIo64
+ #define PRIuPTR PRIu64
+ #define PRIxPTR PRIx64
+ #define PRIXPTR PRIX64
+ #endif
+
+ // Scanf format specifiers
+ #if defined(EA_COMPILER_IS_C99) || defined(EA_COMPILER_GNUC)
+ #define SCNd8 "hhd"
+ #define SCNi8 "hhi"
+ #define SCNo8 "hho"
+ #define SCNu8 "hhu"
+ #define SCNx8 "hhx"
+ #else // VC++, Borland, etc. which have no way to specify 8 bit values other than %c.
+ #define SCNd8 "c" // This will not work properly but it at least will not crash. Try using 16 bit versions instead.
+ #define SCNi8 "c" // "
+ #define SCNo8 "c" // "
+ #define SCNu8 "c" // "
+ #define SCNx8 "c" // "
+ #endif
+
+ #define SCNd16 "hd"
+ #define SCNi16 "hi"
+ #define SCNo16 "ho"
+ #define SCNu16 "hu"
+ #define SCNx16 "hx"
+
+ #define SCNd32 "d" // This works for both 32 bit and 64 bit systems, as we assume LP64 conventions.
+ #define SCNi32 "i"
+ #define SCNo32 "o"
+ #define SCNu32 "u"
+ #define SCNx32 "x"
+
+ #define SCNd64 EA_SCN_64_LENGTH_SPECIFIER "d"
+ #define SCNi64 EA_SCN_64_LENGTH_SPECIFIER "i"
+ #define SCNo64 EA_SCN_64_LENGTH_SPECIFIER "o"
+ #define SCNu64 EA_SCN_64_LENGTH_SPECIFIER "u"
+ #define SCNx64 EA_SCN_64_LENGTH_SPECIFIER "x"
+
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1900)
+ #define SCNdPTR PRIdPTR
+ #define SCNiPTR PRIiPTR
+ #define SCNoPTR PRIoPTR
+ #define SCNuPTR PRIuPTR
+ #define SCNxPTR PRIxPTR
+ #elif (EA_PLATFORM_PTR_SIZE == 4)
+ #define SCNdPTR SCNd32 // Usage of pointer values will generate warnings with
+ #define SCNiPTR SCNi32 // some compilers because they are defined in terms of
+ #define SCNoPTR SCNo32 // integers. However, you can't simply use "p" because
+ #define SCNuPTR SCNu32 // 'p' is interpreted in a specific and often different
+ #define SCNxPTR SCNx32 // way by the library.
+ #elif (EA_PLATFORM_PTR_SIZE == 8)
+ #define SCNdPTR SCNd64
+ #define SCNiPTR SCNi64
+ #define SCNoPTR SCNo64
+ #define SCNuPTR SCNu64
+ #define SCNxPTR SCNx64
+ #endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// bool8_t
+// The definition of a bool8_t is controversial with some, as it doesn't
+// act just like built-in bool. For example, you can assign -100 to it.
+//
+#ifndef BOOL8_T_DEFINED // If the user hasn't already defined this...
+ #define BOOL8_T_DEFINED
+ #if defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_INTEL) && defined(EA_PLATFORM_WINDOWS))
+ #if defined(__cplusplus)
+ typedef bool bool8_t;
+ #else
+ typedef int8_t bool8_t;
+ #endif
+ #else // EA_COMPILER_GNUC generally uses 4 bytes per bool.
+ typedef int8_t bool8_t;
+ #endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// intptr_t / uintptr_t
+// Integer type guaranteed to be big enough to hold
+// a native pointer ( intptr_t is defined in STDDEF.H )
+//
+#if !defined(_INTPTR_T_DEFINED) && !defined(_intptr_t_defined) && !defined(EA_COMPILER_HAS_C99_TYPES)
+ #if (EA_PLATFORM_PTR_SIZE == 4)
+ typedef int32_t intptr_t;
+ #elif (EA_PLATFORM_PTR_SIZE == 8)
+ typedef int64_t intptr_t;
+ #endif
+
+ #define _intptr_t_defined
+ #define _INTPTR_T_DEFINED
+#endif
+
+#if !defined(_UINTPTR_T_DEFINED) && !defined(_uintptr_t_defined) && !defined(EA_COMPILER_HAS_C99_TYPES)
+ #if (EA_PLATFORM_PTR_SIZE == 4)
+ typedef uint32_t uintptr_t;
+ #elif (EA_PLATFORM_PTR_SIZE == 8)
+ typedef uint64_t uintptr_t;
+ #endif
+
+ #define _uintptr_t_defined
+ #define _UINTPTR_T_DEFINED
+#endif
+
+#if !defined(EA_COMPILER_HAS_INTTYPES)
+ #ifndef INTMAX_T_DEFINED
+ #define INTMAX_T_DEFINED
+
+ // At this time, all supported compilers have int64_t as the max
+ // integer type. Some compilers support a 128 bit integer type,
+ // but in some cases it is not a true int128_t but rather a
+ // crippled data type. Also, it turns out that Unix 64 bit ABIs
+ // require that intmax_t be int64_t and nothing larger. So we
+ // play it safe here and set intmax_t to int64_t, even though
+ // an int128_t type may exist.
+
+ typedef int64_t intmax_t;
+ typedef uint64_t uintmax_t;
+ #endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// ssize_t
+// signed equivalent to size_t.
+// This is defined by GCC (except the QNX implementation of GCC) but not by other compilers.
+//
+#if !defined(__GNUC__)
+ // As of this writing, all non-GCC compilers significant to us implement
+ // uintptr_t the same as size_t. However, this isn't guaranteed to be
+ // so for all compilers, as size_t may be based on int, long, or long long.
+ #if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
+ #define _SSIZE_T_
+ #define _SSIZE_T_DEFINED
+
+ #if defined(_MSC_VER) && (EA_PLATFORM_PTR_SIZE == 8)
+ typedef __int64 ssize_t;
+ #else
+ typedef long ssize_t;
+ #endif
+ #endif
+#else
+ #include <sys/types.h>
+#endif
+
+
+// ------------------------------------------------------------------------
+// Character types
+//
+#if defined(EA_COMPILER_MSVC)
+ #if defined(EA_WCHAR_T_NON_NATIVE)
+ // In this case, wchar_t is not defined unless we include
+ // wchar.h or if the compiler makes it built-in.
+ #ifdef EA_COMPILER_MSVC
+ #pragma warning(push, 3)
+ #endif
+ #include <wchar.h>
+ #ifdef EA_COMPILER_MSVC
+ #pragma warning(pop)
+ #endif
+ #endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// char8_t -- Guaranteed to be equal to the compiler's char data type.
+// Some compilers implement char8_t as unsigned, though char
+// is usually set to be signed.
+//
+// char16_t -- This is set to be an unsigned 16 bit value. If the compiler
+// has wchar_t as an unsigned 16 bit value, then char16_t is
+// set to be the same thing as wchar_t in order to allow the
+// user to use char16_t with standard wchar_t functions.
+//
+// char32_t -- This is set to be an unsigned 32 bit value. If the compiler
+// has wchar_t as an unsigned 32 bit value, then char32_t is
+// set to be the same thing as wchar_t in order to allow the
+// user to use char32_t with standard wchar_t functions.
+//
+// EA_CHAR8_UNIQUE
+// EA_CHAR16_NATIVE
+// EA_CHAR32_NATIVE
+// EA_WCHAR_UNIQUE
+//
+// VS2010 unilaterally defines char16_t and char32_t in its yvals.h header
+// unless _HAS_CHAR16_T_LANGUAGE_SUPPORT or _CHAR16T are defined.
+// However, VS2010 does not support the C++0x u"" and U"" string literals,
+// which makes its definition of char16_t and char32_t somewhat useless.
+// Until VC++ supports string literals, the build system should define
+// _CHAR16T and let EABase define char16_t and EA_CHAR16.
+//
+// GCC defines char16_t and char32_t in the C compiler in -std=gnu99 mode,
+// as __CHAR16_TYPE__ and __CHAR32_TYPE__, and for the C++ compiler
+// in -std=c++0x and -std=gnu++0x modes, as char16_t and char32_t too.
+//
+// The EA_WCHAR_UNIQUE symbol is defined to 1 if wchar_t is distinct from
+// char8_t, char16_t, and char32_t, and defined to 0 if not. In some cases,
+// if the compiler does not support char16_t/char32_t, one of these two types
+// is typically a typedef or define of wchar_t. For compilers that support
+// the C++11 unicode character types often overloads must be provided to
+// support existing code that passes a wide char string to a function that
+// takes a unicode string.
+//
+// The EA_CHAR8_UNIQUE symbol is defined to 1 if char8_t is distinct type
+// from char in the type system, and defined to 0 if otherwise.
+
+#if !defined(EA_CHAR16_NATIVE)
+ // To do: Change this to be based on EA_COMPILER_NO_NEW_CHARACTER_TYPES.
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(_HAS_CHAR16_T_LANGUAGE_SUPPORT) && _HAS_CHAR16_T_LANGUAGE_SUPPORT // VS2010+
+ #define EA_CHAR16_NATIVE 1
+ #elif defined(EA_COMPILER_CLANG) && defined(EA_COMPILER_CPP11_ENABLED)
+ #if __has_feature(cxx_unicode_literals)
+ #define EA_CHAR16_NATIVE 1
+ #elif (EA_COMPILER_VERSION >= 300) && !(defined(EA_PLATFORM_IPHONE) || defined(EA_PLATFORM_OSX))
+ #define EA_CHAR16_NATIVE 1
+ #elif defined(EA_PLATFORM_APPLE)
+ #define EA_CHAR16_NATIVE 1
+ #else
+ #define EA_CHAR16_NATIVE 0
+ #endif
+ #elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 404) && defined(__CHAR16_TYPE__) && defined(EA_COMPILER_CPP11_ENABLED)// EDG 4.4+.
+ #define EA_CHAR16_NATIVE 1
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) && !defined(EA_COMPILER_EDG) && (defined(EA_COMPILER_CPP11_ENABLED) || defined(__STDC_VERSION__)) // g++ (C++ compiler) 4.4+ with -std=c++0x or gcc (C compiler) 4.4+ with -std=gnu99
+ #define EA_CHAR16_NATIVE 1
+ #else
+ #define EA_CHAR16_NATIVE 0
+ #endif
+#endif
+
+#if !defined(EA_CHAR32_NATIVE) // Microsoft currently ties char32_t language support to char16_t language support. So we use CHAR16_T here.
+ // To do: Change this to be based on EA_COMPILER_NO_NEW_CHARACTER_TYPES.
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(_HAS_CHAR16_T_LANGUAGE_SUPPORT) && _HAS_CHAR16_T_LANGUAGE_SUPPORT // VS2010+
+ #define EA_CHAR32_NATIVE 1
+ #elif defined(EA_COMPILER_CLANG) && defined(EA_COMPILER_CPP11_ENABLED)
+ #if __has_feature(cxx_unicode_literals)
+ #define EA_CHAR32_NATIVE 1
+ #elif (EA_COMPILER_VERSION >= 300) && !(defined(EA_PLATFORM_IPHONE) || defined(EA_PLATFORM_OSX))
+ #define EA_CHAR32_NATIVE 1
+ #elif defined(EA_PLATFORM_APPLE)
+ #define EA_CHAR32_NATIVE 1
+ #else
+ #define EA_CHAR32_NATIVE 0
+ #endif
+ #elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 404) && defined(__CHAR32_TYPE__) && defined(EA_COMPILER_CPP11_ENABLED)// EDG 4.4+.
+ #define EA_CHAR32_NATIVE 1
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) && !defined(EA_COMPILER_EDG) && (defined(EA_COMPILER_CPP11_ENABLED) || defined(__STDC_VERSION__)) // g++ (C++ compiler) 4.4+ with -std=c++0x or gcc (C compiler) 4.4+ with -std=gnu99
+ #define EA_CHAR32_NATIVE 1
+ #else
+ #define EA_CHAR32_NATIVE 0
+ #endif
+#endif
+
+
+#if EA_CHAR16_NATIVE || EA_CHAR32_NATIVE
+ #define EA_WCHAR_UNIQUE 1
+#else
+ #define EA_WCHAR_UNIQUE 0
+#endif
+
+
+// EA_CHAR8_UNIQUE
+//
+// Check for char8_t support in the cpp type system. Moving forward from c++20,
+// the char8_t type allows users to overload function for character encoding.
+//
+// EA_CHAR8_UNIQUE is 1 when the type is a unique in the type system and
+// can there be used as a valid overload. EA_CHAR8_UNIQUE is 0 otherwise.
+//
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0482r6.html
+//
+#ifdef __cpp_char8_t
+ #define CHAR8_T_DEFINED
+ #define EA_CHAR8_UNIQUE 1
+#else
+ #define EA_CHAR8_UNIQUE 0
+#endif
+
+
+#ifndef CHAR8_T_DEFINED // If the user hasn't already defined these...
+ #define CHAR8_T_DEFINED
+ #if defined(EA_PLATFORM_APPLE)
+ #define char8_t char // The Apple debugger is too stupid to realize char8_t is typedef'd to char, so we #define it.
+ #else
+ typedef char char8_t;
+ #endif
+
+ #if EA_CHAR16_NATIVE
+ // In C++, char16_t and char32_t are already defined by the compiler.
+ // In MS C, char16_t and char32_t are already defined by the compiler/standard library.
+ // In GCC C, __CHAR16_TYPE__ and __CHAR32_TYPE__ are defined instead, and we must define char16_t and char32_t from these.
+ #if defined(__GNUC__) && !defined(__GXX_EXPERIMENTAL_CXX0X__) && defined(__CHAR16_TYPE__) // If using GCC and compiling in C...
+ typedef __CHAR16_TYPE__ char16_t;
+ typedef __CHAR32_TYPE__ char32_t;
+ #endif
+ #elif (EA_WCHAR_SIZE == 2)
+ #if (defined(_MSC_VER) && (_MSC_VER >= 1600)) // if VS2010+ or using platforms that use Dinkumware under a compiler that doesn't natively support C++11 char16_t.
+ #if !defined(_CHAR16T)
+ #define _CHAR16T
+ #endif
+ #if !defined(_HAS_CHAR16_T_LANGUAGE_SUPPORT) || !_HAS_CHAR16_T_LANGUAGE_SUPPORT
+ typedef wchar_t char16_t;
+ typedef uint32_t char32_t;
+ #endif
+ #else
+ typedef wchar_t char16_t;
+ typedef uint32_t char32_t;
+ #endif
+ #else
+ typedef uint16_t char16_t;
+ #if defined(__cplusplus)
+ typedef wchar_t char32_t;
+ #else
+ typedef uint32_t char32_t;
+ #endif
+ #endif
+#endif
+
+
+// CHAR8_MIN, CHAR8_MAX, etc.
+//
+#define EA_LIMITS_DIGITS_S(T) ((sizeof(T) * 8) - 1)
+#define EA_LIMITS_DIGITS_U(T) ((sizeof(T) * 8))
+#define EA_LIMITS_DIGITS(T) ((EA_LIMITS_IS_SIGNED(T) ? EA_LIMITS_DIGITS_S(T) : EA_LIMITS_DIGITS_U(T)))
+#define EA_LIMITS_IS_SIGNED(T) ((T)(-1) < 0)
+#define EA_LIMITS_MIN_S(T) ((T)((T)1 << EA_LIMITS_DIGITS_S(T)))
+#define EA_LIMITS_MIN_U(T) ((T)0)
+#define EA_LIMITS_MIN(T) ((EA_LIMITS_IS_SIGNED(T) ? EA_LIMITS_MIN_S(T) : EA_LIMITS_MIN_U(T)))
+#define EA_LIMITS_MAX_S(T) ((T)(((((T)1 << (EA_LIMITS_DIGITS(T) - 1)) - 1) << 1) + 1))
+#define EA_LIMITS_MAX_U(T) ((T)~(T)0)
+#define EA_LIMITS_MAX(T) ((EA_LIMITS_IS_SIGNED(T) ? EA_LIMITS_MAX_S(T) : EA_LIMITS_MAX_U(T)))
+
+#if !defined(CHAR8_MIN)
+ #define CHAR8_MIN EA_LIMITS_MIN(char8_t)
+#endif
+
+#if !defined(CHAR8_MAX)
+ #define CHAR8_MAX EA_LIMITS_MAX(char8_t)
+#endif
+
+#if !defined(CHAR16_MIN)
+ #define CHAR16_MIN EA_LIMITS_MIN(char16_t)
+#endif
+
+#if !defined(CHAR16_MAX)
+ #define CHAR16_MAX EA_LIMITS_MAX(char16_t)
+#endif
+
+#if !defined(CHAR32_MIN)
+ #define CHAR32_MIN EA_LIMITS_MIN(char32_t)
+#endif
+
+#if !defined(CHAR32_MAX)
+ #define CHAR32_MAX EA_LIMITS_MAX(char32_t)
+#endif
+
+
+
+// EA_CHAR8 / EA_CHAR16 / EA_CHAR32 / EA_WCHAR
+//
+// Supports usage of portable string constants.
+//
+// Example usage:
+// const char16_t* str = EA_CHAR16("Hello world");
+// const char32_t* str = EA_CHAR32("Hello world");
+// const char16_t c = EA_CHAR16('\x3001');
+// const char32_t c = EA_CHAR32('\x3001');
+//
+#ifndef EA_CHAR8
+ #if EA_CHAR8_UNIQUE
+ #define EA_CHAR8(s) u8 ## s
+ #else
+ #define EA_CHAR8(s) s
+ #endif
+#endif
+
+#ifndef EA_WCHAR
+ #define EA_WCHAR_(s) L ## s
+ #define EA_WCHAR(s) EA_WCHAR_(s)
+#endif
+
+#ifndef EA_CHAR16
+ #if EA_CHAR16_NATIVE && !defined(_MSC_VER) // Microsoft doesn't support char16_t string literals.
+ #define EA_CHAR16_(s) u ## s
+ #define EA_CHAR16(s) EA_CHAR16_(s)
+ #elif (EA_WCHAR_SIZE == 2)
+ #if defined(_MSC_VER) && (_MSC_VER >= 1900) && defined(__cplusplus) // VS2015 supports u"" string literals.
+ #define EA_CHAR16_(s) u ## s
+ #define EA_CHAR16(s) EA_CHAR16_(s)
+ #else
+ #define EA_CHAR16_(s) L ## s
+ #define EA_CHAR16(s) EA_CHAR16_(s)
+ #endif
+ #else
+ //#define EA_CHAR16(s) // Impossible to implement efficiently.
+ #endif
+#endif
+
+#ifndef EA_CHAR32
+ #if EA_CHAR32_NATIVE && !defined(_MSC_VER) // Microsoft doesn't support char32_t string literals.
+ #define EA_CHAR32_(s) U ## s
+ #define EA_CHAR32(s) EA_CHAR32_(s)
+ #elif (EA_WCHAR_SIZE == 2)
+ #if defined(_MSC_VER) && (_MSC_VER >= 1900) && defined(__cplusplus) // VS2015 supports u"" string literals.
+ #define EA_CHAR32_(s) U ## s
+ #define EA_CHAR32(s) EA_CHAR32_(s)
+ #else
+ //#define EA_CHAR32(s) // Impossible to implement.
+ #endif
+ #elif (EA_WCHAR_SIZE == 4)
+ #define EA_CHAR32_(s) L ## s
+ #define EA_CHAR32(s) EA_CHAR32_(s)
+ #else
+ #error Unexpected size of wchar_t
+ #endif
+#endif
+
+// EAText8 / EAText16
+//
+// Provided for backwards compatibility with older code.
+//
+#if defined(EABASE_ENABLE_EATEXT_MACROS)
+ #define EAText8(x) x
+ #define EAChar8(x) x
+
+ #define EAText16(x) EA_CHAR16(x)
+ #define EAChar16(x) EA_CHAR16(x)
+#endif
+
+
+
+
+// ------------------------------------------------------------------------
+// EAArrayCount
+//
+// Returns the count of items in a built-in C array. This is a common technique
+// which is often used to help properly calculate the number of items in an
+// array at runtime in order to prevent overruns, etc.
+//
+// Example usage:
+// int array[75];
+// size_t arrayCount = EAArrayCount(array); // arrayCount is 75.
+//
+#if defined(EA_COMPILER_NO_CONSTEXPR)
+ #ifndef EAArrayCount
+ #define EAArrayCount(x) (sizeof(x) / sizeof(x[0]))
+ #endif
+#else
+ // This C++11 version is a little smarter than the macro version above;
+ // it can tell the difference between arrays and pointers. Other simpler
+ // templated versions have failed in various subtle ways.
+
+ template <typename T, size_t N>
+ char (&EAArraySizeHelper(T (&x)[N]))[N];
+
+ template <typename T, size_t N>
+ char (&EAArraySizeHelper(T (&&x)[N]))[N];
+
+ #define EAArrayCount(x) (sizeof(EAArraySizeHelper(x)))
+#endif
+
+
+// ------------------------------------------------------------------------
+// static_assert
+//
+// C++11 static_assert (a.k.a. compile-time assert).
+//
+// Specification:
+// void static_assert(bool const_expression, const char* description);
+//
+// Example usage:
+// static_assert(sizeof(int) == 4, "int must be 32 bits");
+//
+#if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(__cplusplus)
+ // static_assert is defined by the compiler for both C and C++.
+#elif !defined(__cplusplus) && defined(EA_PLATFORM_ANDROID) && ((defined(__STDC_VERSION__) && __STDC_VERSION__ < 201100L) || !defined(__STDC_VERSION__))
+ // AndroidNDK does not support static_assert despite claiming it's a C11 compiler
+ #define NEED_CUSTOM_STATIC_ASSERT
+#elif defined(__clang__) && defined(__cplusplus)
+ // We need to separate these checks on a new line, as the pre-processor on other compilers will fail on the _has_feature macros
+ #if !(__has_feature(cxx_static_assert) || __has_extension(cxx_static_assert))
+ #define NEED_CUSTOM_STATIC_ASSERT
+ #endif
+#elif defined(__GNUC__) && (defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(__cplusplus) && (__cplusplus >= 201103L)))
+ // static_assert is defined by the compiler.
+#elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) && defined(EA_COMPILER_CPP11_ENABLED)
+ // static_assert is defined by the compiler.
+#elif !defined(__cplusplus) && defined(__GLIBC__) && defined(__USE_ISOC11)
+ // static_assert is defined by the compiler.
+#elif !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201100L
+ // static_assert is defined by the compiler.
+#else
+ #define NEED_CUSTOM_STATIC_ASSERT
+#endif
+
+#ifdef NEED_CUSTOM_STATIC_ASSERT
+ #ifdef __GNUC__
+ // On GCC the 'unused' attribute can be used to indicate a typedef is not actually used
+ // (such as in the static_assert implementation below). New versions of GCC generate
+ // warnings for unused typedefs in function/method scopes.
+ #define EA_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
+ #else
+ #define EA_STATIC_ASSERT_UNUSED_ATTRIBUTE
+ #endif
+ #define EA_STATIC_ASSERT_TOKEN_PASTE(a,b) a ## b
+ #define EA_STATIC_ASSERT_CONCATENATE_HELPER(a,b) EA_STATIC_ASSERT_TOKEN_PASTE(a,b)
+
+ #if defined(__COUNTER__) // If this extension is available, which allows multiple statements per line...
+ #define static_assert(expression, description) typedef char EA_STATIC_ASSERT_CONCATENATE_HELPER(compileTimeAssert,__COUNTER__) [((expression) != 0) ? 1 : -1] EA_STATIC_ASSERT_UNUSED_ATTRIBUTE
+ #else
+ #define static_assert(expression, description) typedef char EA_STATIC_ASSERT_CONCATENATE_HELPER(compileTimeAssert,__LINE__) [((expression) != 0) ? 1 : -1] EA_STATIC_ASSERT_UNUSED_ATTRIBUTE
+ #endif
+
+ #undef NEED_CUSTOM_STATIC_ASSERT
+#endif
+
+// ------------------------------------------------------------------------
+// EA_IS_ENABLED
+//
+// EA_IS_ENABLED is intended to be used for detecting if compile time features are enabled or disabled.
+//
+// It has some advantages over using a standard #if or #ifdef tests:
+// 1) Fails to compile when passes numeric macro values. Valid options are strictly enabled or disabled.
+// 2) Fails to compile when passed undefined macro values rather than disabling by default
+// 3) Fails to compile when the passed macro is defined to but empty
+//
+// To use the macro, the calling code should create a define for the feature to enable or disable. This feature define
+// must be set to either EA_ENABLED or EA_DISABLED. (Do not try to set the feature define directly to some other
+// value.)
+//
+// Note: These macros are analogous to the Frostbite macro FB_USING used in combination with FB_OFF / FB_ON and are
+// designed to be compatible to support gradual migration.
+//
+// Example usage:
+//
+// // The USER_PROVIDED_FEATURE_DEFINE should be defined as either
+// // EA_ENABLED or EA_DISABLED.
+// #define USER_PROVIDED_FEATURE_DEFINE EA_ENABLED
+//
+// #if EA_IS_ENABLED(USER_PROVIDED_FEATURE_DEFINE)
+// // USER_PROVIDED_FEATURE_DEFINE is enabled
+// #else
+// // USER_PROVIDED_FEATURE_DEFINE is disabled
+// #endif
+//
+#define EA_ENABLED 111-
+#define EA_DISABLED 333-
+// NOTE: Numeric values for x will produce a parse error while empty values produce a divide by zero, and the test is a bool for proper negation behavior
+#define EA_IS_ENABLED(x) (333 == 333 * 111 / ((x 0) * (((x 0) == 333 ? 1 : 0) + ((x 0) == 111 ? 1 : 0))))
+
+
+
+// Define int128_t / uint128_t types.
+// NOTE(rparolin): include file at the end because we want all the signed integral types defined.
+#ifdef __cplusplus
+ #include <EABase/int128.h>
+#endif
+
+#endif // Header include guard
+
+
+
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/eahave.h b/EASTL/test/packages/EABase/include/Common/EABase/eahave.h
new file mode 100644
index 0000000..b0987be
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/eahave.h
@@ -0,0 +1,877 @@
+/*-----------------------------------------------------------------------------
+ * eahave.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+/*-----------------------------------------------------------------------------
+ This file's functionality is preliminary and won't be considered stable until
+ a future EABase version.
+ *---------------------------------------------------------------------------*/
+
+
+/*-----------------------------------------------------------------------------
+ This header identifies if the given facilities are available in the
+ standard build environment the current compiler/linker/standard library/
+ operating system combination. This file may in some cases #include standard
+ headers in order to make availability determinations, such as to check
+ compiler or SDK version numbers. However, it cannot be perfect.
+ This header does not identify compiler features, as those are defined in
+ eacompiler.h and eacompilertraits.h. Rather this header is about library support.
+ This header does not identify platform or library conventions either, such
+ as whether the file paths use \ or / for directory separators.
+
+ We provide three types of HAVE features here:
+
+ - EA_HAVE_XXX_FEATURE - Have compiler feature.
+ Identifies if the compiler has or lacks some feature in the
+ current build. Sometimes you need to check to see if the
+ compiler is running in some mode in able to write portable code
+ against it. For example, some compilers (e.g. VC++) have a
+ mode in which all language extensions are disabled. If you want
+ to write code that works with that but still uses the extensions
+ when available then you can check #if defined(EA_HAVE_EXTENSIONS_FEATURE).
+ Features can be forcibly cancelled via EA_NO_HAVE_XXX_FEATURE.
+ EA_NO_HAVE is useful for a build system or user to override the
+ defaults because it happens to know better.
+
+ - EA_HAVE_XXX_H - Have header file information.
+ Identifies if a given header file is available to the current
+ compile configuration. For example, some compilers provide a
+ malloc.h header, while others don't. For the former we define
+ EA_HAVE_MALLOC_H, while for the latter it remains undefined.
+ If a header is missing then it may still be that the functions
+ the header usually declares are declared in some other header.
+ EA_HAVE_XXX does not include the possibility that our own code
+ provides versions of these headers, and in fact a purpose of
+ EA_HAVE_XXX is to decide if we should be using our own because
+ the system doesn't provide one.
+ Header availability can be forcibly cancelled via EA_NO_HAVE_XXX_H.
+ EA_NO_HAVE is useful for a build system or user to override the
+ defaults because it happens to know better.
+
+ - EA_HAVE_XXX_DECL - Have function declaration information.
+ Identifies if a given function declaration is provided by
+ the current compile configuration. For example, some compiler
+ standard libraries declare a wcslen function, while others
+ don't. For the former we define EA_HAVE_WCSLEN_DECL, while for
+ the latter it remains undefined. If a declaration of a function
+ is missing then we assume the implementation is missing as well.
+ EA_HAVE_XXX_DECL does not include the possibility that our
+ own code provides versions of these declarations, and in fact a
+ purpose of EA_HAVE_XXX_DECL is to decide if we should be using
+ our own because the system doesn't provide one.
+ Declaration availability can be forcibly cancelled via EA_NO_HAVE_XXX_DECL.
+ EA_NO_HAVE is useful for a build system or user to override the
+ defaults because it happens to know better.
+
+ - EA_HAVE_XXX_IMPL - Have function implementation information.
+ Identifies if a given function implementation is provided by
+ the current compile and link configuration. For example, it's
+ commonly the case that console platforms declare a getenv function
+ but don't provide a linkable implementation.
+ In this case the user needs to provide such a function manually
+ as part of the link. If the implementation is available then
+ we define EA_HAVE_GETENV_IMPL, otherwise it remains undefined.
+ Beware that sometimes a function may not seem to be present in
+ the Standard Library but in reality you need to link some auxiliary
+ provided library for it. An example of this is the Unix real-time
+ functions such as clock_gettime.
+ EA_HAVE_XXX_IMPL does not include the possibility that our
+ own code provides versions of these implementations, and in fact a
+ purpose of EA_HAVE_XXX_IMPL is to decide if we should be using
+ our own because the system doesn't provide one.
+ Implementation availability can be forcibly cancelled via EA_NO_HAVE_XXX_IMPL.
+ EA_NO_HAVE is useful for a build system or user to override the
+ defaults because it happens to know better.
+
+ It's not practical to define EA_HAVE macros for every possible header,
+ declaration, and implementation, and so the user must simply know that
+ some headers, declarations, and implementations tend to require EA_HAVE
+ checking. Nearly every C Standard Library we've seen has a <string.h>
+ header, a strlen declaration, and a linkable strlen implementation,
+ so there's no need to provide EA_HAVE support for this. On the other hand
+ it's commonly the case that the C Standard Library doesn't have a malloc.h
+ header or an inet_ntop declaration.
+
+---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eahave_H
+#define INCLUDED_eahave_H
+
+
+#include <EABase/eabase.h>
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+/* EA_HAVE_XXX_FEATURE */
+
+#if !defined(EA_HAVE_EXTENSIONS_FEATURE) && !defined(EA_NO_HAVE_EXTENSIONS_FEATURE)
+ #define EA_HAVE_EXTENSIONS_FEATURE 1
+#endif
+
+
+/* EA_HAVE_XXX_LIBRARY */
+
+// Dinkumware
+#if !defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && !defined(EA_NO_HAVE_DINKUMWARE_CPP_LIBRARY)
+ #if defined(__cplusplus)
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <cstddef> // Need to trigger the compilation of yvals.h without directly using <yvals.h> because it might not exist.
+ EA_RESTORE_ALL_VC_WARNINGS()
+ #endif
+
+ #if defined(__cplusplus) && defined(_CPPLIB_VER) /* If using the Dinkumware Standard library... */
+ #define EA_HAVE_DINKUMWARE_CPP_LIBRARY 1
+ #else
+ #define EA_NO_HAVE_DINKUMWARE_CPP_LIBRARY 1
+ #endif
+#endif
+
+// GCC libstdc++
+#if !defined(EA_HAVE_LIBSTDCPP_LIBRARY) && !defined(EA_NO_HAVE_LIBSTDCPP_LIBRARY)
+ #if defined(__GLIBCXX__) /* If using libstdc++ ... */
+ #define EA_HAVE_LIBSTDCPP_LIBRARY 1
+ #else
+ #define EA_NO_HAVE_LIBSTDCPP_LIBRARY 1
+ #endif
+#endif
+
+// Clang libc++
+#if !defined(EA_HAVE_LIBCPP_LIBRARY) && !defined(EA_NO_HAVE_LIBCPP_LIBRARY)
+ #if EA_HAS_INCLUDE_AVAILABLE
+ #if EA_HAS_INCLUDE(<__config>)
+ #define EA_HAVE_LIBCPP_LIBRARY 1 // We could also #include <ciso646> and check if defined(_LIBCPP_VERSION).
+ #endif
+ #endif
+
+ #if !defined(EA_HAVE_LIBCPP_LIBRARY)
+ #define EA_NO_HAVE_LIBCPP_LIBRARY 1
+ #endif
+#endif
+
+
+/* EA_HAVE_XXX_H */
+
+// #include <sys/types.h>
+#if !defined(EA_HAVE_SYS_TYPES_H) && !defined(EA_NO_HAVE_SYS_TYPES_H)
+ #define EA_HAVE_SYS_TYPES_H 1
+#endif
+
+// #include <io.h> (and not sys/io.h or asm/io.h)
+#if !defined(EA_HAVE_IO_H) && !defined(EA_NO_HAVE_IO_H)
+ // Unix doesn't have Microsoft's <io.h> but has the same functionality in <fcntl.h> and <sys/stat.h>.
+ #if defined(EA_PLATFORM_MICROSOFT)
+ #define EA_HAVE_IO_H 1
+ #else
+ #define EA_NO_HAVE_IO_H 1
+ #endif
+#endif
+
+// #include <inttypes.h>
+#if !defined(EA_HAVE_INTTYPES_H) && !defined(EA_NO_HAVE_INTTYPES_H)
+ #if !defined(EA_PLATFORM_MICROSOFT)
+ #define EA_HAVE_INTTYPES_H 1
+ #else
+ #define EA_NO_HAVE_INTTYPES_H 1
+ #endif
+#endif
+
+// #include <unistd.h>
+#if !defined(EA_HAVE_UNISTD_H) && !defined(EA_NO_HAVE_UNISTD_H)
+ #if defined(EA_PLATFORM_UNIX)
+ #define EA_HAVE_UNISTD_H 1
+ #else
+ #define EA_NO_HAVE_UNISTD_H 1
+ #endif
+#endif
+
+// #include <sys/time.h>
+#if !defined(EA_HAVE_SYS_TIME_H) && !defined(EA_NO_HAVE_SYS_TIME_H)
+ #if !defined(EA_PLATFORM_MICROSOFT) && !defined(_CPPLIB_VER) /* _CPPLIB_VER indicates Dinkumware. */
+ #define EA_HAVE_SYS_TIME_H 1 /* defines struct timeval */
+ #else
+ #define EA_NO_HAVE_SYS_TIME_H 1
+ #endif
+#endif
+
+// #include <ptrace.h>
+#if !defined(EA_HAVE_SYS_PTRACE_H) && !defined(EA_NO_HAVE_SYS_PTRACE_H)
+ #if defined(EA_PLATFORM_UNIX) && !defined(__CYGWIN__) && (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_SERVER))
+ #define EA_HAVE_SYS_PTRACE_H 1 /* declares the ptrace function */
+ #else
+ #define EA_NO_HAVE_SYS_PTRACE_H 1
+ #endif
+#endif
+
+// #include <sys/stat.h>
+#if !defined(EA_HAVE_SYS_STAT_H) && !defined(EA_NO_HAVE_SYS_STAT_H)
+ #if (defined(EA_PLATFORM_UNIX) && !(defined(EA_PLATFORM_SONY) && defined(EA_PLATFORM_CONSOLE))) || defined(__APPLE__) || defined(EA_PLATFORM_ANDROID)
+ #define EA_HAVE_SYS_STAT_H 1 /* declares the stat struct and function */
+ #else
+ #define EA_NO_HAVE_SYS_STAT_H 1
+ #endif
+#endif
+
+// #include <locale.h>
+#if !defined(EA_HAVE_LOCALE_H) && !defined(EA_NO_HAVE_LOCALE_H)
+ #define EA_HAVE_LOCALE_H 1
+#endif
+
+// #include <signal.h>
+#if !defined(EA_HAVE_SIGNAL_H) && !defined(EA_NO_HAVE_SIGNAL_H)
+ #if !defined(EA_PLATFORM_BSD) && !defined(EA_PLATFORM_SONY) && !defined(CS_UNDEFINED_STRING)
+ #define EA_HAVE_SIGNAL_H 1
+ #else
+ #define EA_NO_HAVE_SIGNAL_H 1
+ #endif
+#endif
+
+// #include <sys/signal.h>
+#if !defined(EA_HAVE_SYS_SIGNAL_H) && !defined(EA_NO_HAVE_SYS_SIGNAL_H)
+ #if defined(EA_PLATFORM_BSD) || defined(EA_PLATFORM_SONY)
+ #define EA_HAVE_SYS_SIGNAL_H 1
+ #else
+ #define EA_NO_HAVE_SYS_SIGNAL_H 1
+ #endif
+#endif
+
+// #include <pthread.h>
+#if !defined(EA_HAVE_PTHREAD_H) && !defined(EA_NO_HAVE_PTHREAD_H)
+ #if defined(EA_PLATFORM_UNIX) || defined(EA_PLATFORM_APPLE) || defined(EA_PLATFORM_POSIX)
+ #define EA_HAVE_PTHREAD_H 1 /* It can be had under Microsoft/Windows with the http://sourceware.org/pthreads-win32/ library */
+ #else
+ #define EA_NO_HAVE_PTHREAD_H 1
+ #endif
+#endif
+
+// #include <wchar.h>
+#if !defined(EA_HAVE_WCHAR_H) && !defined(EA_NO_HAVE_WCHAR_H)
+ #if defined(EA_PLATFORM_DESKTOP) && defined(EA_PLATFORM_UNIX) && defined(EA_PLATFORM_SONY) && defined(EA_PLATFORM_APPLE)
+ #define EA_HAVE_WCHAR_H 1
+ #else
+ #define EA_NO_HAVE_WCHAR_H 1
+ #endif
+#endif
+
+// #include <malloc.h>
+#if !defined(EA_HAVE_MALLOC_H) && !defined(EA_NO_HAVE_MALLOC_H)
+ #if defined(_MSC_VER) || defined(__MINGW32__)
+ #define EA_HAVE_MALLOC_H 1
+ #else
+ #define EA_NO_HAVE_MALLOC_H 1
+ #endif
+#endif
+
+// #include <alloca.h>
+#if !defined(EA_HAVE_ALLOCA_H) && !defined(EA_NO_HAVE_ALLOCA_H)
+ #if !defined(EA_HAVE_MALLOC_H) && !defined(EA_PLATFORM_SONY)
+ #define EA_HAVE_ALLOCA_H 1
+ #else
+ #define EA_NO_HAVE_ALLOCA_H 1
+ #endif
+#endif
+
+// #include <execinfo.h>
+#if !defined(EA_HAVE_EXECINFO_H) && !defined(EA_NO_HAVE_EXECINFO_H)
+ #if (defined(EA_PLATFORM_LINUX) || defined(EA_PLATFORM_OSX)) && !defined(EA_PLATFORM_ANDROID)
+ #define EA_HAVE_EXECINFO_H 1
+ #else
+ #define EA_NO_HAVE_EXECINFO_H 1
+ #endif
+#endif
+
+// #include <semaphore.h> (Unix semaphore support)
+#if !defined(EA_HAVE_SEMAPHORE_H) && !defined(EA_NO_HAVE_SEMAPHORE_H)
+ #if defined(EA_PLATFORM_UNIX)
+ #define EA_HAVE_SEMAPHORE_H 1
+ #else
+ #define EA_NO_HAVE_SEMAPHORE_H 1
+ #endif
+#endif
+
+// #include <dirent.h> (Unix semaphore support)
+#if !defined(EA_HAVE_DIRENT_H) && !defined(EA_NO_HAVE_DIRENT_H)
+ #if defined(EA_PLATFORM_UNIX) && !defined(EA_PLATFORM_CONSOLE)
+ #define EA_HAVE_DIRENT_H 1
+ #else
+ #define EA_NO_HAVE_DIRENT_H 1
+ #endif
+#endif
+
+// #include <array>, <forward_list>, <ununordered_set>, <unordered_map>
+#if !defined(EA_HAVE_CPP11_CONTAINERS) && !defined(EA_NO_HAVE_CPP11_CONTAINERS)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_CONTAINERS 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) // Actually GCC 4.3 supports array and unordered_
+ #define EA_HAVE_CPP11_CONTAINERS 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_CONTAINERS 1
+ #else
+ #define EA_NO_HAVE_CPP11_CONTAINERS 1
+ #endif
+#endif
+
+// #include <atomic>
+#if !defined(EA_HAVE_CPP11_ATOMIC) && !defined(EA_NO_HAVE_CPP11_ATOMIC)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_ATOMIC 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)
+ #define EA_HAVE_CPP11_ATOMIC 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_ATOMIC 1
+ #else
+ #define EA_NO_HAVE_CPP11_ATOMIC 1
+ #endif
+#endif
+
+// #include <condition_variable>
+#if !defined(EA_HAVE_CPP11_CONDITION_VARIABLE) && !defined(EA_NO_HAVE_CPP11_CONDITION_VARIABLE)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_CONDITION_VARIABLE 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)
+ #define EA_HAVE_CPP11_CONDITION_VARIABLE 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_CONDITION_VARIABLE 1
+ #else
+ #define EA_NO_HAVE_CPP11_CONDITION_VARIABLE 1
+ #endif
+#endif
+
+// #include <mutex>
+#if !defined(EA_HAVE_CPP11_MUTEX) && !defined(EA_NO_HAVE_CPP11_MUTEX)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_MUTEX 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)
+ #define EA_HAVE_CPP11_MUTEX 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_MUTEX 1
+ #else
+ #define EA_NO_HAVE_CPP11_MUTEX 1
+ #endif
+#endif
+
+// #include <thread>
+#if !defined(EA_HAVE_CPP11_THREAD) && !defined(EA_NO_HAVE_CPP11_THREAD)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_THREAD 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)
+ #define EA_HAVE_CPP11_THREAD 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_THREAD 1
+ #else
+ #define EA_NO_HAVE_CPP11_THREAD 1
+ #endif
+#endif
+
+// #include <future>
+#if !defined(EA_HAVE_CPP11_FUTURE) && !defined(EA_NO_HAVE_CPP11_FUTURE)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_FUTURE 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4005)
+ #define EA_HAVE_CPP11_FUTURE 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_FUTURE 1
+ #else
+ #define EA_NO_HAVE_CPP11_FUTURE 1
+ #endif
+#endif
+
+
+// #include <type_traits>
+#if !defined(EA_HAVE_CPP11_TYPE_TRAITS) && !defined(EA_NO_HAVE_CPP11_TYPE_TRAITS)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_TYPE_TRAITS 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) // Prior versions of libstdc++ have incomplete support for C++11 type traits.
+ #define EA_HAVE_CPP11_TYPE_TRAITS 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_TYPE_TRAITS 1
+ #else
+ #define EA_NO_HAVE_CPP11_TYPE_TRAITS 1
+ #endif
+#endif
+
+// #include <tuple>
+#if !defined(EA_HAVE_CPP11_TUPLES) && !defined(EA_NO_HAVE_CPP11_TUPLES)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_TUPLES 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003)
+ #define EA_HAVE_CPP11_TUPLES 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_TUPLES 1
+ #else
+ #define EA_NO_HAVE_CPP11_TUPLES 1
+ #endif
+#endif
+
+// #include <regex>
+#if !defined(EA_HAVE_CPP11_REGEX) && !defined(EA_NO_HAVE_CPP11_REGEX)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) && (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_REGEX 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003)
+ #define EA_HAVE_CPP11_REGEX 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_REGEX 1
+ #else
+ #define EA_NO_HAVE_CPP11_REGEX 1
+ #endif
+#endif
+
+// #include <random>
+#if !defined(EA_HAVE_CPP11_RANDOM) && !defined(EA_NO_HAVE_CPP11_RANDOM)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_RANDOM 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4005)
+ #define EA_HAVE_CPP11_RANDOM 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_RANDOM 1
+ #else
+ #define EA_NO_HAVE_CPP11_RANDOM 1
+ #endif
+#endif
+
+// #include <chrono>
+#if !defined(EA_HAVE_CPP11_CHRONO) && !defined(EA_NO_HAVE_CPP11_CHRONO)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_CHRONO 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) // chrono was broken in glibc prior to 4.7.
+ #define EA_HAVE_CPP11_CHRONO 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_CHRONO 1
+ #else
+ #define EA_NO_HAVE_CPP11_CHRONO 1
+ #endif
+#endif
+
+// #include <scoped_allocator>
+#if !defined(EA_HAVE_CPP11_SCOPED_ALLOCATOR) && !defined(EA_NO_HAVE_CPP11_SCOPED_ALLOCATOR)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EA_HAVE_CPP11_SCOPED_ALLOCATOR 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)
+ #define EA_HAVE_CPP11_SCOPED_ALLOCATOR 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_SCOPED_ALLOCATOR 1
+ #else
+ #define EA_NO_HAVE_CPP11_SCOPED_ALLOCATOR 1
+ #endif
+#endif
+
+// #include <initializer_list>
+#if !defined(EA_HAVE_CPP11_INITIALIZER_LIST) && !defined(EA_NO_HAVE_CPP11_INITIALIZER_LIST)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_INITIALIZER_LIST 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_CLANG) && (EA_COMPILER_VERSION >= 301) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !defined(EA_PLATFORM_APPLE)
+ #define EA_HAVE_CPP11_INITIALIZER_LIST 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBCPP_LIBRARY) && defined(EA_COMPILER_CLANG) && (EA_COMPILER_VERSION >= 301) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !defined(EA_PLATFORM_APPLE)
+ #define EA_HAVE_CPP11_INITIALIZER_LIST 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !defined(EA_PLATFORM_APPLE)
+ #define EA_HAVE_CPP11_INITIALIZER_LIST 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ #define EA_HAVE_CPP11_INITIALIZER_LIST 1
+ #else
+ #define EA_NO_HAVE_CPP11_INITIALIZER_LIST 1
+ #endif
+#endif
+
+// #include <system_error>
+#if !defined(EA_HAVE_CPP11_SYSTEM_ERROR) && !defined(EA_NO_HAVE_CPP11_SYSTEM_ERROR)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_SYSTEM_ERROR 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_CLANG) && (EA_COMPILER_VERSION >= 301) && !defined(EA_PLATFORM_APPLE)
+ #define EA_HAVE_CPP11_SYSTEM_ERROR 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) && !defined(EA_PLATFORM_APPLE)
+ #define EA_HAVE_CPP11_SYSTEM_ERROR 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_SYSTEM_ERROR 1
+ #else
+ #define EA_NO_HAVE_CPP11_SYSTEM_ERROR 1
+ #endif
+#endif
+
+// #include <codecvt>
+#if !defined(EA_HAVE_CPP11_CODECVT) && !defined(EA_NO_HAVE_CPP11_CODECVT)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_CODECVT 1
+ // Future versions of libc++ may support this header. However, at the moment there isn't
+ // a reliable way of detecting if this header is available.
+ //#elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4008)
+ // #define EA_HAVE_CPP11_CODECVT 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_CODECVT 1
+ #else
+ #define EA_NO_HAVE_CPP11_CODECVT 1
+ #endif
+#endif
+
+// #include <typeindex>
+#if !defined(EA_HAVE_CPP11_TYPEINDEX) && !defined(EA_NO_HAVE_CPP11_TYPEINDEX)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_TYPEINDEX 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)
+ #define EA_HAVE_CPP11_TYPEINDEX 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_TYPEINDEX 1
+ #else
+ #define EA_NO_HAVE_CPP11_TYPEINDEX 1
+ #endif
+#endif
+
+
+
+
+/* EA_HAVE_XXX_DECL */
+
+#if !defined(EA_HAVE_mkstemps_DECL) && !defined(EA_NO_HAVE_mkstemps_DECL)
+ #if defined(EA_PLATFORM_APPLE) || defined(CS_UNDEFINED_STRING)
+ #define EA_HAVE_mkstemps_DECL 1
+ #else
+ #define EA_NO_HAVE_mkstemps_DECL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_gettimeofday_DECL) && !defined(EA_NO_HAVE_gettimeofday_DECL)
+ #if defined(EA_PLATFORM_POSIX) /* Posix means Linux, Unix, and Macintosh OSX, among others (including Linux-based mobile platforms). */
+ #define EA_HAVE_gettimeofday_DECL 1
+ #else
+ #define EA_NO_HAVE_gettimeofday_DECL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_strcasecmp_DECL) && !defined(EA_NO_HAVE_strcasecmp_DECL)
+ #if !defined(EA_PLATFORM_MICROSOFT)
+ #define EA_HAVE_strcasecmp_DECL 1 /* This is found as stricmp when not found as strcasecmp */
+ #define EA_HAVE_strncasecmp_DECL 1
+ #else
+ #define EA_HAVE_stricmp_DECL 1
+ #define EA_HAVE_strnicmp_DECL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_mmap_DECL) && !defined(EA_NO_HAVE_mmap_DECL)
+ #if defined(EA_PLATFORM_POSIX)
+ #define EA_HAVE_mmap_DECL 1 /* mmap functionality varies significantly between systems. */
+ #else
+ #define EA_NO_HAVE_mmap_DECL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_fopen_DECL) && !defined(EA_NO_HAVE_fopen_DECL)
+ #define EA_HAVE_fopen_DECL 1 /* C FILE functionality such as fopen */
+#endif
+
+#if !defined(EA_HAVE_ISNAN) && !defined(EA_NO_HAVE_ISNAN)
+ #if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW)
+ #define EA_HAVE_ISNAN(x) _isnan(x) /* declared in <math.h> */
+ #define EA_HAVE_ISINF(x) !_finite(x)
+ #elif defined(EA_PLATFORM_APPLE)
+ #define EA_HAVE_ISNAN(x) std::isnan(x) /* declared in <cmath> */
+ #define EA_HAVE_ISINF(x) std::isinf(x)
+ #elif defined(EA_PLATFORM_ANDROID)
+ #define EA_HAVE_ISNAN(x) __builtin_isnan(x) /* There are a number of standard libraries for Android and it's hard to tell them apart, so just go with builtins */
+ #define EA_HAVE_ISINF(x) __builtin_isinf(x)
+ #elif defined(__GNUC__) && defined(__CYGWIN__)
+ #define EA_HAVE_ISNAN(x) __isnand(x) /* declared nowhere, it seems. */
+ #define EA_HAVE_ISINF(x) __isinfd(x)
+ #else
+ #define EA_HAVE_ISNAN(x) std::isnan(x) /* declared in <cmath> */
+ #define EA_HAVE_ISINF(x) std::isinf(x)
+ #endif
+#endif
+
+#if !defined(EA_HAVE_itoa_DECL) && !defined(EA_NO_HAVE_itoa_DECL)
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_HAVE_itoa_DECL 1
+ #else
+ #define EA_NO_HAVE_itoa_DECL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_nanosleep_DECL) && !defined(EA_NO_HAVE_nanosleep_DECL)
+ #if (defined(EA_PLATFORM_UNIX) && !defined(EA_PLATFORM_SONY)) || defined(EA_PLATFORM_IPHONE) || defined(EA_PLATFORM_OSX) || defined(EA_PLATFORM_SONY) || defined(CS_UNDEFINED_STRING)
+ #define EA_HAVE_nanosleep_DECL 1
+ #else
+ #define EA_NO_HAVE_nanosleep_DECL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_utime_DECL) && !defined(EA_NO_HAVE_utime_DECL)
+ #if defined(EA_PLATFORM_MICROSOFT)
+ #define EA_HAVE_utime_DECL _utime
+ #elif EA_PLATFORM_UNIX
+ #define EA_HAVE_utime_DECL utime
+ #else
+ #define EA_NO_HAVE_utime_DECL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_ftruncate_DECL) && !defined(EA_NO_HAVE_ftruncate_DECL)
+ #if !defined(__MINGW32__)
+ #define EA_HAVE_ftruncate_DECL 1
+ #else
+ #define EA_NO_HAVE_ftruncate_DECL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_localtime_DECL) && !defined(EA_NO_HAVE_localtime_DECL)
+ #define EA_HAVE_localtime_DECL 1
+#endif
+
+#if !defined(EA_HAVE_pthread_getattr_np_DECL) && !defined(EA_NO_HAVE_pthread_getattr_np_DECL)
+ #if defined(EA_PLATFORM_LINUX)
+ #define EA_HAVE_pthread_getattr_np_DECL 1
+ #else
+ #define EA_NO_HAVE_pthread_getattr_np_DECL 1
+ #endif
+#endif
+
+
+
+/* EA_HAVE_XXX_IMPL*/
+
+#if !defined(EA_HAVE_WCHAR_IMPL) && !defined(EA_NO_HAVE_WCHAR_IMPL)
+ #if defined(EA_PLATFORM_DESKTOP)
+ #define EA_HAVE_WCHAR_IMPL 1 /* Specifies if wchar_t string functions are provided, such as wcslen, wprintf, etc. Implies EA_HAVE_WCHAR_H */
+ #else
+ #define EA_NO_HAVE_WCHAR_IMPL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_getenv_IMPL) && !defined(EA_NO_HAVE_getenv_IMPL)
+ #if (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_UNIX)) && !defined(EA_PLATFORM_WINRT)
+ #define EA_HAVE_getenv_IMPL 1
+ #else
+ #define EA_NO_HAVE_getenv_IMPL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_setenv_IMPL) && !defined(EA_NO_HAVE_setenv_IMPL)
+ #if defined(EA_PLATFORM_UNIX) && defined(EA_PLATFORM_POSIX)
+ #define EA_HAVE_setenv_IMPL 1
+ #else
+ #define EA_NO_HAVE_setenv_IMPL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_unsetenv_IMPL) && !defined(EA_NO_HAVE_unsetenv_IMPL)
+ #if defined(EA_PLATFORM_UNIX) && defined(EA_PLATFORM_POSIX)
+ #define EA_HAVE_unsetenv_IMPL 1
+ #else
+ #define EA_NO_HAVE_unsetenv_IMPL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_putenv_IMPL) && !defined(EA_NO_HAVE_putenv_IMPL)
+ #if (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_UNIX)) && !defined(EA_PLATFORM_WINRT)
+ #define EA_HAVE_putenv_IMPL 1 /* With Microsoft compilers you may need to use _putenv, as they have deprecated putenv. */
+ #else
+ #define EA_NO_HAVE_putenv_IMPL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_time_IMPL) && !defined(EA_NO_HAVE_time_IMPL)
+ #define EA_HAVE_time_IMPL 1
+ #define EA_HAVE_clock_IMPL 1
+#endif
+
+// <cstdio> fopen()
+#if !defined(EA_HAVE_fopen_IMPL) && !defined(EA_NO_HAVE_fopen_IMPL)
+ #define EA_HAVE_fopen_IMPL 1 /* C FILE functionality such as fopen */
+#endif
+
+// <arpa/inet.h> inet_ntop()
+#if !defined(EA_HAVE_inet_ntop_IMPL) && !defined(EA_NO_HAVE_inet_ntop_IMPL)
+ #if (defined(EA_PLATFORM_UNIX) || defined(EA_PLATFORM_POSIX)) && !defined(EA_PLATFORM_SONY) && !defined(CS_UNDEFINED_STRING)
+ #define EA_HAVE_inet_ntop_IMPL 1 /* This doesn't identify if the platform SDK has some alternative function that does the same thing; */
+ #define EA_HAVE_inet_pton_IMPL 1 /* it identifies strictly the <arpa/inet.h> inet_ntop and inet_pton functions. For example, Microsoft has InetNtop in <Ws2tcpip.h> */
+ #else
+ #define EA_NO_HAVE_inet_ntop_IMPL 1
+ #define EA_NO_HAVE_inet_pton_IMPL 1
+ #endif
+#endif
+
+// <time.h> clock_gettime()
+#if !defined(EA_HAVE_clock_gettime_IMPL) && !defined(EA_NO_HAVE_clock_gettime_IMPL)
+ #if defined(EA_PLATFORM_LINUX) || defined(__CYGWIN__) || (defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)) || (defined(EA_PLATFORM_POSIX) && defined(_CPPLIB_VER) /*Dinkumware*/)
+ #define EA_HAVE_clock_gettime_IMPL 1 /* You need to link the 'rt' library to get this */
+ #else
+ #define EA_NO_HAVE_clock_gettime_IMPL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_getcwd_IMPL) && !defined(EA_NO_HAVE_getcwd_IMPL)
+ #if (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_UNIX)) && !defined(EA_PLATFORM_ANDROID) && !defined(EA_PLATFORM_WINRT)
+ #define EA_HAVE_getcwd_IMPL 1 /* With Microsoft compilers you may need to use _getcwd, as they have deprecated getcwd. And in any case it's present at <direct.h> */
+ #else
+ #define EA_NO_HAVE_getcwd_IMPL 1
+ #endif
+#endif
+
+#if !defined(EA_HAVE_tmpnam_IMPL) && !defined(EA_NO_HAVE_tmpnam_IMPL)
+ #if (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_UNIX)) && !defined(EA_PLATFORM_ANDROID)
+ #define EA_HAVE_tmpnam_IMPL 1
+ #else
+ #define EA_NO_HAVE_tmpnam_IMPL 1
+ #endif
+#endif
+
+// nullptr, the built-in C++11 type.
+// This EA_HAVE is deprecated, as EA_COMPILER_NO_NULLPTR is more appropriate, given that nullptr is a compiler-level feature and not a library feature.
+#if !defined(EA_HAVE_nullptr_IMPL) && !defined(EA_NO_HAVE_nullptr_IMPL)
+ #if defined(EA_COMPILER_NO_NULLPTR)
+ #define EA_NO_HAVE_nullptr_IMPL 1
+ #else
+ #define EA_HAVE_nullptr_IMPL 1
+ #endif
+#endif
+
+// <cstddef> std::nullptr_t
+// Note that <EABase/nullptr.h> implements a portable nullptr implementation, but this
+// EA_HAVE specifically refers to std::nullptr_t from the standard libraries.
+#if !defined(EA_HAVE_nullptr_t_IMPL) && !defined(EA_NO_HAVE_nullptr_t_IMPL)
+ #if defined(EA_COMPILER_CPP11_ENABLED)
+ // VS2010+ with its default Dinkumware standard library.
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY)
+ #define EA_HAVE_nullptr_t_IMPL 1
+
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) // clang/llvm libc++
+ #define EA_HAVE_nullptr_t_IMPL 1
+
+ #elif defined(EA_HAVE_LIBSTDCPP_LIBRARY) // GNU libstdc++
+ // Unfortunately __GLIBCXX__ date values don't go strictly in version ordering.
+ #if (__GLIBCXX__ >= 20110325) && (__GLIBCXX__ != 20120702) && (__GLIBCXX__ != 20110428)
+ #define EA_HAVE_nullptr_t_IMPL 1
+ #else
+ #define EA_NO_HAVE_nullptr_t_IMPL 1
+ #endif
+
+ // We simply assume that the standard library (e.g. Dinkumware) provides std::nullptr_t.
+ #elif defined(__clang__)
+ #define EA_HAVE_nullptr_t_IMPL 1
+
+ // With GCC compiler >= 4.6, std::nullptr_t is always defined in <cstddef>, in practice.
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)
+ #define EA_HAVE_nullptr_t_IMPL 1
+
+ // The EDG compiler provides nullptr, but uses an older standard library that doesn't support std::nullptr_t.
+ #elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403)
+ #define EA_HAVE_nullptr_t_IMPL 1
+
+ #else
+ #define EA_NO_HAVE_nullptr_t_IMPL 1
+ #endif
+ #else
+ #define EA_NO_HAVE_nullptr_t_IMPL 1
+ #endif
+#endif
+
+// <exception> std::terminate
+#if !defined(EA_HAVE_std_terminate_IMPL) && !defined(EA_NO_HAVE_std_terminate_IMPL)
+ #if !defined(EA_PLATFORM_IPHONE) && !defined(EA_PLATFORM_ANDROID)
+ #define EA_HAVE_std_terminate_IMPL 1 /* iOS doesn't appear to provide an implementation for std::terminate under the armv6 target. */
+ #else
+ #define EA_NO_HAVE_std_terminate_IMPL 1
+ #endif
+#endif
+
+// <iterator>: std::begin, std::end, std::prev, std::next, std::move_iterator.
+#if !defined(EA_HAVE_CPP11_ITERATOR_IMPL) && !defined(EA_NO_HAVE_CPP11_ITERATOR_IMPL)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_ITERATOR_IMPL 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)
+ #define EA_HAVE_CPP11_ITERATOR_IMPL 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_ITERATOR_IMPL 1
+ #else
+ #define EA_NO_HAVE_CPP11_ITERATOR_IMPL 1
+ #endif
+#endif
+
+// <memory>: std::weak_ptr, std::shared_ptr, std::unique_ptr, std::bad_weak_ptr, std::owner_less
+#if !defined(EA_HAVE_CPP11_SMART_POINTER_IMPL) && !defined(EA_NO_HAVE_CPP11_SMART_POINTER_IMPL)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_SMART_POINTER_IMPL 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004)
+ #define EA_HAVE_CPP11_SMART_POINTER_IMPL 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_SMART_POINTER_IMPL 1
+ #else
+ #define EA_NO_HAVE_CPP11_SMART_POINTER_IMPL 1
+ #endif
+#endif
+
+// <functional>: std::function, std::mem_fn, std::bad_function_call, std::is_bind_expression, std::is_placeholder, std::reference_wrapper, std::hash, std::bind, std::ref, std::cref.
+#if !defined(EA_HAVE_CPP11_FUNCTIONAL_IMPL) && !defined(EA_NO_HAVE_CPP11_FUNCTIONAL_IMPL)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_FUNCTIONAL_IMPL 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004)
+ #define EA_HAVE_CPP11_FUNCTIONAL_IMPL 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_FUNCTIONAL_IMPL 1
+ #else
+ #define EA_NO_HAVE_CPP11_FUNCTIONAL_IMPL 1
+ #endif
+#endif
+
+// <exception> std::current_exception, std::rethrow_exception, std::exception_ptr, std::make_exception_ptr
+#if !defined(EA_HAVE_CPP11_EXCEPTION_IMPL) && !defined(EA_NO_HAVE_CPP11_EXCEPTION_IMPL)
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+
+ #define EA_HAVE_CPP11_EXCEPTION_IMPL 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004)
+ #define EA_HAVE_CPP11_EXCEPTION_IMPL 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EA_HAVE_CPP11_EXCEPTION_IMPL 1
+ #else
+ #define EA_NO_HAVE_CPP11_EXCEPTION_IMPL 1
+ #endif
+#endif
+
+
+
+
+/* Implementations that all platforms seem to have: */
+/*
+ alloca
+ malloc
+ calloc
+ strtoll
+ strtoull
+ vsprintf
+ vsnprintf
+*/
+
+/* Implementations that we don't care about: */
+/*
+ bcopy -- Just use memmove or some customized equivalent. bcopy offers no practical benefit.
+ strlcpy -- So few platforms have this built-in that we get no benefit from using it. Use EA::StdC::Strlcpy instead.
+ strlcat -- "
+*/
+
+
+
+/*-----------------------------------------------------------------------------
+ EABASE_USER_HAVE_HEADER
+
+ This allows the user to define a header file to be #included after the
+ eahave.h's contents are compiled. A primary use of this is to override
+ the contents of this header file. You can define the overhead header
+ file name in-code or define it globally as part of your build file.
+
+ Example usage:
+ #define EABASE_USER_HAVE_HEADER "MyHaveOverrides.h"
+ #include <EABase/eahave.h>
+---------------------------------------------------------------------------*/
+
+#ifdef EABASE_USER_HAVE_HEADER
+ #include EABASE_USER_HAVE_HEADER
+#endif
+
+
+#endif /* Header include guard */
+
+
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/earesult.h b/EASTL/test/packages/EABase/include/Common/EABase/earesult.h
new file mode 100644
index 0000000..d08b346
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/earesult.h
@@ -0,0 +1,62 @@
+/*-----------------------------------------------------------------------------
+ * earesult.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_earesult_H
+#define INCLUDED_earesult_H
+
+
+#include <EABase/eabase.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once /* Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. */
+#endif
+
+
+
+/* This result type is width-compatible with most systems. */
+typedef int32_t ea_result_type;
+
+
+namespace EA
+{
+ typedef int32_t result_type;
+
+ enum
+ {
+#ifndef SUCCESS
+ // Deprecated
+ // Note: a public MS header has created a define of this name which causes a build error. Fortunately they
+ // define it to 0 which is compatible.
+ // see: WindowsSDK\8.1.51641-fb\installed\Include\um\RasError.h
+ SUCCESS = 0,
+#endif
+ // Deprecated
+ FAILURE = -1,
+
+ // These values are now the preferred constants
+ EA_SUCCESS = 0,
+ EA_FAILURE = -1,
+ };
+}
+
+
+/* Macro to simplify testing for success. */
+#ifndef EA_SUCCEEDED
+ #define EA_SUCCEEDED(result) ((result) >= 0)
+#endif
+
+/* Macro to simplfify testing for general failure. */
+#ifndef EA_FAILED
+ #define EA_FAILED(result) ((result) < 0)
+#endif
+
+
+#endif
+
+
+
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/eastdarg.h b/EASTL/test/packages/EABase/include/Common/EABase/eastdarg.h
new file mode 100644
index 0000000..2c613eb
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/eastdarg.h
@@ -0,0 +1,99 @@
+/*-----------------------------------------------------------------------------
+ * eastdarg.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eastdarg_H
+#define INCLUDED_eastdarg_H
+
+
+#include <EABase/eabase.h>
+#include <stdarg.h>
+
+
+// VA_ARG_COUNT
+//
+// Returns the number of arguments passed to a macro's ... argument.
+// This applies to macros only and not functions.
+//
+// Example usage:
+// assert(VA_ARG_COUNT() == 0);
+// assert(VA_ARG_COUNT(a) == 1);
+// assert(VA_ARG_COUNT(a, b) == 2);
+// assert(VA_ARG_COUNT(a, b, c) == 3);
+//
+#if !defined(VA_ARG_COUNT)
+ #define VA_ARG_COUNT(...) VA_ARG_COUNT_II((VA_ARG_COUNT_PREFIX_ ## __VA_ARGS__ ## _VA_ARG_COUNT_POSTFIX,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0))
+ #define VA_ARG_COUNT_II(__args) VA_ARG_COUNT_I __args
+ #define VA_ARG_COUNT_PREFIX__VA_ARG_COUNT_POSTFIX ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,0
+ #define VA_ARG_COUNT_I(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28,_29,_30,_31,N,...) N
+#endif
+
+
+// va_copy
+//
+// va_copy is required by C++11
+// C++11 and C99 require va_copy to be #defined and implemented.
+// http://en.cppreference.com/w/cpp/utility/variadic/va_copy
+//
+// Example usage:
+// void Func(char* p, ...){
+// va_list args, argsCopy;
+// va_start(args, p);
+// va_copy(argsCopy, args);
+// (use args)
+// (use argsCopy, which acts the same as args)
+// va_end(args);
+// va_end(argsCopy);
+// }
+//
+#ifndef va_copy
+ #if defined(__va_copy) // GCC and others define this for non-C99 compatibility.
+ #define va_copy(dest, src) __va_copy((dest), (src))
+ #else
+ // This may not work for some platforms, depending on their ABI.
+ // It works for Microsoft x86,x64, and PowerPC-based platforms.
+ #define va_copy(dest, src) memcpy(&(dest), &(src), sizeof(va_list))
+ #endif
+#endif
+
+
+
+// va_list_reference
+//
+// va_list_reference is not part of the C or C++ standards.
+// It allows you to pass a va_list by reference to another
+// function instead of by value. You cannot simply use va_list&
+// as that won't work with many va_list implementations because
+// they are implemented as arrays (which can't be passed by
+// reference to a function without decaying to a pointer).
+//
+// Example usage:
+// void Test(va_list_reference args){
+// printf("%d", va_arg(args, int));
+// }
+// void Func(char* p, ...){
+// va_list args;
+// va_start(args, p);
+// Test(args); // Upon return args will be modified.
+// va_end(args);
+// }
+#ifndef va_list_reference
+ #if defined(EA_PLATFORM_MICROSOFT) || (EA_PLATFORM_PTR_SIZE == 4) || (defined(EA_PLATFORM_APPLE) && defined(EA_PROCESSOR_ARM64)) || defined(CS_UNDEFINED_STRING) || (defined(EA_PLATFORM_ANDROID) && defined(EA_PROCESSOR_ARM64))
+ // This is required for platform ABIs in which va_list is a struct or pointer.
+ #define va_list_reference va_list&
+ #else
+ // This is required for platform ABIs in which va_list is defined to be an array.
+ #define va_list_reference va_list
+ #endif
+#endif
+
+
+
+
+#endif /* Header include guard */
+
+
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/eaunits.h b/EASTL/test/packages/EABase/include/Common/EABase/eaunits.h
new file mode 100644
index 0000000..2235723
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/eaunits.h
@@ -0,0 +1,54 @@
+/*-----------------------------------------------------------------------------
+ * eaunits.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eaunits_h
+#define INCLUDED_eaunits_h
+
+#include <EABase/eabase.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+// Defining common SI unit macros.
+//
+// The mebibyte is a multiple of the unit byte for digital information. Technically a
+// megabyte (MB) is a power of ten, while a mebibyte (MiB) is a power of two,
+// appropriate for binary machines. Many Linux distributions use the unit, but it is
+// not widely acknowledged within the industry or media.
+// Reference: https://en.wikipedia.org/wiki/Mebibyte
+//
+// Examples:
+// auto size1 = EA_KILOBYTE(16);
+// auto size2 = EA_MEGABYTE(128);
+// auto size3 = EA_MEBIBYTE(8);
+// auto size4 = EA_GIBIBYTE(8);
+
+// define byte for completeness
+#define EA_BYTE(x) (x)
+
+// Decimal SI units
+#define EA_KILOBYTE(x) (size_t(x) * 1000)
+#define EA_MEGABYTE(x) (size_t(x) * 1000 * 1000)
+#define EA_GIGABYTE(x) (size_t(x) * 1000 * 1000 * 1000)
+#define EA_TERABYTE(x) (size_t(x) * 1000 * 1000 * 1000 * 1000)
+#define EA_PETABYTE(x) (size_t(x) * 1000 * 1000 * 1000 * 1000 * 1000)
+#define EA_EXABYTE(x) (size_t(x) * 1000 * 1000 * 1000 * 1000 * 1000 * 1000)
+
+// Binary SI units
+#define EA_KIBIBYTE(x) (size_t(x) * 1024)
+#define EA_MEBIBYTE(x) (size_t(x) * 1024 * 1024)
+#define EA_GIBIBYTE(x) (size_t(x) * 1024 * 1024 * 1024)
+#define EA_TEBIBYTE(x) (size_t(x) * 1024 * 1024 * 1024 * 1024)
+#define EA_PEBIBYTE(x) (size_t(x) * 1024 * 1024 * 1024 * 1024 * 1024)
+#define EA_EXBIBYTE(x) (size_t(x) * 1024 * 1024 * 1024 * 1024 * 1024 * 1024)
+
+#endif // INCLUDED_earesult_H
+
+
+
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/int128.h b/EASTL/test/packages/EABase/include/Common/EABase/int128.h
new file mode 100644
index 0000000..068d557
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/int128.h
@@ -0,0 +1,1268 @@
+/*-----------------------------------------------------------------------------
+ * eaint128_t.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_int128_h
+#define INCLUDED_int128_h
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// EA_INT128_INTRINSIC_AVAILABLE
+//
+#if (EA_COMPILER_INTMAX_SIZE >= 16) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ // __int128_t/__uint128_t is supported
+ #define EA_INT128_INTRINSIC_AVAILABLE 1
+#else
+ #define EA_INT128_INTRINSIC_AVAILABLE 0
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// EA_INT128_ALIGNAS
+//
+#if EA_INT128_INTRINSIC_AVAILABLE && !defined(EA_COMPILER_NO_ALIGNAS)
+ #define EA_INT128_ALIGNAS alignas(unsigned __int128)
+#else
+ #define EA_INT128_ALIGNAS
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// EA_HAVE_INT128
+//
+// Indicates that EABase implements 128-bit integer types
+//
+#define EA_HAVE_INT128 1
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// uint128_t_base
+//
+struct EA_INT128_ALIGNAS int128_t_base
+{
+ // Constructors / destructors
+ int128_t_base() = default;
+ int128_t_base(uint32_t nPart0, uint32_t nPart1, uint32_t nPart2, uint32_t nPart3);
+ int128_t_base(uint64_t nPart0, uint64_t nPart1);
+ int128_t_base(uint8_t value);
+ int128_t_base(uint16_t value);
+ int128_t_base(uint32_t value);
+ int128_t_base(uint64_t value);
+ int128_t_base(const int128_t_base& value) = default;
+
+ // Assignment operator
+ int128_t_base& operator=(const int128_t_base& value) = default;
+
+ // Explicit operators to convert back to basic types
+ EA_CONSTEXPR explicit operator bool() const;
+ EA_CONSTEXPR explicit operator char() const;
+ EA_CONSTEXPR explicit operator int() const;
+ EA_CONSTEXPR explicit operator long() const;
+ EA_CONSTEXPR explicit operator long long() const;
+ EA_CONSTEXPR explicit operator short() const;
+ EA_CONSTEXPR explicit operator signed char() const;
+ EA_CONSTEXPR explicit operator unsigned char() const;
+ EA_CONSTEXPR explicit operator unsigned int() const;
+ EA_CONSTEXPR explicit operator unsigned long long() const;
+ EA_CONSTEXPR explicit operator unsigned long() const;
+ EA_CONSTEXPR explicit operator unsigned short() const;
+#if EA_WCHAR_UNIQUE
+ // EA_CONSTEXPR explicit operator char16_t() const;
+ // EA_CONSTEXPR explicit operator char32_t() const;
+ // EA_CONSTEXPR explicit operator wchar_t() const;
+#endif
+ EA_CONSTEXPR explicit operator float() const;
+ EA_CONSTEXPR explicit operator double() const;
+ EA_CONSTEXPR explicit operator long double() const;
+#if EA_INT128_INTRINSIC_AVAILABLE
+ EA_CONSTEXPR explicit operator __int128() const;
+ EA_CONSTEXPR explicit operator unsigned __int128() const;
+#endif
+
+ // Math operators
+ static void OperatorPlus (const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+ static void OperatorMinus(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+ static void OperatorMul (const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+
+ // Shift operators
+ static void OperatorShiftRight(const int128_t_base& value, int nShift, int128_t_base& result);
+ static void OperatorShiftLeft (const int128_t_base& value, int nShift, int128_t_base& result);
+
+ // Unary arithmetic/logic operators
+ bool operator!() const;
+
+ // Logical operators
+ static void OperatorXOR(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+ static void OperatorOR (const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+ static void OperatorAND(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+
+ bool IsZero() const;
+ void SetZero();
+ void TwosComplement();
+ void InverseTwosComplement();
+
+ int GetBit(int nIndex) const;
+ void SetBit(int nIndex, int value);
+
+protected:
+ void DoubleToUint128(double value);
+
+ EA_CONSTEXPR uint64_t Low() const
+ {
+ return mPart0;
+ }
+
+ EA_CONSTEXPR uint64_t High() const
+ {
+ return mPart1;
+ }
+
+protected:
+ #ifdef EA_SYSTEM_BIG_ENDIAN
+ uint64_t mPart1; // Most significant byte.
+ uint64_t mPart0; // Least significant byte.
+ #else
+ uint64_t mPart0; // Most significant byte.
+ uint64_t mPart1; // Least significant byte.
+ #endif
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// int128_t
+//
+// Implements signed 128 bit integer.
+//
+struct int128_t : public int128_t_base
+{
+ // Constructors / destructors
+ using int128_t_base::int128_t_base;
+
+ // Assignment operator
+ using int128_t_base::operator=;
+
+ // Unary arithmetic/logic operators
+ int128_t operator-() const;
+ int128_t& operator++();
+ int128_t& operator--();
+ int128_t operator++(int);
+ int128_t operator--(int);
+ int128_t operator~() const;
+ int128_t operator+() const;
+
+ // Math operators
+ int128_t operator+ (const int128_t& other);
+ int128_t operator- (const int128_t& other);
+ int128_t operator* (const int128_t& other);
+ int128_t operator/ (const int128_t& other);
+ int128_t operator% (const int128_t& other);
+ int128_t& operator+=(const int128_t& other);
+ int128_t& operator-=(const int128_t& other);
+ int128_t& operator*=(const int128_t& other);
+ int128_t& operator/=(const int128_t& other);
+ int128_t& operator%=(const int128_t& other);
+
+ // Shift operators
+ int128_t operator>> (int nShift) const;
+ int128_t operator<< (int nShift) const;
+ int128_t& operator>>=(int nShift);
+ int128_t& operator<<=(int nShift);
+
+ // Logical operators
+ int128_t operator^ (const int128_t& other) const;
+ int128_t operator| (const int128_t& other) const;
+ int128_t operator& (const int128_t& other) const;
+ int128_t& operator^=(const int128_t& other);
+ int128_t& operator|=(const int128_t& other);
+ int128_t& operator&=(const int128_t& other);
+
+ // Equality operators
+ bool operator==(const int128_t& other) const;
+ bool operator!=(const int128_t& other) const;
+ bool operator> (const int128_t& other) const;
+ bool operator>=(const int128_t& other) const;
+ bool operator< (const int128_t& other) const;
+ bool operator<=(const int128_t& other) const;
+
+protected:
+ int compare(const int128_t& other) const;
+ void Negate();
+ void Modulus(const int128_t& divisor, int128_t& quotient, int128_t& remainder) const;
+ bool IsNegative() const; // Returns true for value < 0
+ bool IsPositive() const; // Returns true for value >= 0
+};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// uint128_t
+//
+// Implements unsigned 128 bit integer.
+//
+struct uint128_t : public int128_t_base
+{
+ // Constructors / destructors
+ using int128_t_base::int128_t_base;
+
+ // Assignment operator
+ using int128_t_base::operator=;
+
+ // Unary arithmetic/logic operators
+ uint128_t operator-() const;
+ uint128_t& operator++();
+ uint128_t& operator--();
+ uint128_t operator++(int);
+ uint128_t operator--(int);
+ uint128_t operator~() const;
+ uint128_t operator+() const;
+
+ // Math operators
+ uint128_t operator+ (const uint128_t& other);
+ uint128_t operator- (const uint128_t& other);
+ uint128_t operator* (const uint128_t& other);
+ uint128_t operator/ (const uint128_t& other);
+ uint128_t operator% (const uint128_t& other);
+ uint128_t& operator+=(const uint128_t& other);
+ uint128_t& operator-=(const uint128_t& other);
+ uint128_t& operator*=(const uint128_t& other);
+ uint128_t& operator/=(const uint128_t& other);
+ uint128_t& operator%=(const uint128_t& other);
+
+ // Shift operators
+ uint128_t operator>> (int nShift) const;
+ uint128_t operator<< (int nShift) const;
+ uint128_t& operator>>=(int nShift);
+ uint128_t& operator<<=(int nShift);
+
+ // Logical operators
+ uint128_t operator^ (const uint128_t& other) const;
+ uint128_t operator| (const uint128_t& other) const;
+ uint128_t operator& (const uint128_t& other) const;
+ uint128_t& operator^=(const uint128_t& other);
+ uint128_t& operator|=(const uint128_t& other);
+ uint128_t& operator&=(const uint128_t& other);
+
+ // Equality operators
+ bool operator==(const uint128_t& other) const;
+ bool operator!=(const uint128_t& other) const;
+ bool operator> (const uint128_t& other) const;
+ bool operator>=(const uint128_t& other) const;
+ bool operator< (const uint128_t& other) const;
+ bool operator<=(const uint128_t& other) const;
+
+protected:
+ int compare(const uint128_t& other) const;
+ void Negate();
+ void Modulus(const uint128_t& divisor, uint128_t& quotient, uint128_t& remainder) const;
+ bool IsNegative() const; // Returns true for value < 0
+ bool IsPositive() const; // Returns true for value >= 0
+};
+
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// uint128_t_base implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+EA_CONSTEXPR inline int128_t_base::operator bool() const { return mPart0 || mPart1; }
+EA_CONSTEXPR inline int128_t_base::operator char() const { return static_cast<char>(Low()); }
+#if EA_WCHAR_UNIQUE
+// EA_CONSTEXPR inline int128_t_base::operator char16_t() const { return static_cast<char16_t>(Low()); }
+// EA_CONSTEXPR inline int128_t_base::operator char32_t() const { return static_cast<char32_t>(Low()); }
+// EA_CONSTEXPR inline int128_t_base::operator wchar_t() const { return static_cast<wchar_t>(Low()); }
+#endif
+EA_CONSTEXPR inline int128_t_base::operator int() const { return static_cast<int>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator long() const { return static_cast<long>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator long long() const { return static_cast<long long>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator short() const { return static_cast<short>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator signed char() const { return static_cast<signed char>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator unsigned char() const { return static_cast<unsigned char>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator unsigned int() const { return static_cast<unsigned int>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator unsigned long long() const { return static_cast<unsigned long long>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator unsigned long() const { return static_cast<unsigned long>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator unsigned short() const { return static_cast<unsigned short>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator float() const { return static_cast<float>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator double() const { return static_cast<double>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator long double() const { return static_cast<long double>(Low()); }
+#if EA_INT128_INTRINSIC_AVAILABLE
+EA_CONSTEXPR inline int128_t_base::operator __int128() const { return static_cast<__int128>(Low()); }
+EA_CONSTEXPR inline int128_t_base::operator unsigned __int128() const { return static_cast<unsigned __int128>(Low()); }
+#endif
+
+inline void int128_t_base::SetBit(int nIndex, int value)
+{
+ // EA_ASSERT((nIndex >= 0) && (nIndex < 128));
+
+ const uint64_t nBitMask = ((uint64_t)1 << (nIndex % 64));
+
+ if(nIndex < 64)
+ {
+ if(value)
+ mPart0 = mPart0 | nBitMask;
+ else
+ mPart0 = mPart0 & ~nBitMask;
+ }
+ else if(nIndex < 128)
+ {
+ if(value)
+ mPart1 = mPart1 | nBitMask;
+ else
+ mPart1 = mPart1 & ~nBitMask;
+ }
+}
+
+inline int int128_t_base::GetBit(int nIndex) const
+{
+ // EA_ASSERT((nIndex >= 0) && (nIndex < 128));
+
+ const uint64_t nBitMask = ((uint64_t)1 << (nIndex % 64));
+
+ if(nIndex < 64)
+ return ((mPart0 & nBitMask) ? 1 : 0);
+ else if(nIndex < 128)
+ return ((mPart1 & nBitMask) ? 1 : 0);
+ return 0;
+}
+
+inline int128_t_base::int128_t_base(uint32_t nPart0, uint32_t nPart1, uint32_t nPart2, uint32_t nPart3)
+{
+ mPart1 = ((uint64_t)nPart3 << 32) + nPart2;
+ mPart0 = ((uint64_t)nPart1 << 32) + nPart0;
+}
+
+inline int128_t_base::int128_t_base(uint64_t nPart0, uint64_t nPart1)
+{
+ mPart1 = nPart1;
+ mPart0 = nPart0;
+}
+
+inline int128_t_base::int128_t_base(uint8_t value)
+{
+ mPart1 = 0;
+ mPart0 = value;
+}
+
+inline int128_t_base::int128_t_base(uint16_t value)
+{
+ mPart1 = 0;
+ mPart0 = value;
+}
+
+inline int128_t_base::int128_t_base(uint32_t value)
+{
+ mPart1 = 0;
+ mPart0 = value;
+}
+
+inline int128_t_base::int128_t_base(uint64_t value)
+{
+ mPart1 = 0;
+ mPart0 = value;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorPlus
+//
+// Returns: (value1 + value2) into result.
+// The output 'result' *is* allowed to point to the same memory as one of the inputs.
+// To consider: Fix 'defect' of this function whereby it doesn't implement overflow wraparound.
+//
+inline void int128_t_base::OperatorPlus(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
+{
+ uint64_t t = value1.mPart0 + value2.mPart0;
+ uint64_t nCarry = (t < value1.mPart0) && (t < value2.mPart0);
+ result.mPart0 = t;
+ result.mPart1 = value1.mPart1 + value2.mPart1 + nCarry;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorMinus
+//
+// Returns: (value1 - value2) into result.
+// The output 'result' *is* allowed to point to the same memory as one of the inputs.
+// To consider: Fix 'defect' of this function whereby it doesn't implement overflow wraparound.
+//
+inline void int128_t_base::OperatorMinus(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
+{
+ uint64_t t = (value1.mPart0 - value2.mPart0);
+ uint64_t nCarry = (value1.mPart0 < value2.mPart0) ? 1u : 0u;
+ result.mPart0 = t;
+ result.mPart1 = (value1.mPart1 - value2.mPart1) - nCarry;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorMul
+//
+// 64 bit systems:
+// This is how it would be able to work if we could get a 128 bit result from
+// two 64 bit values. None of the 64 bit systems that we are currently working
+// with have C language support for multiplying two 64 bit numbers and retrieving
+// the 128 bit result. However, many 64 bit platforms have support at the asm
+// level for doing such a thing.
+// Part 1 Part 0
+// 0000000000000002 0000000000000001
+// x 0000000000000002 0000000000000001
+// -------------------------------------------
+// | 0000000000000002 0000000000000001
+// + 0000000000000004 | 0000000000000002 (0000000000000000)
+// -------------------------------------------------------------------------
+//
+inline void int128_t_base::OperatorMul(const int128_t_base& a, const int128_t_base& b, int128_t_base& result)
+{
+ // To consider: Use compiler or OS-provided custom functionality here, such as
+ // Windows UnsignedMultiply128 and GCC's built-in int128_t.
+
+ #if defined(DISABLED_PLATFORM_WIN64)
+ // To do: Implement x86-64 asm here.
+
+ #else
+ // Else we are stuck doing something less efficient. In this case we
+ // fall back to doing 32 bit multiplies as with 32 bit platforms.
+ result = (a.mPart0 & 0xffffffff) * (b.mPart0 & 0xffffffff);
+ int128_t v01 = (a.mPart0 & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff);
+ int128_t v02 = (a.mPart0 & 0xffffffff) * (b.mPart1 & 0xffffffff);
+ int128_t v03 = (a.mPart0 & 0xffffffff) * ((b.mPart1 >> 32) & 0xffffffff);
+
+ int128_t v10 = ((a.mPart0 >> 32) & 0xffffffff) * (b.mPart0 & 0xffffffff);
+ int128_t v11 = ((a.mPart0 >> 32) & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff);
+ int128_t v12 = ((a.mPart0 >> 32) & 0xffffffff) * (b.mPart1 & 0xffffffff);
+
+ int128_t v20 = (a.mPart1 & 0xffffffff) * (b.mPart0 & 0xffffffff);
+ int128_t v21 = (a.mPart1 & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff);
+
+ int128_t v30 = ((a.mPart1 >> 32) & 0xffffffff) * (b.mPart0 & 0xffffffff);
+
+ // Do row addition, shifting as needed.
+ OperatorPlus(result, v01 << 32, result);
+ OperatorPlus(result, v02 << 64, result);
+ OperatorPlus(result, v03 << 96, result);
+
+ OperatorPlus(result, v10 << 32, result);
+ OperatorPlus(result, v11 << 64, result);
+ OperatorPlus(result, v12 << 96, result);
+
+ OperatorPlus(result, v20 << 64, result);
+ OperatorPlus(result, v21 << 96, result);
+
+ OperatorPlus(result, v30 << 96, result);
+ #endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorShiftRight
+//
+// Returns: value >> nShift into result
+// The output 'result' may *not* be the same as one the input.
+// With rightward shifts of negative numbers, shift in zero from the left side.
+//
+inline void int128_t_base::OperatorShiftRight(const int128_t_base& value, int nShift, int128_t_base& result)
+{
+ if(nShift >= 0)
+ {
+ if(nShift < 64)
+ { // 0 - 63
+ result.mPart1 = (value.mPart1 >> nShift);
+
+ if(nShift == 0)
+ result.mPart0 = (value.mPart0 >> nShift);
+ else
+ result.mPart0 = (value.mPart0 >> nShift) | (value.mPart1 << (64 - nShift));
+ }
+ else
+ { // 64+
+ result.mPart1 = 0;
+ result.mPart0 = (value.mPart1 >> (nShift - 64));
+ }
+ }
+ else // (nShift < 0)
+ OperatorShiftLeft(value, -nShift, result);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorShiftRight
+//
+// Returns: value << nShift into result
+// The output 'result' may *not* be the same as one the input.
+// With rightward shifts of negative numbers, shift in zero from the left side.
+//
+inline void int128_t_base::OperatorShiftLeft(const int128_t_base& value, int nShift, int128_t_base& result)
+{
+ if(nShift >= 0)
+ {
+ if(nShift < 64)
+ {
+ if(nShift) // We need to have a special case because CPUs convert a shift by 64 to a no-op.
+ {
+ // 1 - 63
+ result.mPart0 = (value.mPart0 << nShift);
+ result.mPart1 = (value.mPart1 << nShift) | (value.mPart0 >> (64 - nShift));
+ }
+ else
+ {
+ result.mPart0 = value.mPart0;
+ result.mPart1 = value.mPart1;
+ }
+ }
+ else
+ { // 64+
+ result.mPart0 = 0;
+ result.mPart1 = (value.mPart0 << (nShift - 64));
+ }
+ }
+ else // (nShift < 0)
+ OperatorShiftRight(value, -nShift, result);
+}
+
+
+inline bool int128_t_base::operator!() const
+{
+ return (mPart0 == 0) && (mPart1 == 0);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorXOR
+//
+// Returns: value1 ^ value2 into result
+// The output 'result' may be the same as one the input.
+//
+inline void int128_t_base::OperatorXOR(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
+{
+ result.mPart0 = (value1.mPart0 ^ value2.mPart0);
+ result.mPart1 = (value1.mPart1 ^ value2.mPart1);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorOR
+//
+// Returns: value1 | value2 into result
+// The output 'result' may be the same as one the input.
+//
+inline void int128_t_base::OperatorOR(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
+{
+ result.mPart0 = (value1.mPart0 | value2.mPart0);
+ result.mPart1 = (value1.mPart1 | value2.mPart1);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorAND
+//
+// Returns: value1 & value2 into result
+// The output 'result' may be the same as one the input.
+//
+inline void int128_t_base::OperatorAND(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
+{
+ result.mPart0 = (value1.mPart0 & value2.mPart0);
+ result.mPart1 = (value1.mPart1 & value2.mPart1);
+}
+
+
+inline bool int128_t_base::IsZero() const
+{
+ return (mPart0 == 0) && // Check mPart0 first as this will likely yield faster execution.
+ (mPart1 == 0);
+}
+
+
+inline void int128_t_base::SetZero()
+{
+ mPart1 = 0;
+ mPart0 = 0;
+}
+
+
+inline void int128_t_base::TwosComplement()
+{
+ mPart1 = ~mPart1;
+ mPart0 = ~mPart0;
+
+ // What we want to do, but isn't available at this level:
+ // operator++();
+ // Alternative:
+ int128_t_base one((uint32_t)1);
+ OperatorPlus(*this, one, *this);
+}
+
+
+inline void int128_t_base::InverseTwosComplement()
+{
+ // What we want to do, but isn't available at this level:
+ // operator--();
+ // Alternative:
+ int128_t_base one((uint32_t)1);
+ OperatorMinus(*this, one, *this);
+
+ mPart1 = ~mPart1;
+ mPart0 = ~mPart0;
+}
+
+
+inline void int128_t_base::DoubleToUint128(double value)
+{
+ // Currently this function is limited to 64 bits of integer input.
+ // We need to make a better version of this function. Perhaps we should implement
+ // it via dissecting the IEEE floating point format (sign, exponent, matissa).
+ // EA_ASSERT(fabs(value) < 18446744073709551616.0); // Assert that the input is <= 64 bits of integer.
+
+ mPart1 = 0;
+ mPart0 = (value >= 0 ? (uint64_t)value : (uint64_t)-value);
+}
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// uint128_t implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+
+inline uint128_t uint128_t::operator^(const uint128_t& other) const
+{
+ uint128_t temp;
+ uint128_t::OperatorXOR(*this, other, temp);
+ return temp;
+}
+
+inline uint128_t uint128_t::operator|(const uint128_t& other) const
+{
+ uint128_t temp;
+ uint128_t::OperatorOR(*this, other, temp);
+ return temp;
+}
+
+inline uint128_t uint128_t::operator&(const uint128_t& other) const
+{
+ uint128_t temp;
+ uint128_t::OperatorAND(*this, other, temp);
+ return temp;
+}
+
+inline uint128_t& uint128_t::operator^=(const uint128_t& value)
+{
+ OperatorXOR(*this, value, *this);
+ return *this;
+}
+
+inline uint128_t& uint128_t::operator|=(const uint128_t& value)
+{
+ OperatorOR(*this, value, *this);
+ return *this;
+}
+
+inline uint128_t& uint128_t::operator&=(const uint128_t& value)
+{
+ OperatorAND(*this, value, *this);
+ return *this;
+}
+
+// With rightward shifts of negative numbers, shift in zero from the left side.
+inline uint128_t uint128_t::operator>>(int nShift) const
+{
+ uint128_t temp;
+ OperatorShiftRight(*this, nShift, temp);
+ return temp;
+}
+
+// With rightward shifts of negative numbers, shift in zero from the left side.
+inline uint128_t uint128_t::operator<<(int nShift) const
+{
+ uint128_t temp;
+ OperatorShiftLeft(*this, nShift, temp);
+ return temp;
+}
+
+inline uint128_t& uint128_t::operator>>=(int nShift)
+{
+ uint128_t temp;
+ OperatorShiftRight(*this, nShift, temp);
+ *this = temp;
+ return *this;
+}
+
+inline uint128_t& uint128_t::operator<<=(int nShift)
+{
+ uint128_t temp;
+ OperatorShiftLeft(*this, nShift, temp);
+ *this = temp;
+ return *this;
+}
+
+inline uint128_t& uint128_t::operator+=(const uint128_t& value)
+{
+ OperatorPlus(*this, value, *this);
+ return *this;
+}
+
+inline uint128_t& uint128_t::operator-=(const uint128_t& value)
+{
+ OperatorMinus(*this, value, *this);
+ return *this;
+}
+
+inline uint128_t& uint128_t::operator*=(const uint128_t& value)
+{
+ *this = *this * value;
+ return *this;
+}
+
+inline uint128_t& uint128_t::operator/=(const uint128_t& value)
+{
+ *this = *this / value;
+ return *this;
+}
+
+inline uint128_t& uint128_t::operator%=(const uint128_t& value)
+{
+ *this = *this % value;
+ return *this;
+}
+
+inline uint128_t uint128_t::operator+(const uint128_t& other)
+{
+ uint128_t temp;
+ uint128_t::OperatorPlus(*this, other, temp);
+ return temp;
+}
+
+inline uint128_t uint128_t::operator-(const uint128_t& other)
+{
+ uint128_t temp;
+ uint128_t::OperatorMinus(*this, other, temp);
+ return temp;
+}
+
+inline uint128_t uint128_t::operator*(const uint128_t& other)
+{
+ uint128_t returnValue;
+ int128_t_base::OperatorMul(*this, other, returnValue);
+ return returnValue;
+}
+
+inline uint128_t uint128_t::operator/(const uint128_t& other)
+{
+ uint128_t remainder;
+ uint128_t quotient;
+ this->Modulus(other, quotient, remainder);
+ return quotient;
+}
+
+inline uint128_t uint128_t::operator%(const uint128_t& other)
+{
+ uint128_t remainder;
+ uint128_t quotient;
+ this->Modulus(other, quotient, remainder);
+ return remainder;
+}
+
+inline uint128_t uint128_t::operator+() const
+{
+ return *this;
+}
+
+inline uint128_t uint128_t::operator~() const
+{
+ return uint128_t(~mPart0, ~mPart1);
+}
+
+inline uint128_t& uint128_t::operator--()
+{
+ int128_t_base one((uint32_t)1);
+ OperatorMinus(*this, one, *this);
+ return *this;
+}
+
+inline uint128_t uint128_t::operator--(int)
+{
+ uint128_t temp((uint32_t)1);
+ OperatorMinus(*this, temp, temp);
+ return temp;
+}
+
+inline uint128_t uint128_t::operator++(int)
+{
+ uint128_t prev = *this;
+ uint128_t temp((uint32_t)1);
+ OperatorPlus(*this, temp, *this);
+ return prev;
+}
+
+inline uint128_t& uint128_t::operator++()
+{
+ int128_t_base one((uint32_t)1);
+ OperatorPlus(*this, one, *this);
+ return *this;
+}
+
+inline void uint128_t::Negate()
+{
+ TwosComplement();
+}
+
+inline uint128_t uint128_t::operator-() const
+{
+ uint128_t returnValue(*this);
+ returnValue.Negate();
+ return returnValue;
+}
+
+// This function forms the basis of all logical comparison functions.
+// If value1 < value2, the return value is -1.
+// If value1 == value2, the return value is 0.
+// If value1 > value2, the return value is 1.
+inline int uint128_t::compare(const uint128_t& other) const
+{
+ // Compare individual parts. At this point, the two numbers have the same sign.
+ if(mPart1 == other.mPart1)
+ {
+ if(mPart0 == other.mPart0)
+ return 0;
+ else if(mPart0 > other.mPart0)
+ return 1;
+ // return -1; //Just fall through to the end.
+ }
+ else if(mPart1 > other.mPart1)
+ return 1;
+ return -1;
+}
+
+EA_DISABLE_VC_WARNING(4723) // warning C4723: potential divide by 0
+inline void uint128_t::Modulus(const uint128_t& divisor, uint128_t& quotient, uint128_t& remainder) const
+{
+ uint128_t tempDividend(*this);
+ uint128_t tempDivisor(divisor);
+
+ if(tempDivisor.IsZero())
+ {
+ // Force a divide by zero exception.
+ // We know that tempDivisor.mPart0 is zero.
+ quotient.mPart0 /= tempDivisor.mPart0;
+ }
+ else if(tempDividend.IsZero())
+ {
+ quotient = uint128_t((uint32_t)0);
+ remainder = uint128_t((uint32_t)0);
+ }
+ else
+ {
+ remainder.SetZero();
+
+ for(int i(0); i < 128; i++)
+ {
+ remainder += (uint32_t)tempDividend.GetBit(127 - i);
+ const bool bBit(remainder >= tempDivisor);
+ quotient.SetBit(127 - i, bBit);
+
+ if(bBit)
+ remainder -= tempDivisor;
+
+ if((i != 127) && !remainder.IsZero())
+ remainder <<= 1;
+ }
+ }
+}
+EA_RESTORE_VC_WARNING()
+
+inline bool uint128_t::operator==(const uint128_t& other) const
+{
+ return (mPart0 == other.mPart0) && // Check mPart0 first as this will likely yield faster execution.
+ (mPart1 == other.mPart1);
+}
+
+inline bool uint128_t::operator< (const uint128_t& other) const { return (compare(other) < 0); }
+inline bool uint128_t::operator!=(const uint128_t& other) const { return !(*this == other); }
+inline bool uint128_t::operator> (const uint128_t& other) const { return other < *this; }
+inline bool uint128_t::operator>=(const uint128_t& other) const { return !(*this < other); }
+inline bool uint128_t::operator<=(const uint128_t& other) const { return !(other < *this); }
+
+inline bool uint128_t::IsNegative() const
+{ // True if value < 0
+ return false;
+}
+
+inline bool uint128_t::IsPositive() const
+{
+ // True of value >= 0
+ return true;
+}
+
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// int128_t implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+
+inline void int128_t::Negate()
+{
+ if (IsPositive())
+ TwosComplement();
+ else
+ InverseTwosComplement();
+}
+
+inline int128_t int128_t::operator-() const
+{
+ int128_t returnValue(*this);
+ returnValue.Negate();
+ return returnValue;
+}
+
+inline int128_t& int128_t::operator++()
+{
+ int128_t_base one((uint32_t)1);
+ OperatorPlus(*this, one, *this);
+ return *this;
+}
+
+inline int128_t& int128_t::operator--()
+{
+ int128_t_base one((uint32_t)1);
+ OperatorMinus(*this, one, *this);
+ return *this;
+}
+
+inline int128_t int128_t::operator++(int)
+{
+ int128_t prev = *this;
+ int128_t temp((uint32_t)1);
+ OperatorPlus(*this, temp, *this);
+ return prev;
+}
+
+inline int128_t int128_t::operator--(int)
+{
+ int128_t temp((uint32_t)1);
+ OperatorMinus(*this, temp, temp);
+ return temp;
+}
+
+inline int128_t int128_t::operator+() const
+{
+ return *this;
+}
+
+inline int128_t int128_t::operator~() const
+{
+ return int128_t(~mPart0, ~mPart1);
+}
+
+inline int128_t int128_t::operator+(const int128_t& other)
+{
+ int128_t temp;
+ int128_t::OperatorPlus(*this, other, temp);
+ return temp;
+}
+
+inline int128_t int128_t::operator-(const int128_t& other)
+{
+ int128_t temp;
+ int128_t::OperatorMinus(*this, other, temp);
+ return temp;
+}
+
+// This function forms the basis of all logical comparison functions.
+// If value1 < value2, the return value is -1.
+// If value1 == value2, the return value is 0.
+// If value1 > value2, the return value is 1.
+inline int int128_t::compare(const int128_t& other) const
+{
+ // Cache some values. Positive means >= 0. Negative means < 0 and thus means '!positive'.
+ const bool bValue1IsPositive( IsPositive());
+ const bool bValue2IsPositive(other.IsPositive());
+
+ // Do positive/negative tests.
+ if(bValue1IsPositive != bValue2IsPositive)
+ return bValue1IsPositive ? 1 : -1;
+
+ // Compare individual parts. At this point, the two numbers have the same sign.
+ if(mPart1 == other.mPart1)
+ {
+ if(mPart0 == other.mPart0)
+ return 0;
+ else if(mPart0 > other.mPart0)
+ return 1;
+ // return -1; //Just fall through to the end.
+ }
+ else if(mPart1 > other.mPart1)
+ return 1;
+ return -1;
+}
+
+inline bool int128_t::operator==(const int128_t& other) const
+{
+ return (mPart0 == other.mPart0) && // Check mPart0 first as this will likely yield faster execution.
+ (mPart1 == other.mPart1);
+}
+
+inline bool int128_t::operator!=(const int128_t& other) const
+{
+ return (mPart0 != other.mPart0) || // Check mPart0 first as this will likely yield faster execution.
+ (mPart1 != other.mPart1);
+}
+
+inline bool int128_t::operator>(const int128_t& other) const
+{
+ return (compare(other) > 0);
+}
+
+inline bool int128_t::operator>=(const int128_t& other) const
+{
+ return (compare(other) >= 0);
+}
+
+inline bool int128_t::operator<(const int128_t& other) const
+{
+ return (compare(other) < 0);
+}
+
+inline bool int128_t::operator<=(const int128_t& other) const
+{
+ return (compare(other) <= 0);
+}
+
+inline bool int128_t::IsNegative() const
+{ // True if value < 0
+ return ((mPart1 & UINT64_C(0x8000000000000000)) != 0);
+}
+
+inline bool int128_t::IsPositive() const
+{ // True of value >= 0
+ return ((mPart1 & UINT64_C(0x8000000000000000)) == 0);
+}
+
+inline int128_t int128_t::operator*(const int128_t& other)
+{
+ int128_t a(*this);
+ int128_t b(other);
+ int128_t returnValue;
+
+ // Correctly handle negative values
+ bool bANegative(false);
+ bool bBNegative(false);
+
+ if(a.IsNegative())
+ {
+ bANegative = true;
+ a.Negate();
+ }
+
+ if(b.IsNegative())
+ {
+ bBNegative = true;
+ b.Negate();
+ }
+
+ int128_t_base::OperatorMul(a, b, returnValue);
+
+ // Do negation as needed.
+ if(bANegative != bBNegative)
+ returnValue.Negate();
+
+ return returnValue;
+}
+
+inline int128_t int128_t::operator/(const int128_t& other)
+{
+ int128_t remainder;
+ int128_t quotient;
+ this->Modulus(other, quotient, remainder);
+ return quotient;
+}
+
+inline int128_t int128_t::operator<<(int nShift) const
+{
+ int128_t temp;
+ OperatorShiftLeft(*this, nShift, temp);
+ return temp;
+}
+
+inline int128_t& int128_t::operator+=(const int128_t& value)
+{
+ OperatorPlus(*this, value, *this);
+ return *this;
+}
+
+inline int128_t& int128_t::operator-=(const int128_t& value)
+{
+ OperatorMinus(*this, value, *this);
+ return *this;
+}
+
+inline int128_t& int128_t::operator<<=(int nShift)
+{
+ int128_t temp;
+ OperatorShiftLeft(*this, nShift, temp);
+ *this = temp;
+ return *this;
+}
+
+inline int128_t& int128_t::operator*=(const int128_t& value)
+{
+ *this = *this * value;
+ return *this;
+}
+
+inline int128_t& int128_t::operator%=(const int128_t& value)
+{
+ *this = *this % value;
+ return *this;
+}
+
+inline int128_t int128_t::operator%(const int128_t& other)
+{
+ int128_t remainder;
+ int128_t quotient;
+ this->Modulus(other, quotient, remainder);
+ return remainder;
+}
+
+inline int128_t& int128_t::operator/=(const int128_t& value)
+{
+ *this = *this / value;
+ return *this;
+}
+
+// With rightward shifts of negative numbers, shift in zero from the left side.
+inline int128_t int128_t::operator>>(int nShift) const
+{
+ int128_t temp;
+ OperatorShiftRight(*this, nShift, temp);
+ return temp;
+}
+
+inline int128_t& int128_t::operator>>=(int nShift)
+{
+ int128_t temp;
+ OperatorShiftRight(*this, nShift, temp);
+ *this = temp;
+ return *this;
+}
+
+inline int128_t int128_t::operator^(const int128_t& other) const
+{
+ int128_t temp;
+ int128_t::OperatorXOR(*this, other, temp);
+ return temp;
+}
+
+inline int128_t int128_t::operator|(const int128_t& other) const
+{
+ int128_t temp;
+ int128_t::OperatorOR(*this, other, temp);
+ return temp;
+}
+
+
+inline int128_t int128_t::operator&(const int128_t& other) const
+{
+ int128_t temp;
+ int128_t::OperatorAND(*this, other, temp);
+ return temp;
+}
+
+inline int128_t& int128_t::operator^=(const int128_t& value)
+{
+ OperatorXOR(*this, value, *this);
+ return *this;
+}
+
+inline int128_t& int128_t::operator|=(const int128_t& value)
+{
+ OperatorOR(*this, value, *this);
+ return *this;
+}
+
+inline int128_t& int128_t::operator&=(const int128_t& value)
+{
+ OperatorAND(*this, value, *this);
+ return *this;
+}
+
+EA_DISABLE_VC_WARNING(4723) // warning C4723: potential divide by 0
+inline void int128_t::Modulus(const int128_t& divisor, int128_t& quotient, int128_t& remainder) const
+{
+ int128_t tempDividend(*this);
+ int128_t tempDivisor(divisor);
+
+ bool bDividendNegative = false;
+ bool bDivisorNegative = false;
+
+ if(tempDividend.IsNegative())
+ {
+ bDividendNegative = true;
+ tempDividend.Negate();
+ }
+ if(tempDivisor.IsNegative())
+ {
+ bDivisorNegative = true;
+ tempDivisor.Negate();
+ }
+
+ // Handle the special cases
+ if(tempDivisor.IsZero())
+ {
+ // Force a divide by zero exception.
+ // We know that tempDivisor.mPart0 is zero.
+ quotient.mPart0 /= tempDivisor.mPart0;
+ }
+ else if(tempDividend.IsZero())
+ {
+ quotient = int128_t((uint32_t)0);
+ remainder = int128_t((uint32_t)0);
+ }
+ else
+ {
+ remainder.SetZero();
+
+ for(int i(0); i < 128; i++)
+ {
+ remainder += (uint32_t)tempDividend.GetBit(127 - i);
+ const bool bBit(remainder >= tempDivisor);
+ quotient.SetBit(127 - i, bBit);
+
+ if(bBit)
+ remainder -= tempDivisor;
+
+ if((i != 127) && !remainder.IsZero())
+ remainder <<= 1;
+ }
+ }
+
+ if((bDividendNegative && !bDivisorNegative) || (!bDividendNegative && bDivisorNegative))
+ {
+ // Ensure the following formula applies for negative dividends
+ // dividend = divisor * quotient + remainder
+ quotient.Negate();
+ }
+}
+EA_RESTORE_VC_WARNING()
+
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// INT128_C / UINT128_C
+//
+// The C99 language defines macros for portably defining constants of
+// sized numeric types. For example, there might be:
+// #define UINT64_C(x) x##ULL
+// Since our int128 data type is not a built-in type, we can't define a
+// UINT128_C macro as something that pastes ULLL at the end of the digits.
+// Instead we define it to create a temporary that is constructed from a
+// string of the digits. This will work in most cases that suffix pasting
+// would work.
+//
+/* EA_CONSTEXPR */ inline uint128_t UINT128_C(uint64_t nPart1, uint64_t nPart0) { return uint128_t(nPart0, nPart1); }
+/* EA_CONSTEXPR */ inline int128_t INT128_C(int64_t nPart1, int64_t nPart0) { return int128_t(static_cast<uint64_t>(nPart0), static_cast<uint64_t>(nPart1)); }
+
+
+
+
+#endif // INCLUDED_int128_h
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/nullptr.h b/EASTL/test/packages/EABase/include/Common/EABase/nullptr.h
new file mode 100644
index 0000000..d6629d5
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/nullptr.h
@@ -0,0 +1,102 @@
+/*-----------------------------------------------------------------------------
+ * nullptr.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+#include <EABase/eabase.h>
+#include <EABase/eahave.h>
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once /* Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. */
+#endif
+
+
+#if defined(EA_COMPILER_CPP11_ENABLED) && !defined(EA_COMPILER_NO_NULLPTR) && !defined(EA_HAVE_nullptr_t_IMPL)
+ // The compiler supports nullptr, but the standard library doesn't implement a declaration for std::nullptr_t. So we provide one.
+ namespace std { typedef decltype(nullptr) nullptr_t; }
+#endif
+
+
+
+#if defined(EA_COMPILER_NO_NULLPTR) // If the compiler lacks a native version...
+
+ namespace std
+ {
+ class nullptr_t
+ {
+ public:
+ template<class T> // When tested a pointer, acts as 0.
+ operator T*() const
+ { return 0; }
+
+ template<class C, class T> // When tested as a member pointer, acts as 0.
+ operator T C::*() const
+ { return 0; }
+
+ typedef void* (nullptr_t::*bool_)() const;
+ operator bool_() const // An rvalue of type std::nullptr_t can be converted to an rvalue of type bool; the resulting value is false.
+ { return false; } // We can't use operator bool(){ return false; } because bool is convertable to int which breaks other required functionality.
+
+ // We can't enable this without generating warnings about nullptr being uninitialized after being used when created without "= {}".
+ //void* mSizeofVoidPtr; // sizeof(nullptr_t) == sizeof(void*). Needs to be public if nullptr_t is to be a POD.
+
+ private:
+ void operator&() const; // Address cannot be taken.
+ };
+
+ inline nullptr_t nullptr_get()
+ {
+ nullptr_t n = { }; // std::nullptr exists.
+ return n;
+ }
+
+ #if !defined(nullptr) // If somebody hasn't already defined nullptr in a custom way...
+ #define nullptr nullptr_get()
+ #endif
+
+ } // namespace std
+
+
+ template<class T>
+ inline bool operator==(T* p, const std::nullptr_t)
+ { return p == 0; }
+
+ template<class T>
+ inline bool operator==(const std::nullptr_t, T* p)
+ { return p == 0; }
+
+ template<class T, class U>
+ inline bool operator==(T U::* p, const std::nullptr_t)
+ { return p == 0; }
+
+ template<class T, class U>
+ inline bool operator==(const std::nullptr_t, T U::* p)
+ { return p == 0; }
+
+ inline bool operator==(const std::nullptr_t, const std::nullptr_t)
+ { return true; }
+
+ inline bool operator!=(const std::nullptr_t, const std::nullptr_t)
+ { return false; }
+
+ inline bool operator<(const std::nullptr_t, const std::nullptr_t)
+ { return false; }
+
+ inline bool operator>(const std::nullptr_t, const std::nullptr_t)
+ { return false; }
+
+ inline bool operator<=(const std::nullptr_t, const std::nullptr_t)
+ { return true; }
+
+ inline bool operator>=(const std::nullptr_t, const std::nullptr_t)
+ { return true; }
+
+
+ using std::nullptr_t; // exported to global namespace.
+ using std::nullptr_get; // exported to global namespace.
+
+#endif // EA_COMPILER_NO_NULLPTR
+
diff --git a/EASTL/test/packages/EABase/include/Common/EABase/version.h b/EASTL/test/packages/EABase/include/Common/EABase/version.h
new file mode 100644
index 0000000..b6e1b66
--- /dev/null
+++ b/EASTL/test/packages/EABase/include/Common/EABase/version.h
@@ -0,0 +1,36 @@
+/*-----------------------------------------------------------------------------
+ * version.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+#ifndef INCLUDED_EABASE_VERSION_H
+#define INCLUDED_EABASE_VERSION_H
+
+///////////////////////////////////////////////////////////////////////////////
+// EABASE_VERSION
+//
+// We more or less follow the conventional EA packaging approach to versioning
+// here. A primary distinction here is that minor versions are defined as two
+// digit entities (e.g. .03") instead of minimal digit entities ".3"). The logic
+// here is that the value is a counter and not a floating point fraction.
+// Note that the major version doesn't have leading zeros.
+//
+// Example version strings:
+// "0.91.00" // Major version 0, minor version 91, patch version 0.
+// "1.00.00" // Major version 1, minor and patch version 0.
+// "3.10.02" // Major version 3, minor version 10, patch version 02.
+// "12.03.01" // Major version 12, minor version 03, patch version
+//
+// Example usage:
+// printf("EABASE version: %s", EABASE_VERSION);
+// printf("EABASE version: %d.%d.%d", EABASE_VERSION_N / 10000 % 100, EABASE_VERSION_N / 100 % 100, EABASE_VERSION_N % 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EABASE_VERSION
+ #define EABASE_VERSION "2.09.12"
+ #define EABASE_VERSION_N 20912
+#endif
+
+#endif
diff --git a/EASTL/test/packages/EABase/test/CMakeLists.txt b/EASTL/test/packages/EABase/test/CMakeLists.txt
new file mode 100644
index 0000000..93b3e82
--- /dev/null
+++ b/EASTL/test/packages/EABase/test/CMakeLists.txt
@@ -0,0 +1,67 @@
+#-------------------------------------------------------------------------------------------
+# Copyright (C) Electronic Arts Inc. All rights reserved.
+#-------------------------------------------------------------------------------------------
+
+#-------------------------------------------------------------------------------------------
+# CMake info
+#-------------------------------------------------------------------------------------------
+cmake_minimum_required(VERSION 3.1)
+project(EABaseTest CXX)
+include(CTest)
+
+#-------------------------------------------------------------------------------------------
+# Defines
+#-------------------------------------------------------------------------------------------
+add_definitions(-D_CRT_SECURE_NO_WARNINGS)
+add_definitions(-D_SCL_SECURE_NO_WARNINGS)
+add_definitions(-D_CHAR16T)
+
+#-------------------------------------------------------------------------------------------
+# Compiler Flags
+#-------------------------------------------------------------------------------------------
+set (CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/packages/EASTL/scripts/CMake")
+include(CommonCppFlags)
+
+#-------------------------------------------------------------------------------------------
+# Source files
+#-------------------------------------------------------------------------------------------
+file(GLOB EABASETEST_SOURCES "source/TestEABase.cpp" "source/TestEABase.h")
+set(SOURCES ${EABASETEST_SOURCES})
+
+#-------------------------------------------------------------------------------------------
+# Executable definition
+#-------------------------------------------------------------------------------------------
+add_executable(EABaseTest ${SOURCES})
+
+#-------------------------------------------------------------------------------------------
+# Dependencies
+#-------------------------------------------------------------------------------------------
+add_subdirectory(packages/EAAssert)
+add_subdirectory(packages/EAMain)
+add_subdirectory(packages/EASTL)
+add_subdirectory(packages/EAStdC)
+add_subdirectory(packages/EATest)
+add_subdirectory(packages/EAThread)
+
+target_link_libraries(EABaseTest EAAssert)
+target_link_libraries(EABaseTest EAMain)
+target_link_libraries(EABaseTest EASTL)
+target_link_libraries(EABaseTest EAStdC)
+target_link_libraries(EABaseTest EATest)
+target_link_libraries(EABaseTest EAThread)
+
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
+
+if((NOT APPLE) AND (NOT WIN32))
+ target_link_libraries(EABaseTest ${EASTLTest_Libraries} Threads::Threads rt)
+else()
+ target_link_libraries(EABaseTest ${EASTLTest_Libraries} Threads::Threads)
+endif()
+
+#-------------------------------------------------------------------------------------------
+# Run Unit tests and verify the results.
+#-------------------------------------------------------------------------------------------
+add_test(EABaseTestRuns EABaseTest)
+set_tests_properties (EABaseTestRuns PROPERTIES PASS_REGULAR_EXPRESSION "RETURNCODE=0")
+
diff --git a/EASTL/test/packages/EABase/test/source/CEntryPoint.cpp b/EASTL/test/packages/EABase/test/source/CEntryPoint.cpp
new file mode 100644
index 0000000..469581f
--- /dev/null
+++ b/EASTL/test/packages/EABase/test/source/CEntryPoint.cpp
@@ -0,0 +1,4 @@
+// EAMain/EAEntryPointMain.inl contains C++ code but it exposes the application entry point with C linkage.
+
+#include "EAMain/EAEntryPointMain.inl"
+#include "EATest/EASTLNewOperatorGuard.inl"
diff --git a/EASTL/test/packages/EABase/test/source/TestEABase.cpp b/EASTL/test/packages/EABase/test/source/TestEABase.cpp
new file mode 100644
index 0000000..1e7bee9
--- /dev/null
+++ b/EASTL/test/packages/EABase/test/source/TestEABase.cpp
@@ -0,0 +1,3742 @@
+///////////////////////////////////////////////////////////////////////////////
+// TestEABase.cpp
+//
+// Copyright (c) 2003 Electronic Arts, Inc. -- All Rights Reserved.
+// Created by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+#include "TestEABase.h"
+#include "TestEABase.h" // Intentionally double-include the same header file, to test it.
+#include <EABase/eabase.h>
+#include <EABase/earesult.h>
+#include <EABase/eahave.h>
+#include <EABase/nullptr.h>
+#include <EABase/eaunits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <cmath>
+#include <new>
+#include <assert.h>
+#include <stdarg.h>
+#include <EAMain/EAEntryPointMain.inl>
+#include <EATest/EASTLVsnprintf.inl>
+#include <EATest/EASTLNewOperatorGuard.inl>
+#include <EATest/EATest.h>
+#include <EASTL/vector.h>
+#include <EASTL/fixed_vector.h>
+#include <EASTL/string.h>
+#include <EASTL/sort.h>
+#include <EASTL/numeric_limits.h>
+#include <EAStdC/EAString.h>
+#if !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+EA_DISABLE_ALL_VC_WARNINGS()
+ #include <iterator>
+EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+#if defined(EA_COMPILER_MSVC) && defined(EA_PLATFORM_MICROSOFT)
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #define NOMINMAX
+ #include <Windows.h>
+ EA_RESTORE_ALL_VC_WARNINGS()
+#elif defined(EA_PLATFORM_ANDROID)
+ #include <android/log.h>
+#endif
+
+#if EA_FP16C
+ // Include emmintrin.h so that the test code can try to call one of the intrinsics.
+ #include "emmintrin.h"
+ #if EA_COMPILER_CLANG
+ // On some versions of clang immintrin.h needs to be included to pull in f16c operations.
+ #include "immintrin.h"
+ #endif
+#endif
+
+EA_DISABLE_SN_WARNING(1229) // function is deprecated.
+EA_DISABLE_VC_WARNING(4265 4296 4310 4350 4481 4530 4625 4626 4996)
+
+
+// ------------------------------------------------------------------------
+// EA_STATIC_WARNING
+//
+// ** Temporarily here instead of eabase.h **
+//
+// Unilaterally prints a message during the compilation pre-processing phase.
+// No string quotes are required, and no trailing semicolon should be used.
+// As of this writing, clang reports this usage like a warning, but using
+// -Wno-#pragma-messages causes both the warning and message to go away.
+//
+// Example usage:
+// EA_STATIC_WARNING(This function is deprecated.)
+//
+#if defined(_MSC_VER)
+ #define EA_PRAGMA_MESSAGE(x) __pragma(message(#x))
+ #define EA_STATIC_WARNING(msg) EA_PRAGMA_MESSAGE(msg)
+#elif defined(__clang__) || (defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005)) || defined(__SN_VER__)
+ #define EA_PRAGMA(x) _Pragma(#x)
+ #define EA_STATIC_WARNING(msg) EA_PRAGMA(message(#msg))
+#else
+ #define EA_STATIC_WARNING(msg)
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Exercise EA_HAS_INCLUDE
+///////////////////////////////////////////////////////////////////////////////
+#if EA_HAS_INCLUDE_AVAILABLE
+ #if EA_HAS_INCLUDE(<EASTL/map.h>)
+ #include <EASTL/map.h>
+
+ eastl::map<int, int> gTestHasIncludeMap;
+ #endif
+#endif
+
+#if EA_HAS_INCLUDE_AVAILABLE
+ #if EA_HAS_INCLUDE(<DefinitelyDoesNotExist.h>)
+ #error "Include Does Not EXIST!"
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Exercise EAHave
+///////////////////////////////////////////////////////////////////////////////
+
+// EA_HAVE_EXTENSIONS_FEATURE
+// We don't yet have a test for this.
+
+// EA_HAVE_DINKUMWARE_CPP_LIBRARY
+// EA_HAVE_LIBSTDCPP_LIBRARY
+// EA_HAVE_LIBCPP_LIBRARY
+
+#if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY)
+ #if !defined(_YVALS)
+ #error
+ #endif
+#elif defined(EA_HAVE_LIBSTDCPP_LIBRARY)
+ #if !defined(__GLIBCXX__) && !defined(__GLIBCPP__)
+ #error
+ #endif
+#elif defined(EA_HAVE_LIBCPP_LIBRARY)
+ #if !defined(_LIBCPP_VERSION)
+ #error
+ #endif
+#endif
+
+// EA_HAVE_XXX_H
+#if defined(EA_HAVE_SYS_TYPES_H)
+ #include <sys/types.h>
+#endif
+#if defined(EA_HAVE_IO_H)
+ #include <io.h>
+#endif
+#if defined(EA_HAVE_INTTYPES_H)
+ #include <inttypes.h>
+#endif
+#if defined(EA_HAVE_UNISTD_H)
+ #include <unistd.h>
+#endif
+#if defined(EA_HAVE_SYS_TIME_H)
+ #include <sys/time.h>
+#endif
+#if defined(EA_HAVE_SYS_PTRACE_H)
+ #include <sys/ptrace.h>
+#endif
+#if defined(EA_HAVE_SYS_STAT_H)
+ #include <sys/stat.h>
+#endif
+#if defined(EA_HAVE_LOCALE_H)
+ #include <locale.h>
+#endif
+#if defined(EA_HAVE_DIRENT_H)
+ #include <dirent.h>
+#endif
+#if defined(EA_HAVE_SIGNAL_H)
+ #include <signal.h>
+#endif
+#if defined(EA_HAVE_SYS_SIGNAL_H)
+ #include <sys/signal.h>
+#endif
+#if defined(EA_HAVE_PTHREAD_H)
+ #include <pthread.h>
+#endif
+#if defined(EA_HAVE_WCHAR_H)
+ #include <wchar.h>
+#endif
+#if defined(EA_HAVE_MALLOC_H)
+ #include <malloc.h>
+#endif
+#if defined(EA_HAVE_ALLOCA_H)
+ #include <alloca.h>
+#endif
+#if defined(EA_HAVE_EXECINFO_H)
+ #include <execinfo.h>
+#endif
+#if defined(EA_HAVE_SEMAPHORE_H)
+ #include <semaphore.h>
+#endif
+
+#if defined(EA_HAVE_CPP11_CONTAINERS)
+ #include <array>
+ #include <forward_list>
+ #include <unordered_set>
+ #include <unordered_map>
+#endif
+#if defined(EA_HAVE_CPP11_ATOMIC)
+ #include <atomic>
+#endif
+#if defined(EA_HAVE_CPP11_CONDITION_VARIABLE)
+ #include <condition_variable>
+#endif
+#if defined(EA_HAVE_CPP11_MUTEX)
+ #include <mutex>
+#endif
+#if defined(EA_HAVE_CPP11_THREAD)
+ #if defined(_MSC_VER) && defined(EA_COMPILER_NO_EXCEPTIONS) || defined(EA_COMPILER_NO_UNWIND)
+ // Skip this #include, as VC++ has a bug: <concrt.h> (included by <future>) fails to compile when exceptions are disabled.
+ #else
+ #include <thread>
+ #endif
+#endif
+#if defined(EA_HAVE_CPP11_FUTURE)
+ #if defined(_MSC_VER) && defined(EA_COMPILER_NO_EXCEPTIONS) || defined(EA_COMPILER_NO_UNWIND)
+ // Skip this #include, as VC++ has a bug: <concrt.h> (included by <future>) fails to compile when exceptions are disabled.
+ #else
+ #include <future>
+ #endif
+#endif
+#if defined(EA_HAVE_CPP11_TYPE_TRAITS)
+ #include <type_traits>
+#endif
+#if defined(EA_HAVE_CPP11_TUPLES)
+ #include <tuple>
+#endif
+#if defined(EA_HAVE_CPP11_REGEX)
+ #include <regex>
+#endif
+#if defined(EA_HAVE_CPP11_RANDOM)
+ #include <random>
+#endif
+#if defined(EA_HAVE_CPP11_CHRONO)
+ #include <chrono>
+#endif
+#if defined(EA_HAVE_CPP11_SCOPED_ALLOCATOR)
+ #include <scoped_allocator>
+#endif
+#if defined(EA_HAVE_CPP11_INITIALIZER_LIST)
+ #include <initializer_list>
+#else
+ // If there is no initializer_list support the the following should succeed.
+ // The following is disabled because EASTL defines initializer_list itself and that can collide with this:
+ // namespace std{ template<class E> class initializer_list{ }; }
+#endif
+#if defined(EA_HAVE_CPP11_SYSTEM_ERROR)
+ #include <system_error>
+#endif
+#if defined(EA_HAVE_CPP11_CODECVT)
+ #include <codecvt>
+#endif
+#if defined(EA_HAVE_CPP11_TYPEINDEX)
+ #include <typeindex>
+#endif
+
+
+
+// EA_HAVE_XXX_IMPL
+#if defined(EA_HAVE_inet_ntop_IMPL)
+ #include <arpa/inet.h>
+#endif
+
+#if defined(EA_HAVE_time_IMPL)
+ #include <time.h>
+#endif
+
+#if defined(EA_HAVE_clock_gettime_IMPL)
+ #include <time.h>
+#endif
+
+#if defined(EA_HAVE_getcwd_IMPL)
+ #if defined(EA_PLATFORM_MICROSOFT)
+ #include <direct.h>
+ #else
+ #include <unistd.h>
+ #endif
+#endif
+
+#if defined(EA_HAVE_std_terminate_IMPL)
+ #include <exception>
+#endif
+
+#if defined(EA_HAVE_CPP11_ITERATOR_IMPL)
+ #include <iterator>
+#endif
+
+#if defined(EA_HAVE_CPP11_SMART_POINTER_IMPL)
+ #include <memory>
+#endif
+
+#if defined(EA_HAVE_CPP11_FUNCTIONAL_IMPL)
+ #include <functional>
+ void BindTestFunction(int /*n1*/, int /*n2*/, int /*n3*/, const int& /*n4*/, int /*n5*/)
+ {
+ }
+
+ struct BindTestStruct
+ {
+ void Test(int /*n1*/, int /*n2*/) const
+ {
+ }
+ };
+#endif
+
+#if defined(EA_HAVE_CPP11_EXCEPTION_IMPL)
+ #include <exception>
+#endif
+
+
+EA_DISABLE_SN_WARNING(1229) // function is deprecated.
+
+
+// Some CPU/Compiler combinations don't support arbitrary alignment declarations.
+// In particular some ARM compilers often don't. You can use EAStdC's EAAlignment to
+// achieve arbitrary alignment if EA_ALIGN doesn't work.
+#if (EA_ALIGN_MAX_AUTOMATIC < 64)
+ #define ALIGNMENT_AMOUNT_64 EA_ALIGN_MAX_AUTOMATIC
+#else
+ #define ALIGNMENT_AMOUNT_64 64
+#endif
+
+#if (EA_ALIGN_MAX_AUTOMATIC < 32)
+ #define ALIGNMENT_AMOUNT_32 EA_ALIGN_MAX_AUTOMATIC
+#else
+ #define ALIGNMENT_AMOUNT_32 32
+#endif
+
+#if (EA_ALIGN_MAX_AUTOMATIC < 16)
+ #define ALIGNMENT_AMOUNT_16 EA_ALIGN_MAX_AUTOMATIC
+#else
+ #define ALIGNMENT_AMOUNT_16 16
+#endif
+
+
+// EA_OVERRIDE
+struct OverrideBase { virtual ~OverrideBase(){} virtual void f(int){} };
+struct OverrideDerived : public OverrideBase { void f(int) EA_OVERRIDE {} };
+
+
+// EA_INHERITANCE_FINAL
+struct FinalBase EA_INHERITANCE_FINAL { virtual ~FinalBase(){} virtual void f() EA_INHERITANCE_FINAL; };
+
+
+// EA_SEALED
+struct SealedBase EA_SEALED { virtual ~SealedBase(){} virtual void f() EA_SEALED; };
+
+
+// EA_ABSTRACT
+struct AbstractBase EA_ABSTRACT {virtual ~AbstractBase(){} virtual void f(){} };
+
+
+// EA_CONSTEXPR / EA_COMPILER_NO_CONSTEXPR
+EA_CONSTEXPR int GetValue(){ return 37; }
+
+
+// EA_EXTERN_TEMPLATE / EA_COMPILER_NO_EXTERN_TEMPLATE
+template struct eabase_template<char>;
+
+
+// Forward declarations
+template<class T>
+bool VerifyValue(T v1, T v2);
+void DoError(int& nErrorCount, const char* pMessage = NULL);
+int Stricmp(const char* pString1, const char* pString2);
+int TestEABase();
+int TestEAResult();
+int TestEAPlatform();
+bool TestNU();
+int TestEACompiler();
+int TestEACompilerTraits();
+
+
+template<class T>
+bool VerifyValue(T v1, T v2)
+{
+ return (v1 == v2);
+}
+
+
+// Test EA_PLATFORM_XXX support
+// We don't do anything with the defined values below. We are just doing basic testing
+// of the usage of #if EA_PLATFORM_XXX
+#if EA_PLATFORM_WIN64
+ #define EA_PLATFORM_WIN64_OK
+#elif EA_PLATFORM_WIN32
+ #define EA_PLATFORM_WIN64_OK
+#elif EA_PLATFORM_WINDOWS
+ #define EA_PLATFORM_WINDOWS_OK
+#elif EA_PLATFORM_POSIX
+ #define EA_PLATFORM_POSIX_OK
+#elif EA_PLATFORM_UNIX
+ #define EA_PLATFORM_UNIX_OK
+#elif EA_PLATFORM_APPLE
+ #define EA_PLATFORM_APPLE_OK
+#elif EA_PLATFORM_CONSOLE
+ #define EA_PLATFORM_CONSOLE_OK
+#elif EA_PLATFORM_DESKTOP
+ #define EA_PLATFORM_DESKTOP_OK
+#else
+ #define EA_PLATFORM_OK
+#endif
+
+
+
+/* Test EA_DISABLE_WARNING */
+EA_DISABLE_VC_WARNING(4548 4127)
+EA_DISABLE_ALL_VC_WARNINGS()
+EA_RESTORE_ALL_VC_WARNINGS()
+
+EA_DISABLE_GCC_WARNING(-Wuninitialized)
+
+EA_DISABLE_SN_WARNING(1787)
+EA_DISABLE_ALL_SN_WARNINGS()
+EA_RESTORE_ALL_SN_WARNINGS()
+
+EA_DISABLE_GHS_WARNING(123)
+
+EA_DISABLE_EDG_WARNING(193)
+
+EA_DISABLE_CW_WARNING(10317)
+EA_DISABLE_ALL_CW_WARNINGS()
+EA_RESTORE_ALL_CW_WARNINGS()
+
+/* Test EA_DISABLE_WARNING */
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_GCC_WARNING()
+EA_RESTORE_SN_WARNING()
+EA_RESTORE_GHS_WARNING()
+EA_RESTORE_EDG_WARNING()
+EA_RESTORE_CW_WARNING(10317)
+
+
+void DoError(int& nErrorCount, const char* pMessage)
+{
+ ++nErrorCount;
+ if(pMessage)
+ EA::EAMain::Report("Test error: %s\n", pMessage);
+}
+
+
+int Stricmp(const char* pString1, const char* pString2)
+{
+ char c1, c2;
+
+ while((c1 = (char)tolower(*pString1++)) == (c2 = (char)tolower(*pString2++)))
+ {
+ if(c1 == 0)
+ return 0;
+ }
+
+ return (c1 - c2);
+}
+
+
+// EA_PURE
+static EA_PURE bool PureFunction()
+{
+ return (strlen("abc") == 3);
+}
+
+// EA_WEAK
+EA_WEAK int gWeakVariable = 1;
+
+
+// EA_NO_VTABLE
+struct EA_NO_VTABLE NoVTable1
+{
+ virtual ~NoVTable1(){}
+ virtual void InterfaceFunction()
+ {
+ }
+};
+
+EA_STRUCT_NO_VTABLE(NoVTable2)
+{
+ virtual ~NoVTable2(){}
+ virtual void InterfaceFunction()
+ {
+ }
+};
+
+class NoVTable1Subclass : public NoVTable1
+{
+ virtual void InterfaceFunction()
+ {
+ }
+};
+
+class NoVTable2Subclass : public NoVTable2
+{
+ virtual void InterfaceFunction()
+ {
+ }
+};
+
+
+
+struct ClassWithDefaultCtor
+{
+ ClassWithDefaultCtor(int x = 0)
+ { char buffer[16]; sprintf(buffer, "%d", x); }
+};
+
+
+struct ClassWithoutDefaultCtor
+{
+ ClassWithoutDefaultCtor(int x)
+ { char buffer[16]; sprintf(buffer, "%d", x); }
+};
+
+
+struct InitPriorityTestClass
+{
+ int mX;
+ InitPriorityTestClass(int x = 0) { mX = x; }
+};
+
+
+struct OffsetofTestClass // Intentionally a non-pod.
+{
+ int32_t mX;
+ int32_t mY;
+ OffsetofTestClass(int32_t x = 0) : mX(x), mY(0) { }
+};
+
+struct SizeofMemberTestClass // Intentionally a non-pod.
+{
+ int32_t mX;
+ int32_t mY;
+ SizeofMemberTestClass(int32_t x = 0) : mX(x), mY(0) { }
+};
+
+// EA_INIT_PRIORITY
+InitPriorityTestClass gInitPriorityTestClass0 EA_INIT_PRIORITY(2000);
+InitPriorityTestClass gInitPriorityTestClass1 EA_INIT_PRIORITY(2000) (1);
+
+// EA_INIT_SEG
+EA_INIT_SEG(compiler) InitPriorityTestClass gInitSegTestSection(2300);
+
+
+// EA_MAY_ALIAS
+void* EA_MAY_ALIAS gPtr0 = NULL;
+
+typedef void* EA_MAY_ALIAS pvoid_may_alias;
+pvoid_may_alias gPtr1 = NULL;
+
+
+// EA_NO_INLINE
+static EA_NO_INLINE void DoNothingInline()
+{
+}
+
+
+// EA_PREFIX_NO_INLINE / EA_POSTFIX_NO_INLINE
+static void EA_PREFIX_NO_INLINE DoNothingPrefixInline() EA_POSTFIX_NO_INLINE;
+
+static void DoNothingPrefixInline()
+{
+}
+
+
+// EA_FORCE_INLINE
+static EA_FORCE_INLINE void DoNothingForceInline()
+{
+}
+
+
+// EA_PREFIX_FORCE_INLINE / EA_POSTFIX_FORCE_INLINE
+static void EA_PREFIX_FORCE_INLINE DoNothingPrefixForceInline() EA_POSTFIX_FORCE_INLINE;
+
+static void DoNothingPrefixForceInline()
+{
+}
+
+
+// static_asset at global scope
+// Should succeed.
+static_assert(sizeof(int32_t) == 4, "static_assert failure");
+// Should fail.
+//static_assert(sizeof(int32_t) == 8, "static_assert failure");
+
+
+// EA_STATIC_WARNING
+EA_DISABLE_CLANG_WARNING(-W#pragma-messages) // Clang treats messages as warnings.
+EA_STATIC_WARNING(EA_STATIC_WARNING test)
+EA_RESTORE_CLANG_WARNING()
+
+
+// EA_OPTIMIZE_OFF / EA_OPTIMIZE_ON
+EA_OPTIMIZE_OFF()
+static EA_NO_INLINE int DisabledOptimizations(int x)
+{
+ return x * 37;
+}
+EA_OPTIMIZE_ON()
+
+
+// EA_UNUSED
+static void FunctionWithUnusedVariables(int x)
+{
+ int y = 0;
+ EA_UNUSED(x);
+ EA_UNUSED(y);
+}
+
+
+// EA_NON_COPYABLE / EANonCopyable
+struct NonCopyableA
+{
+ NonCopyableA(){}
+ int x;
+
+ EA_NON_COPYABLE(NonCopyableA)
+};
+
+EA_DISABLE_VC_WARNING(4625 4626) // C4625: A copy constructor was not accessible in a base class and was therefore not generated for a derived class. C4626: An assignment operator was not accessible in a base class and was therefore not generated for a derived class.
+struct NonCopyableB : public EANonCopyable
+{
+ #if !EA_COMPILER_NO_DELETED_FUNCTIONS
+ NonCopyableB& operator=(NonCopyableB&& other) = delete;
+ #endif
+ int x;
+};
+EA_RESTORE_VC_WARNING()
+
+
+
+// Exercize the case of using EA_NON_COPYABLE when
+struct NonDefaultConstructibleBase
+{
+ int mX;
+ NonDefaultConstructibleBase(int x) : mX(x){}
+};
+
+struct NonCopyableSubclass : public NonDefaultConstructibleBase
+{
+ NonCopyableSubclass(int x) : NonDefaultConstructibleBase(x){}
+ EA_NON_COPYABLE(NonCopyableSubclass)
+};
+
+
+
+
+// EA_COMPILER_NO_DEFAULTED_FUNCTIONS
+// EA_COMPILER_NO_DELETED_FUNCTIONS
+//
+// We currently test only the ability of the compiler to build the code,
+// and don't test if the compiler built it correctly.
+struct DefaultedDeletedTest
+{
+ #if defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS)
+ DefaultedDeletedTest(){}
+ #else
+ DefaultedDeletedTest() = default;
+ #endif
+
+ #if defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS)
+ ~DefaultedDeletedTest(){}
+ #else
+ ~DefaultedDeletedTest() = delete;
+ #endif
+};
+
+
+struct EA_FUNCTION_DELETE_Test
+{
+ int x;
+ EA_FUNCTION_DELETE_Test(int xValue) : x(xValue){}
+
+private: // For portability with pre-C++11 compilers, make the function private.
+ void foo() EA_FUNCTION_DELETE;
+};
+
+
+#if !defined(EA_COMPILER_NO_USER_DEFINED_LITERALS)
+ // Conversion example
+ inline long double operator"" _deg(long double degrees)
+ { return (degrees * 3.141592) / 180; }
+
+ // Custom type example
+ struct UDLTest
+ {
+ UDLTest() : mX(0){}
+ UDLTest(uint64_t x) : mX(x){}
+
+ uint64_t mX;
+ };
+
+ UDLTest operator"" _udl(unsigned long long x) // The type must be unsigned long long and can't be uint64_t, as uint64_t might be unsigned long int.
+ { return UDLTest(x); }
+#endif
+
+
+#if !defined(EA_COMPILER_NO_INLINE_NAMESPACES)
+ namespace INSNamespace
+ {
+ inline namespace INSNamespace_1
+ {
+ template <typename T>
+ class A;
+ }
+
+ template <typename T>
+ int g(T){ return 37; }
+ }
+
+ struct INSClass{ };
+
+ namespace INSNamespace
+ {
+ template<>
+ class A<INSClass>{ };
+ }
+
+#endif
+
+
+#if !defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS)
+ struct FunctionTemplateTest
+ {
+ template<typename T = int>
+ static T AddOne(T value)
+ { return value + 1; }
+ };
+#endif
+
+
+#if !defined(EA_COMPILER_NO_NOEXCEPT)
+ int NoExceptTestFunction() EA_NOEXCEPT
+ { return 37; }
+
+ struct NoExceptTestStruct
+ {
+ int mX;
+ NoExceptTestStruct() : mX(37) {}
+ };
+
+ template <class T>
+ int NoExceptTestTemplate() EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(T()))
+ { T t; return t.mX; }
+#endif
+
+
+// The following function defintions are intended to generate compilation errors if EA_CHAR16_NATIVE or EA_CHAR32_NATIVE is set to 1 when it should be 0.
+// i.e. if the types are not actually native then their will be a function redefinition error generated.
+void NoopTakingString(const wchar_t *)
+{
+}
+#if EA_WCHAR_UNIQUE
+ #if EA_WCHAR_SIZE == 2
+ // This definition should not conflict with the wchar_t defintion because char16_t should be unique.
+ void NoopTakingString(const char16_t *)
+ {
+ #if !EA_CHAR16_NATIVE
+ #error Expected EA_CHAR16_NATIVE to be 1.
+ #endif
+ }
+ #else
+ // This definition should not conflict with the wchar_t defintion because char32_t should be unique.
+ void NoopTakingString(const char32_t *)
+ {
+ #if !EA_CHAR32_NATIVE
+ #error Expected EA_CHAR32_NATIVE to be 1.
+ #endif
+ }
+
+ #endif
+#endif
+
+
+int TestEABase()
+{
+ int nErrorCount(0);
+
+ DoNothingInline();
+
+ // Test NULL
+ {
+ if(!VerifyValue<void*>(NULL, (void*)0))
+ DoError(nErrorCount, "unspecified test");
+ }
+
+ // Verify sized type sizes
+ {
+ if(!VerifyValue<size_t>(sizeof(int8_t), 1))
+ DoError(nErrorCount, "int8_t size test");
+ if(!VerifyValue<size_t>(sizeof(uint8_t), 1))
+ DoError(nErrorCount, "uint8_t size test");
+ if(!VerifyValue<size_t>(sizeof(int16_t), 2))
+ DoError(nErrorCount, "int16_t size test");
+ if(!VerifyValue<size_t>(sizeof(uint16_t), 2))
+ DoError(nErrorCount, "uint16_t size test");
+ if(!VerifyValue<size_t>(sizeof(int32_t), 4))
+ DoError(nErrorCount, "int32_t size test");
+ if(!VerifyValue<size_t>(sizeof(uint32_t), 4))
+ DoError(nErrorCount, "uint32_t size test");
+ if(!VerifyValue<size_t>(sizeof(int64_t), 8))
+ DoError(nErrorCount, "int64_t size test");
+ if(!VerifyValue<size_t>(sizeof(uint64_t), 8))
+ DoError(nErrorCount, "uint64_t size test");
+
+ #if !defined(FLT_EVAL_METHOD)
+ #error EABase should always define FLT_EVAL_METHOD
+ DoError(nErrorCount, "FLT_EVAL_METHOD test: not defined.");
+ #else
+ #if (FLT_EVAL_METHOD == -1)
+ // In this case the C99 standard states that the
+ // precision of float_t and double_t is indeterminable.
+ #elif (FLT_EVAL_METHOD == 0)
+ if(!VerifyValue<size_t>(sizeof(float_t), sizeof(float)))
+ DoError(nErrorCount, "float_t size test");
+ if(!VerifyValue<size_t>(sizeof(double_t), sizeof(double)))
+ DoError(nErrorCount, "double_t size test");
+ #elif (FLT_EVAL_METHOD == 1)
+ if(!VerifyValue<size_t>(sizeof(float_t), sizeof(double)))
+ DoError(nErrorCount, "float_t size test");
+ if(!VerifyValue<size_t>(sizeof(double_t), sizeof(double)))
+ DoError(nErrorCount, "double_t size test");
+ #elif (FLT_EVAL_METHOD == 2)
+ if(!VerifyValue<size_t>(sizeof(float_t), sizeof(long double)))
+ DoError(nErrorCount, "float_t size test");
+ if(!VerifyValue<size_t>(sizeof(double_t), sizeof(long double)))
+ DoError(nErrorCount, "double_t size test");
+ #else
+ DoError(nErrorCount, "FLT_EVAL_METHOD test: invalid value.");
+ #endif
+ #endif
+
+ if(sizeof(bool8_t) != 1)
+ DoError(nErrorCount, "bool8_t size test");
+
+ if(!VerifyValue<size_t>(sizeof(intptr_t), sizeof(void*)))
+ DoError(nErrorCount, "intptr_t size test");
+ if(!VerifyValue<size_t>(sizeof(uintptr_t), sizeof(void*)))
+ DoError(nErrorCount, "uintptr_t size test");
+
+ if(!VerifyValue<size_t>(sizeof(ssize_t), sizeof(size_t)))
+ DoError(nErrorCount, "ssize_t size test");
+
+ EA_DISABLE_VC_WARNING(6326)
+ const ssize_t ss(1); // Verify that ssize_t is a signed type.
+ if(ssize_t((ss ^ ss) - 1) >= 0)
+ DoError(nErrorCount, "ssize_t sign test");
+ EA_RESTORE_VC_WARNING()
+
+ if(!VerifyValue<size_t>(sizeof(char8_t), 1))
+ DoError(nErrorCount, "char8_t size test");
+ if(!VerifyValue<size_t>(sizeof(char16_t), 2))
+ DoError(nErrorCount, "char16_t size test");
+ if(!VerifyValue<size_t>(sizeof(char32_t), 4))
+ DoError(nErrorCount, "char32_t size test");
+
+ #if (EA_WCHAR_SIZE == 2) || (EA_WCHAR_SIZE == 4)
+ if(!VerifyValue<size_t>(sizeof(wchar_t), EA_WCHAR_SIZE))
+ DoError(nErrorCount, "EA_WCHAR_SIZE test");
+ #else
+ DoError(nErrorCount, "EA_WCHAR_SIZE test");
+ #endif
+ }
+
+ // Test CHAR8_MIN, etc.
+ {
+ // The C standard allows compilers/platforms to use -127 as the min 8 bit value, but we've never seen it in modern systems.
+ static_assert(((((CHAR8_MIN == -128) && (CHAR8_MAX == 127))) || ((CHAR8_MIN == 0) && (CHAR8_MAX == 255))), "CHAR8_MAX failure");
+ static_assert(((((CHAR16_MIN == -32768) && (CHAR16_MAX == 32767))) || ((CHAR16_MIN == 0) && (CHAR16_MAX == 65535))), "CHAR16_MAX failure");
+ static_assert(((((CHAR32_MIN == -INT64_C(2147483648)) && (CHAR32_MAX == INT64_C(2147483647)))) || ((CHAR32_MIN == 0) && (CHAR32_MAX == INT64_C(4294967295)))), "CHAR32_MAX failure");
+ }
+
+ // Test char8_t, char16_t, char32_t string literals.
+ {
+ const char8_t* p8 = EA_CHAR8("abc");
+ const char8_t c8 = EA_CHAR8('a');
+
+ #ifdef EA_CHAR16
+ const char16_t* p16 = EA_CHAR16("abc"); // Under GCC, this assumes compiling with -fshort-wchar
+ const char16_t c16 = EA_CHAR16('\x3001');
+ #else
+ const char16_t* p16 = NULL;
+ const char16_t c16 = static_cast<char16_t>('X');
+ #endif
+
+ #ifdef EA_CHAR32
+ const char32_t* p32 = EA_CHAR32("abc");
+ const char32_t c32 = EA_CHAR32('\x3001');
+ #else
+ const char32_t p32[] = { 'a', 'b', 'c', '\0' }; // Microsoft doesn't support 32 bit strings here, and GCC doesn't use them when we compile with -fshort-wchar (which we do).
+ #ifdef EA_CHAR16
+ const char32_t c32 = EA_CHAR16('\x3001'); // 16 bit should silently convert to 32 bit.
+ #else
+ const char32_t c32 = static_cast<char16_t>('X'); // 16 bit should silently convert to 32 bit.
+ #endif
+ #endif
+
+ const wchar_t* pW = EA_WCHAR("abc");
+ const wchar_t cW = EA_WCHAR('\x3001');
+
+ EA_UNUSED(p8);
+ EA_UNUSED(c8);
+ EA_UNUSED(p16);
+ EA_UNUSED(c16);
+ EA_UNUSED(p32);
+ EA_UNUSED(c32);
+ EA_UNUSED(pW);
+ EA_UNUSED(cW);
+ }
+
+ // Verify sized type signs
+ {
+ int8_t i8(1);
+ if(int8_t((i8 ^ i8) - 1) >= 0)
+ DoError(nErrorCount, "int8_t sign test");
+
+ uint8_t u8(1);
+ if(uint8_t((u8 ^ u8) - 1) <= 0)
+ DoError(nErrorCount, "uint8_t sign test");
+
+ int16_t i16(1);
+ if(int16_t((i16 ^ i16) - 1) >= 0)
+ DoError(nErrorCount, "int16_t sign test");
+
+ uint16_t u16(1);
+ if(uint16_t((u16 ^ u16) - 1) <= 0)
+ DoError(nErrorCount, "uint16_t sign test");
+
+ int32_t i32(1);
+ if(int32_t((i32 ^ i32) - 1) >= 0)
+ DoError(nErrorCount, "int32_t sign test");
+
+ uint32_t u32(1);
+ if(uint32_t((u32 ^ u32) - 1) <= 0)
+ DoError(nErrorCount, "uint32_t sign test");
+
+ int64_t i64(1);
+ if(int64_t((i64 ^ i64) - 1) >= 0)
+ DoError(nErrorCount, "int64_t sign test");
+
+ uint64_t u64(1);
+ if(uint64_t((u64 ^ u64) - 1) <= 0)
+ DoError(nErrorCount, "uint64_t sign test");
+
+
+
+ intptr_t ip(1);
+ if(intptr_t((ip ^ ip) - 1) >= 0)
+ DoError(nErrorCount, "intptr_t sign test");
+
+ uintptr_t up(1);
+ if(uintptr_t((up ^ up) - 1) <= 0)
+ DoError(nErrorCount, "uintptr_t sign test");
+
+
+ // The following sign tests have been disabled, as the determination of
+ // the sign of type char and wchar_t are in the hands of the compiler and
+ // the user's configuration of that compiler.
+
+ //char8_t c8(1); // We expect it to be signed, though the need for such a requirement is debateable.
+ //if(char8_t((c8 ^ c8) - 1) >= 0)
+ // DoError(nErrorCount, "char8_t sign test");
+
+ //char16_t c16(1); // We expect it to be unsigned
+ //if(char16_t((c16 ^ c16) - 1) <= 0)
+ // DoError(nErrorCount, "char16_t sign test");
+
+ //char32_t c32(1); // We expect it to be unsigned
+ //if(char32_t((c32 ^ c32) - 1) <= 0)
+ // DoError(nErrorCount, "char32_t sign test");
+ }
+
+
+
+ //Test Constant macros
+ {
+ char buffer[256];
+
+ const int8_t i8Min = INT8_C(-128); // Strictly speaking, the C language standard allows this to be -127 as well.
+ const int8_t i8Max = INT8_C(127);
+
+ const uint8_t u8Min = UINT8_C(0);
+ const uint8_t u8Max = UINT8_C(255);
+
+ const int16_t i16Min = INT16_C(-32767) - 1;
+ const int16_t i16Max = INT16_C( 32767);
+
+ const uint16_t u16Min = UINT16_C(0);
+ const uint16_t u16Max = UINT16_C(65535);
+
+ const int32_t i32Min = INT32_C(-2147483647) - 1;
+ const int32_t i32Max = INT32_C( 2147483647);
+
+ const uint32_t u32Min = UINT32_C(0);
+ const uint32_t u32Max = UINT32_C(4294967295);
+
+ #if defined(__GNUC__) && (__GNUC__ < 4) // If using a broken version of UINT64_C/INT64_C macros...
+ const int64_t i64Min = -9223372036854775807LL - 1;
+ const int64_t i64Max = 9223372036854775807LL;
+
+ const uint64_t u64Min = UINT64_C(0);
+ const uint64_t u64Max = 18446744073709551615ULL;
+ #else
+ const int64_t i64Min = INT64_C(-9223372036854775807) - 1;
+ const int64_t i64Max = INT64_C( 9223372036854775807);
+
+ const uint64_t u64Min = UINT64_C(0);
+ const uint64_t u64Max = UINT64_C(18446744073709551615);
+ #endif
+
+ sprintf(buffer, "%d %d %u %u %d %d %u %u %d %d %u %u %" SCNd64" %" SCNd64" %" SCNu64" %" SCNu64,
+ (int)i8Min, (int)i8Max, (unsigned)u8Min, (unsigned)u8Max,
+ (int)i16Min, (int)i16Max, (unsigned)u16Min, (unsigned)u16Max,
+ (int)i32Min, (int)i32Max, (unsigned)u32Min, (unsigned)u32Max,
+ i64Min, i64Max, u64Min, u64Max);
+ if(strcmp(buffer, "-128 127 0 255 -32768 32767 0 65535 -2147483648 2147483647 0 4294967295 -9223372036854775808 9223372036854775807 0 18446744073709551615"))
+ DoError(nErrorCount, "INT_C test");
+
+ EA_DISABLE_VC_WARNING(6326)
+ // Verify the use of hex numbers with INT64_C
+ const int64_t i64Hex = INT64_C(0x1111111122222222);
+ if(i64Hex != INT64_C(1229782938533634594))
+ DoError(nErrorCount, "INT64_C hex error");
+ EA_RESTORE_VC_WARNING()
+
+ // Verify the use of hex numbers with UINT64_C
+ const uint64_t u64Hex = UINT64_C(0xaaaaaaaabbbbbbbb);
+
+ #if defined(__GNUC__) && (__GNUC__ < 4) // If using a broken version of UINT64_C/INT64_C macros...
+ const uint64_t temp = 12297829382759365563ULL;
+ #else
+ const uint64_t temp = UINT64_C(12297829382759365563);
+ #endif
+
+ EA_DISABLE_VC_WARNING(6326)
+ if(u64Hex != temp)
+ DoError(nErrorCount, "UINT64_C hex error");
+ EA_RESTORE_VC_WARNING()
+
+ // Verify that the compiler both allows division with uint64_t but
+ // also that it allows it via UINT64_MAX. A bad implementation of
+ // UINT64_MAX would cause the code below to mis-execute or not compile.
+ EA_DISABLE_VC_WARNING(6326)
+ const uint64_t resultUint64 = UINT64_MAX / 2;
+ if(resultUint64 != UINT64_C(9223372036854775807))
+ DoError(nErrorCount, "UINT64_MAX error");
+ EA_RESTORE_VC_WARNING()
+ }
+
+ {
+ static_assert(INTPTR_MIN == eastl::numeric_limits<intptr_t>::min(), "INTPTR_MIN failure");
+ static_assert(INTPTR_MAX == eastl::numeric_limits<intptr_t>::max(), "INTPTR_MAX failure");
+ //static_assert(UINTPTR_MIN == eastl::numeric_limits<uintptr_t>::min(), "UINTPTR_MIN failure"); // not specified by the standard
+ static_assert(UINTPTR_MAX == eastl::numeric_limits<uintptr_t>::max(), "UINTPTR_MAX failure");
+ static_assert(INTMAX_MIN == eastl::numeric_limits<intmax_t>::min(), "INTMAX_MIN failure");
+ static_assert(INTMAX_MAX == eastl::numeric_limits<intmax_t>::max(), "INTMAX_MAX failure");
+ //static_assert(UINTMAX_MIN == eastl::numeric_limits<uintmax_t>::MIN(), "UINTMAX_MIN failure"); // not specified by the standard
+ static_assert(UINTMAX_MAX == eastl::numeric_limits<uintmax_t>::max(), "UINTMAX_MAX failure");
+ }
+
+ //Test sized printf format specifiers
+ {
+ char buffer[256];
+
+ int8_t d8(INT8_MAX), i8(INT8_MIN), o8(INT8_MAX);
+ uint8_t u8(UINT8_MAX), x8(UINT8_MAX), X8(UINT8_MAX);
+ sprintf(buffer, "%" PRId8 " %" PRIi8 " %" PRIo8 " %" PRIu8 " %" PRIx8 " %" PRIX8, d8, i8, o8, u8, x8, X8);
+
+ #ifdef EA_COMPILER_GNUC
+ if(Stricmp(buffer, "127 -128 177 255 ff FF"))
+ DoError(nErrorCount, "PRI8 test"); // This is known to fail with compilers such as VC++ which don't support %hh.
+ #endif
+
+ int16_t d16(INT16_MAX), i16(INT16_MIN), o16(INT16_MAX);
+ uint16_t u16(UINT16_MAX), x16(UINT16_MAX), X16(UINT16_MAX);
+ sprintf(buffer, "%" PRId16 " %" PRIi16 " %" PRIo16 " %" PRIu16 " %" PRIx16 " %" PRIX16, d16, i16, o16, u16, x16, X16);
+ if(Stricmp(buffer, "32767 -32768 77777 65535 ffff FFFF"))
+ DoError(nErrorCount, "PRI16 test");
+
+ int32_t d32(INT32_MAX), i32(INT32_MIN), o32(INT32_MAX);
+ uint32_t u32(UINT32_MAX), x32(UINT32_MAX), X32(UINT32_MAX);
+ sprintf(buffer, "%" PRId32 " %" PRIi32 " %" PRIo32 " %" PRIu32 " %" PRIx32 " %" PRIX32, d32, i32, o32, u32, x32, X32);
+ if(Stricmp(buffer, "2147483647 -2147483648 17777777777 4294967295 ffffffff FFFFFFFF"))
+ DoError(nErrorCount, "PRI32 test");
+
+ int64_t d64(INT64_MAX), i64(INT64_MIN), o64(INT64_MAX);
+ uint64_t u64(UINT64_MAX), x64(UINT64_MAX), X64(UINT64_MAX);
+ sprintf(buffer, "%" PRId64 " %" PRIi64 " %" PRIo64 " %" PRIu64 " %" PRIx64 " %" PRIX64, d64, i64, o64, u64, x64, X64);
+ if(Stricmp(buffer, "9223372036854775807 -9223372036854775808 777777777777777777777 18446744073709551615 ffffffffffffffff FFFFFFFFFFFFFFFF"))
+ DoError(nErrorCount, "PRI64 test");
+
+ // Many compilers give warnings for the following code because they
+ // recognize that a pointer is being formatted as an integer.
+ // This is what we want to do and what the C99 standard intends here.
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300)
+ #pragma warning(disable: 4313) // Warning C4313: 'sprintf' : '%d' in format string conflicts with argument 1 of type 'void *'
+ #pragma warning(disable: 4777) // Warning C4777: 'sprintf' : format string '%lld' requires an argument of type '__int64', but variadic argument 1 has type 'intptr_t'
+ #endif
+
+ #if !defined(__GNUC__) // GCC generates warnings here which we can't work around.
+ void *dPtr = (void*)INT32_MAX, *iPtr = (void*)INT32_MIN, *oPtr = (void*)INT32_MAX, *uPtr = (void*)(uintptr_t)UINT64_MAX, *xPtr = (void*)(uintptr_t)UINT64_MAX, *XPtr = (void*)(uintptr_t)UINT64_MAX;
+ sprintf(buffer, "%" PRIdPTR " %" PRIiPTR " %" PRIoPTR " %" PRIuPTR " %" PRIxPTR " %" PRIXPTR, (intptr_t)dPtr, (intptr_t)iPtr, (uintptr_t)oPtr, (uintptr_t)uPtr, (uintptr_t)xPtr, (uintptr_t)XPtr);
+
+ #if (EA_PLATFORM_PTR_SIZE == 4)
+ if(Stricmp(buffer, "2147483647 -2147483648 17777777777 4294967295 ffffffff FFFFFFFF"))
+ DoError(nErrorCount, "PRIPTR test");
+ #else // EA_PLATFORM_PTR_SIZE == 8
+ if(Stricmp(buffer, "2147483647 -2147483648 17777777777 18446744073709551615 ffffffffffffffff FFFFFFFFFFFFFFFF"))
+ DoError(nErrorCount, "PRIPTR test");
+ #endif
+ #endif
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300)
+ #pragma warning(default: 4313)
+ #pragma warning(default: 4777)
+ #endif
+ }
+
+ //Test sized scanf format specifiers
+ {
+ int numMatched = 0;
+ #ifdef EA_COMPILER_IS_C99 // Enabled for C99 only because this code will simply crash on many platforms if the format specifiers aren't supported.
+ int8_t d8, i8, o8;
+ uint8_t u8, x8;
+ numMatched = sscanf("127 -127 177 255 ff", "%" SCNd8 " %" SCNi8 " %" SCNo8 " %" SCNu8 " %" SCNx8, &d8, &i8, &o8, &u8, &x8);
+ if((numMatched != 5) || (d8 != 127) || (i8 != -127) || (o8 != 127) || (u8 != 255) || (x8 != 255))
+ DoError(nErrorCount, "SCN8 test"); // This is known to fail with compilers such as VC++ which don't support %hh.
+ #endif
+
+ int16_t d16, i16, o16;
+ uint16_t u16, x16;
+ numMatched = sscanf("32767 -32768 77777 65535 ffff", "%" SCNd16 " %" SCNi16 " %" SCNo16 " %" SCNu16 " %" SCNx16, &d16, &i16, &o16, &u16, &x16);
+ if((numMatched != 5) || (d16 != 32767) || (i16 != -32768) || (o16 != 32767) || (u16 != 65535) || (x16 != 65535))
+ DoError(nErrorCount, "SCN16 test");
+
+ int32_t d32, i32, o32;
+ uint32_t u32, x32;
+ numMatched = sscanf("2147483647 -2147483648 17777777777 4294967295 ffffffff", "%" SCNd32 " %" SCNi32 " %" SCNo32 " %" SCNu32 " %" SCNx32, &d32, &i32, &o32, &u32, &x32);
+ if((numMatched != 5) || (d32 != INT32_MAX) || (i32 != INT32_MIN) || (o32 != INT32_MAX) || (u32 != UINT32_MAX) || (x32 != UINT32_MAX))
+ DoError(nErrorCount, "SCN32 test");
+
+ int64_t d64, i64, o64;
+ uint64_t u64, x64;
+ numMatched = sscanf("9223372036854775807 -9223372036854775808 777777777777777777777 18446744073709551615 ffffffffffffffff", "%" SCNd64 " %" SCNi64 " %" SCNo64 " %" SCNu64 " %" SCNx64, &d64, &i64, &o64, &u64, &x64);
+ if((numMatched != 5) || (d64 != INT64_MAX) || (i64 != INT64_MIN) || (o64 != INT64_MAX) || (u64 != UINT64_MAX) || (x64 != UINT64_MAX))
+ DoError(nErrorCount, "SCN64 test");
+
+ // Many compilers give warnings for the following code because they
+ // recognize that a pointer is being formatted as an integer.
+ // This is what we want to do and what the C99 standard intends here.
+ #if !defined(__GNUC__) // GCC generates warnings here which we can't work around.
+ void *dPtr, *iPtr, *oPtr, *uPtr, *xPtr;
+ intptr_t dip, iip;
+ uintptr_t ouip, uuip, xuip;
+
+ EA_DISABLE_VC_WARNING(4777) // format string '%lld' requires an argument of type '__int64 *', but variadic argument 1 has type 'intptr_t *'
+ #if (EA_PLATFORM_PTR_SIZE == 4)
+ numMatched = sscanf("2147483647 -2147483648 17777777777 4294967295 ffffffff", "%" SCNdPTR " %" SCNiPTR " %" SCNoPTR " %" SCNuPTR " %" SCNxPTR, &dip, &iip, &ouip, &uuip, &xuip);
+ #else // EA_PLATFORM_PTR_SIZE == 8
+ numMatched = sscanf("2147483647 -2147483648 17777777777 18446744073709551615 ffffffffffffffff", "%" SCNdPTR " %" SCNiPTR " %" SCNoPTR " %" SCNuPTR " %" SCNxPTR, &dip, &iip, &ouip, &uuip, &xuip);
+ #endif
+ EA_RESTORE_VC_WARNING()
+
+ dPtr = (void*)dip;
+ iPtr = (void*)iip;
+ oPtr = (void*)ouip;
+ uPtr = (void*)uuip;
+ xPtr = (void*)xuip;
+
+ if((numMatched != 5) || (dPtr != (void*)INT32_MAX) || (iPtr != (void*)INT32_MIN) || (oPtr != (void*)INT32_MAX) || (uPtr != (void*)(uintptr_t)UINT64_MAX) || (xPtr != (void*)(uintptr_t)UINT64_MAX))
+ DoError(nErrorCount, "SCNPTR test");
+ #endif
+ }
+
+
+ // Test min/max
+ {
+ // The C standard allows INT8_MIN to be either -127 or -128. So in order to be able
+ // to test for this in a portable way, we do the logic below whereby we test for
+ // -127 (which all compiles should support) or -127 - 1 which all compilers should
+ // support if INT8_MIN isn't -127.
+ if(!VerifyValue<int8_t>(INT8_MIN, INT8_C(-127)) && !VerifyValue<int8_t>(INT8_MIN, INT8_C(-127) - 1))
+ DoError(nErrorCount, "INT8_MIN test");
+ if(!VerifyValue<int8_t>(INT8_MAX, INT8_C(127)))
+ DoError(nErrorCount, "INT8_MAX test");
+ if(!VerifyValue<uint8_t>(UINT8_MAX, UINT8_C(255)))
+ DoError(nErrorCount, "UINT8_MAX test");
+
+ if(!VerifyValue<int16_t>(INT16_MIN, INT16_C(-32767)) && !VerifyValue<int16_t>(INT16_MIN, INT16_C(-32767) - 1))
+ DoError(nErrorCount, "INT16_MIN test");
+ if(!VerifyValue<int16_t>(INT16_MAX, INT16_C(32767)))
+ DoError(nErrorCount, "INT16_MAX test");
+ if(!VerifyValue<uint16_t>(UINT16_MAX, UINT16_C(65535)))
+ DoError(nErrorCount, "UINT16_MAX test");
+
+ if(!VerifyValue<int32_t>(INT32_MIN, INT32_C(-2147483647)) && !VerifyValue<int32_t>(INT32_MIN, INT32_C(-2147483647) - 1))
+ DoError(nErrorCount, "INT32_MIN test");
+ if(!VerifyValue<int32_t>(INT32_MAX, INT32_C(2147483647)))
+ DoError(nErrorCount, "INT32_MAX test");
+ if(!VerifyValue<uint32_t>(UINT32_MAX, UINT32_C(4294967295)))
+ DoError(nErrorCount, "UINT32_MAX test");
+
+ if(!VerifyValue<int64_t>(INT64_MIN, INT64_C(-9223372036854775807)) && !VerifyValue<int64_t>(INT64_MIN, INT64_C(-9223372036854775807) - 1))
+ DoError(nErrorCount, "INT64_MIN test");
+ if(!VerifyValue<uint64_t>(INT64_MAX, INT64_C(9223372036854775807)))
+ DoError(nErrorCount, "INT64_MAX test");
+
+ #if defined(__GNUC__) && (__GNUC__ < 4) // If using a broken version of UINT64_C/INT64_C macros...
+ const uint64_t temp = 18446744073709551615ULL;
+ #else
+ const uint64_t temp = UINT64_C(18446744073709551615);
+ #endif
+
+ if(!VerifyValue<uint64_t>(UINT64_MAX, temp))
+ DoError(nErrorCount, "UINT64_MAX test");
+ }
+
+ {
+ NoopTakingString(L"");
+ // Compilation errors below indicate that the EA_CHAR16/EA_CHAR32 may be incorrectly defined, or EA_CHAR16_NATIVE/EA_CHAR32_NATIVE is incorrect set to 0.
+ #if EA_WCHAR_SIZE == 2 && defined(EA_CHAR16)
+ const char16_t *str = EA_CHAR16("");
+ NoopTakingString(str);
+ #elif EA_WCHAR_SIZE == 4 && defined(EA_CHAR32)
+ const char32_t *str = EA_CHAR32("");
+ NoopTakingString(str);
+ #endif
+ }
+
+ return nErrorCount;
+}
+
+
+
+int TestEAResult()
+{
+ int nErrorCount(0);
+
+ EA::result_type resultSuccess(EA::SUCCESS);
+ EA::result_type resultFailure(EA::FAILURE);
+ EA::result_type resultZero(0); // success
+ EA::result_type resultNeg(-1); // failure
+ EA::result_type resultPos(+1); // success
+
+
+ if(!EA_SUCCEEDED(resultSuccess))
+ DoError(nErrorCount, "EA::SUCCESS test");
+ if(EA_FAILED(resultSuccess))
+ DoError(nErrorCount, "EA::SUCCESS test");
+
+ if(EA_SUCCEEDED(resultFailure))
+ DoError(nErrorCount, "EA::FAILURE test");
+ if(!EA_FAILED(resultFailure))
+ DoError(nErrorCount, "EA::FAILURE test");
+
+ if(!EA_SUCCEEDED(resultZero))
+ DoError(nErrorCount, "EA::SUCCESS test");
+ if(EA_FAILED(resultZero))
+ DoError(nErrorCount, "EA::SUCCESS test");
+
+ if(EA_SUCCEEDED(resultNeg))
+ DoError(nErrorCount, "EA::FAILURE test");
+ if(!EA_FAILED(resultNeg))
+ DoError(nErrorCount, "EA::FAILURE test");
+
+ if(!EA_SUCCEEDED(resultPos))
+ DoError(nErrorCount, "EA::SUCCESS test");
+ if(EA_FAILED(resultPos))
+ DoError(nErrorCount, "EA::SUCCESS test");
+
+ return nErrorCount;
+}
+
+
+
+int TestEAPlatform()
+{
+ int nErrorCount(0);
+
+ // Test EA_PLATFORM_PTR_SIZE
+ {
+ #ifdef EA_PLATFORM_PTR_SIZE
+ if(!VerifyValue<size_t>(EA_PLATFORM_PTR_SIZE, sizeof(void*)))
+ DoError(nErrorCount, "EA_PLATFORM_PTR_SIZE test");
+ #else
+ DoError(nErrorCount, "EA_PLATFORM_PTR_SIZE test");
+ #endif
+ }
+
+
+ // Test EA_PLATFORM_NAME
+ {
+ #ifdef EA_PLATFORM_NAME
+ char buffer[256];
+ sprintf(buffer, "TestEAPlatform: EA_PLATFORM_NAME: %s\n", EA_PLATFORM_NAME);
+ #else
+ DoError(nErrorCount, "EA_PLATFORM_NAME test");
+ #endif
+ }
+
+
+ // Test EA_PLATFORM_DESCRIPTION
+ {
+ #ifdef EA_PLATFORM_DESCRIPTION
+ char buffer[256];
+ sprintf(buffer, "TestEAPlatform: EA_PLATFORM_DESCRIPTION: %s\n", EA_PLATFORM_DESCRIPTION);
+ #else
+ DoError(nErrorCount, "EA_PLATFORM_DESCRIPTION test");
+ #endif
+ }
+
+
+ // Test EA_SYSTEM_LITTLE_ENDIAN / EA_SYSTEM_BIG_ENDIAN
+ {
+ uint32_t kValue = 0x12345678;
+ uint8_t* pValue = (uint8_t*)&kValue;
+
+ #ifdef EA_SYSTEM_LITTLE_ENDIAN
+ if(pValue[0] != 0x78)
+ DoError(nErrorCount, "EA_SYSTEM_ENDIAN test");
+ #elif defined(EA_SYSTEM_BIG_ENDIAN)
+ if(pValue[0] != 0x12)
+ DoError(nErrorCount, "EA_SYSTEM_ENDIAN test");
+ #else
+ DoError(nErrorCount, "EA_SYSTEM_ENDIAN test");
+ #endif
+ }
+
+
+ // Test EA_ASM_STYLE
+ {
+ #if defined(EA_PROCESSOR_X86)
+ #if defined(EA_ASM_STYLE_ATT)
+ asm volatile ("nop");
+ #elif defined(EA_ASM_STYLE_INTEL)
+ __asm nop
+ #endif
+ #else
+ // Add other processors here.
+ #endif
+ }
+
+
+ return nErrorCount;
+}
+
+
+
+// Test compiler limitations
+// Easiest way to come up with tests for some of the more complicated versions
+// of these is to look at the Boost /libs/config/test/*.cxx files. Many of the
+// Boost compiler limitation defines are similar or match exactly to those
+// defined by EABase. See http://www.boost.org if you want to check this out.
+
+#ifndef EA_COMPILER_NO_STATIC_CONSTANTS // If class member static constants are allowed...
+ struct NSC
+ {
+ static const int x = 10;
+ };
+#endif
+
+#ifndef EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_MEMBER_TEMPLATES
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_TEMPLATE_TEMPLATES
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_VOID_RETURNS
+ void TestNVR1();
+ void TestNVR();
+
+ void TestNVR1()
+ {
+ char buffer[8];
+ sprintf(buffer, " ");
+ }
+ void TestNVR()
+ {
+ return TestNVR1();
+ }
+#endif
+
+#ifndef EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_DEDUCED_TYPENAME
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP
+ // Todo
+#endif
+
+#if !defined(EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE) && !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ #include <exception>
+
+ static void TestNESN()
+ {
+ // iPhone gives us this error: Undefined symbols for architecture armv6: std::terminate()
+ // Android gives: undefined reference to std::terminate()
+ // We could possibly define our own std::terminate, but that might collide in the future unexpectedly.
+ #if defined(EA_PLATFORM_IPHONE) || defined(EA_PLATFORM_ANDROID)
+ void (*pTerminate)() = NULL;
+ #else
+ void (*pTerminate)() = std::terminate;
+ #endif
+ char buffer[32];
+ sprintf(buffer, "%p", pTerminate);
+ }
+#endif
+
+#ifndef EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_EXCEPTIONS
+ static bool TestNE()
+ {
+ EA_DISABLE_VC_WARNING(4571)
+ try{
+ char buffer1[8];
+ sprintf(buffer1, " ");
+ throw int(0);
+ }
+ catch(...){
+ char buffer2[8];
+ sprintf(buffer2, " "); // If you are tracing this in a debugger and the debugger stops here, then you need to let the app continue.
+ }
+ return true;
+ EA_RESTORE_VC_WARNING()
+ }
+#endif
+
+
+struct UnwindTest
+{
+ static int x;
+ enum State{
+ kStateNone,
+ kStateConstructed,
+ kStateDestructed
+ };
+ UnwindTest()
+ { x = kStateConstructed; };
+ ~UnwindTest()
+ { x = kStateDestructed; };
+};
+int UnwindTest::x = kStateNone;
+
+#ifndef EA_COMPILER_NO_EXCEPTIONS
+ static void TestNU1()
+ {
+ UnwindTest ut;
+ #ifndef EA_COMPILER_NO_EXCEPTIONS
+ throw(int(0)); // If you are tracing this in a debugger and the debugger stops here, then you need to let the app continue.
+ #endif
+ }
+#endif
+
+bool TestNU()
+{
+ bool bReturnValue(false);
+
+ #ifdef EA_COMPILER_NO_EXCEPTIONS
+ bReturnValue = true; //Nothing to test, so we just return true.
+ #else
+ EA_DISABLE_VC_WARNING(4571)
+ try
+ {
+ TestNU1();
+ }
+ catch(...)
+ {
+ #ifdef EA_COMPILER_NO_UNWIND
+ if(UnwindTest::x == UnwindTest::kStateConstructed)
+ bReturnValue = true;
+ #else
+ if(UnwindTest::x == UnwindTest::kStateDestructed)
+ bReturnValue = true;
+ #endif
+ }
+ EA_RESTORE_VC_WARNING()
+ #endif
+
+ return bReturnValue;
+}
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <vector> // We need do nothing more than #include this.
+#endif
+
+#ifndef EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ // Todo
+#endif
+
+
+#if !defined(EA_COMPILER_NO_TRAILING_RETURN_TYPES)
+ // This usage assumes that C++11 auto is supported, which in practice is always the case because
+ // the case because otherwise trailing return types wouldn't be as useful.
+ static auto AddOne(int i)->int
+ {
+ return i + 1;
+ }
+
+ template <typename T>
+ struct AddTwoClass
+ {
+ typedef float Local_type;
+ Local_type AddTwo(T t);
+ };
+
+ template <typename T>
+ auto AddTwoClass<T>::AddTwo(T t)->Local_type
+ {
+ return (t + 2.f); // Assumes that t is a numerical type in this case.
+ }
+#endif
+
+
+#if !defined(EA_COMPILER_NO_VARIADIC_TEMPLATES)
+ template<typename...>
+ struct VariadicTemplateTuple{};
+
+ template<typename T1, typename T2>
+ struct VariadicTemplatePair
+ {
+ T1 x;
+ T1 y;
+ };
+
+ template<class ... Args1>
+ struct VariadicTemplateZip
+ {
+ template<class ... Args2> struct with
+ {
+ typedef VariadicTemplateTuple<VariadicTemplatePair<Args1, Args2> ... > type;
+ };
+ };
+
+ // VariadicTemplateType is Tuple<Pair<short, unsigned short>, Pair<int, unsigned> >
+ typedef VariadicTemplateZip<short, int>::with<unsigned short, unsigned>::type VariadicTemplateType;
+#endif
+
+
+#if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template<typename T>
+ using VectorAlias = eastl::vector<T, EASTLAllocatorType>;
+#endif
+
+#if !defined(EA_COMPILER_NO_VARIABLE_TEMPLATES)
+ template<class T>
+ constexpr T pi = T(3.1415926535897932385);
+#endif
+
+
+int TestEACompiler()
+{
+ int nErrorCount(0);
+
+ // As of this writing, eacompiler.h defines at least the following compilers:
+ // EA_COMPILER_GNUC
+ // EA_COMPILER_INTEL
+ // EA_COMPILER_METROWERKS
+ // EA_COMPILER_MSVC, EA_COMPILER_MSVC6, EA_COMPILER_MSVC7, EA_COMPILER_MSVC7_1
+
+
+ // Test EA_COMPILER_NAME
+ {
+ #ifdef EA_COMPILER_NAME
+ char buffer[256];
+ sprintf(buffer, "TestEACompiler: EA_COMPILER_NAME: %s\n", EA_COMPILER_NAME);
+ #else
+ DoError(nErrorCount, "EA_COMPILER_NAME test");
+ #endif
+ }
+
+
+ // Test EA_COMPILER_VERSION
+ {
+ #ifdef EA_COMPILER_VERSION
+ char buffer[256];
+ sprintf(buffer, "TestEACompiler: EA_COMPILER_VERSION: %d\n", EA_COMPILER_VERSION);
+ #else
+ DoError(nErrorCount, "EA_COMPILER_VERSION test");
+ #endif
+ }
+
+
+ // Test EA_COMPILER_STRING
+ {
+ #ifdef EA_COMPILER_STRING
+ char buffer[256];
+ sprintf(buffer, "TestEACompiler: EA_COMPILER_STRING: %s\n", EA_COMPILER_STRING);
+ #else
+ DoError(nErrorCount, "EA_COMPILER_STRING test");
+ #endif
+ }
+
+
+ // Test EA_COMPILER_NO_STATIC_CONSTANTS
+ {
+ char buffer[256];
+ sprintf(buffer, "%d", (int)NSC::x);
+ if(buffer[0] != '1')
+ DoError(nErrorCount, "EA_COMPILER_NO_STATIC_CONSTANTS test");
+ }
+
+
+ // Test EA_COMPILER_NO_VOID_RETURNS
+ #ifndef EA_COMPILER_NO_VOID_RETURNS
+ TestNVR1(); // Nothing to test for except successful compilation.
+ #endif
+
+
+ // Test EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ #if !defined(EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE) && !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ TestNESN();
+ #endif
+
+ #if !(defined(EA_PLATFORM_IPHONE) && defined(EA_COMPILER_CLANG)) || defined(__IPHONE_7_0)
+ {
+ // There was a bug in Apple's exception code in iOS SDK versions
+ // prior to 7.0, which is why this test is disabled for versions
+ // of the SDK before 7.0.
+ // Note that __IPHONE_7_0 will be defined for all future SDKs as
+ // well, because Apple simply adds another define with each release
+ // and does not remove the old ones.
+
+ // Test EA_COMPILER_NO_EXCEPTIONS
+ #ifndef EA_COMPILER_NO_EXCEPTIONS
+ if(!TestNE())
+ DoError(nErrorCount, "EA_COMPILER_NO_EXCEPTIONS test");
+ #endif
+
+
+ // Test EA_COMPILER_NO_UNWIND
+ if(!TestNU())
+ DoError(nErrorCount, "EA_COMPILER_NO_UNWIND test");
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_RVALUE_REFERENCES
+ #ifndef EA_COMPILER_NO_RVALUE_REFERENCES
+ {
+ // Trivial test
+ int&& i = 2;
+ FunctionWithUnusedVariables(i);
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_RANGE_BASED_FOR_LOOP
+ #if !defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP)
+ {
+ float floatArray[2] = { 0.0f, 1.0f };
+
+ for(float& f : floatArray)
+ f += 1.0;
+
+ EATEST_VERIFY(floatArray[1] == 2.0f);
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_AUTO
+ #if !defined(EA_COMPILER_NO_AUTO)
+ {
+ auto length = strlen("test");
+ EATEST_VERIFY(length == 4);
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_DECLTYPE
+ #if !defined(EA_COMPILER_NO_DECLTYPE)
+ {
+ struct A { double x; };
+ int b = 9;
+ A a; a.x = 7;
+ decltype(b) x2 = 2;
+ decltype(a.x) x3 = 3.5;
+ EATEST_VERIFY((b + a.x) == 16);
+ EATEST_VERIFY((x3 + x2) == 5.5);
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_LAMBDA_EXPRESSIONS
+ #if !defined(EA_COMPILER_NO_LAMBDA_EXPRESSIONS)
+ {
+ struct LambaTest
+ {
+ static void SortViaAbs(float* x, size_t n)
+ {
+ eastl::insertion_sort(x, x + n,
+ [](float a, float b)
+ { return (a < b); }
+ );
+ }
+ };
+
+ float floatArray[3] = { 0.f, 1.f, 3.f };
+ LambaTest::SortViaAbs(floatArray, EAArrayCount(floatArray));
+ EATEST_VERIFY(floatArray[1] == 1.f);
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_TRAILING_RETURN_TYPES
+ #if !defined(EA_COMPILER_NO_TRAILING_RETURN_TYPES)
+ {
+ int x = AddOne(2); // AddOne declared above.
+ EATEST_VERIFY(x == 3);
+
+ AddTwoClass<float> a;
+ float y = a.AddTwo(2.f);
+ EATEST_VERIFY(y == 4.f);
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_FORWARD_DECLARED_ENUMS
+ // Forward declared enum support requires strongly typed enum support.
+ #if !defined(EA_COMPILER_NO_FORWARD_DECLARED_ENUMS) && !defined(EA_COMPILER_NO_STRONGLY_TYPED_ENUMS)
+ {
+ // This happen to be used below in the EA_COMPILER_NO_STRONGLY_TYPED_ENUMS section.
+ enum class Color;
+ enum class Size : uint8_t;
+ enum Distance: uint8_t;
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_STRONGLY_TYPED_ENUMS
+ #if !defined(EA_COMPILER_NO_STRONGLY_TYPED_ENUMS)
+ {
+ enum class Color { red, blue, green };
+ enum class Size : uint8_t { little = 1, med = 1, large = 2 };
+ enum Distance : uint8_t { close = 1, faraway = 2 };
+
+ Color c = Color::red;
+ EATEST_VERIFY(c != Color::blue);
+
+ Size s = Size::med;
+ EATEST_VERIFY(s != Size::large);
+ static_assert(sizeof(s) == 1, "EA_COMPILER_NO_STRONGLY_TYPED_ENUMS failure");
+
+ Distance d = close;
+ EATEST_VERIFY(d != faraway);
+ static_assert(sizeof(d) == 1, "EA_COMPILER_NO_STRONGLY_TYPED_ENUMS failure");
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_VARIADIC_TEMPLATES
+ #if !defined(EA_COMPILER_NO_VARIADIC_TEMPLATES)
+ {
+ // This uses types defined above.
+ VariadicTemplateType x;
+
+ static_assert(sizeof(x) > 0, "EA_COMPILER_NO_VARIADIC_TEMPLATES failure");
+
+ char buffer[32];
+ sprintf(buffer, "%p", &x);
+ // Ignore the result, as we're just verifying that it compiles.
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_TEMPLATE_ALIASES
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ {
+ // This uses types defined above.
+ // Same as vector<int, EASTLAllocatorType> v;
+ VectorAlias<int> v;
+ EATEST_VERIFY(v.empty());
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_VARIABLE_TEMPLATES
+ #if !defined(EA_COMPILER_NO_VARIABLE_TEMPLATES)
+ static_assert(pi<int> == 3, "variable template failure");
+ static_assert(pi<intmax_t> == 3, "variable template failure");
+ static_assert(pi<double> == 3.1415926535897932385, "variable template failure");
+ #endif
+
+
+ // Test EA_COMPILER_NO_INITIALIZER_LISTS
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ {
+ int a = { 1 };
+ EATEST_VERIFY(a == 1);
+
+ int* e{};
+ EATEST_VERIFY(!e);
+
+ double x = double{1};
+ EATEST_VERIFY(x == 1.0);
+
+ //Disabled until we have a compiler and standard library that can exercise this.
+ //#include <initializer_list>
+ //eastl::vector<eastl::string, int> anim = { {"bear", 4}, {"cassowary", 2}, {"tiger", 7} };
+ //EATEST_VERIFY(!anim.empty());
+
+ // Other tests to do.
+ //std::complex<double> z{1,2};
+ //eastl::vector<int>{1, 2, 3, 4};
+ //f({"Nicholas","Annemarie"});
+ //return { "Norah" };
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_NORETURN / EA_NORETURN
+ #if !defined(EA_COMPILER_NO_NORETURN) && !defined(EA_PLATFORM_PS4) // Kettle SDK up to at least v.915 has a broken definition of the exit() function and fails to compile the valid code below.
+ {
+ struct NoReturnTest
+ {
+ EA_NORETURN void DoesNotReturn()
+ { exit(0); }
+
+ int DoesReturn()
+ { return 17; }
+ };
+
+ NoReturnTest nrt;
+ if(nrt.DoesReturn() == 18)
+ nrt.DoesNotReturn();
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_CARRIES_DEPENDENCY / EA_CARRIES_DEPENDENCY
+ #if !defined(EA_COMPILER_NO_CARRIES_DEPENDENCY)
+ {
+ struct CarriesDependencyTest
+ {
+ CarriesDependencyTest() : mX(0){}
+
+ EA_CARRIES_DEPENDENCY int* Test1(){ return &mX; }
+ void Test2(int* f EA_CARRIES_DEPENDENCY) { char buffer[32]; sprintf(buffer, "%p", f); }
+
+ int mX;
+ };
+
+ CarriesDependencyTest cdt;
+ cdt.Test2(cdt.Test1());
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_FALLTHROUGH / EA_FALLTHROUGH
+ #if !defined(EA_COMPILER_NO_FALLTHROUGH)
+ {
+ int i = 1;
+ switch (i)
+ {
+ case 1:
+ i++;
+
+ EA_FALLTHROUGH;
+ case 2: { i = 42; }
+ break;
+ }
+
+ EATEST_VERIFY(i == 42);
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_NODISCARD / EA_NODISCARD
+ #if !defined(EA_COMPILER_NO_NODISCARD)
+ {
+ struct EA_NODISCARD DoNotDiscardMe {};
+ auto result = [](void) -> DoNotDiscardMe { return {}; }();
+ (void)result; // use the result to avoid [[nodiscard]] compiler warnings
+ }
+ #endif
+
+
+ // Test EA_COMPILER_NO_MAYBE_UNUSED / EA_MAYBE_UNUSED
+ #if !defined(EA_COMPILER_NO_MAYBE_UNUSED)
+ {
+ {
+ EA_MAYBE_UNUSED int notAlwaysUsed = 42;
+
+ // Do not use expressions below. It defeats the purpose of the test.
+ // (void)notAlwaysUsed;
+ // EA_UNUSED(notAlwaysUsed);
+ }
+
+ {
+ [](EA_MAYBE_UNUSED bool b1, EA_MAYBE_UNUSED bool b2) { EA_ASSERT(b1 && b2); }(true, true);
+ }
+ }
+ #endif
+
+ #if !defined(EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS)
+ {
+ struct NonstaticInitializerTest
+ {
+ int a = 7;
+ int b = a;
+ };
+
+ NonstaticInitializerTest nit;
+ EATEST_VERIFY((nit.a == 7) && (nit.b == 7));
+ }
+ #endif
+
+ #if !defined(EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS)
+ {
+ eastl::vector<eastl::vector<int>> listList;
+ EA_UNUSED(listList);
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_ALIGNOF)
+ {
+ char buffer[32];
+ sprintf(buffer, "%u", (unsigned)alignof(uint64_t));
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_ALIGNAS)
+ {
+ struct alignas(32) AlignAsTest1
+ { float mData[4]; };
+
+ struct alignas(uint64_t) AlignAsTest2
+ { float mData[4]; };
+
+ char buffer[32];
+ sprintf(buffer, "%u %u", (unsigned)EA_ALIGN_OF(AlignAsTest1), (unsigned)EA_ALIGN_OF(AlignAsTest2));
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_DELEGATING_CONSTRUCTORS)
+ {
+ struct DCTest
+ {
+ char mChar;
+ double mDouble;
+
+ DCTest() : mChar('\0'), mDouble(1.23){ };
+ DCTest(double d, char c): mChar(c), mDouble(d) { }
+ DCTest(char c) : DCTest(1.23, c) { }
+ DCTest(double d): DCTest(d, 'a') { }
+ DCTest(char*): DCTest() { }
+ };
+
+ DCTest dcTest(1.5);
+ EATEST_VERIFY(dcTest.mDouble == 1.5);
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_INHERITING_CONSTRUCTORS)
+ {
+ struct B1{
+ B1(int x) : mX(x){}
+ int mX;
+ };
+
+ struct B2{
+ B2(int x = 13, int y = 42) : mX(x), mY(y){}
+ int mX, mY;
+ };
+
+ struct D1 : B1 {
+ using B1::B1;
+ };
+
+ struct D2 : B2 {
+ using B2::B2;
+ };
+
+ D1 d1(3);
+ D2 d2a(17, 22);
+ D2 d2b;
+
+ EATEST_VERIFY((d1.mX == 3) &&
+ (d2a.mX == 17) && (d2a.mY == 22) &&
+ (d2b.mX == 13) && (d2b.mY == 42));
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_USER_DEFINED_LITERALS)
+ {
+ // The operators are defined above.
+
+ // Conversion example
+ double x = 90.0_deg; // x = 1.570796
+ EATEST_VERIFY((x > 1.57) && (x < 1.58));
+
+ // Custom type example
+ UDLTest y(123_udl);
+ EATEST_VERIFY(y.mX == 123);
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_STANDARD_LAYOUT_TYPES)
+ {
+ // We don't currently have a good way of testing this without bringing in <type_traits>.
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_EXTENDED_SIZEOF)
+ {
+ struct SizeofTest{
+ int32_t mMember;
+ };
+
+ const size_t testSize = sizeof(SizeofTest::mMember);
+ EATEST_VERIFY(testSize == sizeof(int32_t));
+ char buffer[32];
+ sprintf(buffer, "%u", (unsigned)testSize);
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_INLINE_NAMESPACES)
+ {
+ // The namespaces are defined above.
+
+ INSNamespace::A<INSClass> a;
+ int result = g(a);
+ EATEST_VERIFY(result == 37);
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_UNRESTRICTED_UNIONS)
+ {
+ struct Point {
+ int mX, mY;
+
+ Point(int x = 0, int y = 0) : mX(x), mY(y) {}
+ };
+
+ union U {
+ int z;
+ double w;
+ Point p; // Illegal in C++03; legal in C++11.
+
+ U() { new(&p) Point(); } // Due to the Point member, a constructor definition is now required.
+ };
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS)
+ {
+ // bool cast test
+ struct Testable
+ {
+ explicit operator bool() const
+ { return false; }
+
+ Testable() : mX(37) { }
+ int mX;
+ };
+
+ Testable a;
+
+ if(a)
+ EATEST_VERIFY(a.mX == 37);
+
+ // Class cast test
+ struct Y {
+ int mY;
+ Y(int y = 0) : mY(y) { }
+ };
+
+ struct Z {
+ int mZ;
+ Z(int z = 0) : mZ(z) { }
+ explicit operator Y() const { return Y(mZ); }
+ };
+
+ Z z(3);
+ Y y1(z); // Direct initialization
+ Y y2 = (Y)z; // Cast notation
+
+ EATEST_VERIFY((z.mZ == 3) && (y1.mY == 3) && (y2.mY == 3));
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS)
+ {
+ // FunctionTemplateTest is declared above.
+ int result = FunctionTemplateTest::AddOne((int)3);
+ EATEST_VERIFY(result == 4);
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS)
+ {
+ struct LocalStruct{};
+ eastl::fixed_vector<LocalStruct, 2, false> localStructArray;
+ EATEST_VERIFY(localStructArray.empty());
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_NOEXCEPT)
+ {
+ EATEST_VERIFY(NoExceptTestFunction() == 37);
+ EATEST_VERIFY(NoExceptTestTemplate<NoExceptTestStruct>() == 37);
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_RAW_LITERALS)
+ {
+ // Older versions of GCC are preventing us from using " below in str1. Due to the way the preprocessor
+ // works, it encounters what it sees as a string problem before it handles the #if above. No #ifdefs
+ // can make this problem go away.
+ const char str1[] = R"(This slash is just a slash: \ This quote is just a quote: ' )";
+ const char str2[] = R"delimiter(This slash is just a slash: \ This paren is just a paren: ) )delimiter";
+ EA_UNUSED(str1);
+ EA_UNUSED(str2);
+
+ static_assert(EAArrayCount(str1) == 61, "EA_COMPILER_NO_RAW_LITERALS failure.");
+ static_assert(EAArrayCount(str2) == 61, "EA_COMPILER_NO_RAW_LITERALS failure.");
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_UNICODE_STRING_LITERALS)
+ {
+ const char8_t str1[] = u8"Unicode: \u2018."; // This assumes that \u and \U are supported by the compiler.
+ const char16_t str2[] = u"Unicode: \U00002018.";
+ const char32_t str3[] = U"Unicode: \U00022018.";
+
+ static_assert(EAArrayCount(str1) == 14, "EA_COMPILER_NO_UNICODE_STRING_LITERALS failure.");
+ static_assert(EAArrayCount(str2) == 12, "EA_COMPILER_NO_UNICODE_STRING_LITERALS failure.");
+ static_assert(EAArrayCount(str3) == 12, "EA_COMPILER_NO_UNICODE_STRING_LITERALS failure.");
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS)
+ {
+ const char8_t str1[] = "\u2018\u2019";
+ static_assert(EAArrayCount(str1) == 7, "EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS failure.");
+
+ #if (EA_WCHAR_SIZE >= 2)
+ const wchar_t str2[] = L"\U00002018\U00002019";
+ static_assert(EAArrayCount(str2) == 3, "EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS failure."); // This test assumes that wchar_t is a 16bit or greater value.
+ #endif
+
+ #if defined(EA_CHAR16_NATIVE) && EA_CHAR16_NATIVE
+ const char16_t str3[] = u"\U00002018\U00002019";
+ static_assert(EAArrayCount(str3) == 3, "EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS failure.");
+ #endif
+ }
+ #endif
+
+ #ifndef EA_COMPILER_NO_RVALUE_REFERENCES
+ {
+ const int MAX_ARR_SIZE = 4096;
+ struct StructWithArray { int arr[MAX_ARR_SIZE]; };
+ static_assert(EAArrayCount(StructWithArray().arr) == MAX_ARR_SIZE, "");
+ }
+ #endif
+
+ #if !defined(EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX)
+ {
+ struct InitTest1
+ {
+ int mX;
+ double mY;
+ };
+
+ struct InitTest2
+ {
+ InitTest2(int x, double y) : mX{x}, mY{y} {}
+
+ int mX;
+ double mY;
+ };
+
+ InitTest1 var1{5, 3.2};
+ InitTest2 var2{2, 4.3};
+
+ EATEST_VERIFY(var1.mY == 3.2);
+ EATEST_VERIFY(var2.mY == 4.3);
+ }
+ #endif
+
+ #if !defined(EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS)
+ {
+ class G;
+
+ class X1 {
+ friend G;
+ };
+ }
+ #endif
+
+
+ #if !defined(EA_COMPILER_NO_THREAD_LOCAL)
+ {
+ // We don't yet test this because we don't have a sufficient compiler to test it with.
+ }
+ #endif
+
+ return nErrorCount;
+}
+
+
+#if defined(EA_COMPILER_MSVC) && EA_COMPILER_VERSION >= 1900 // VS2015+
+ EA_DISABLE_VC_WARNING(5029); // nonstandard extension used: alignment attributes in C++ apply to variables, data members and tag types only
+#endif
+int TestEACompilerTraits()
+{
+ int nErrorCount(0);
+
+ // EA_COMPILER_IS_ANSIC
+ // EA_COMPILER_IS_C99
+ // EA_COMPILER_IS_CPLUSPLUS
+ // EA_COMPILER_MANAGED_CPP
+
+ {
+ // EA_COMPILER_INTMAX_SIZE
+
+ #if (EA_COMPILER_INTMAX_SIZE == 16)
+ /* To do: Test this when we get a machine that supports it (e.g. Linux64)/
+ #if defined(__GNUC__)
+ #define int128_t __int128_t
+ #define uint128_t __uint128_t
+ #endif
+
+ int128_t x = UINT128_C(0x12345678123456781234567812345678);
+ uint128_t y = (x * 2);
+
+ if(x == (int128_t)y)
+ DoError(nErrorCount, "EA_COMPILER_INTMAX_SIZE test");
+ */
+
+ #elif (EA_COMPILER_INTMAX_SIZE == 8)
+ int64_t x = UINT64_C(0x1234567812345678);
+ uint64_t y = (x * 2);
+
+ if(x == (int64_t)y)
+ DoError(nErrorCount, "EA_COMPILER_INTMAX_SIZE test");
+
+ #elif (EA_COMPILER_INTMAX_SIZE == 32)
+ int32_t x = UINT64_C(0x12345678);
+ uint32_t y = (x * 2);
+
+ if(x == (int32_t)y)
+ DoError(nErrorCount, "EA_COMPILER_INTMAX_SIZE test");
+
+ #else
+ int16_t x = UINT16_C(0x1234);
+ uint16_t y = (x * 2);
+
+ if(x == (int16_t)y)
+ DoError(nErrorCount, "EA_COMPILER_INTMAX_SIZE test");
+ #endif
+ }
+
+ {
+ // EA_OFFSETOF
+ const size_t o = EA_OFFSETOF(OffsetofTestClass, mY);
+ EA_DISABLE_VC_WARNING(6326)
+ if(o != 4)
+ DoError(nErrorCount, "EA_OFFSETOF test");
+ EA_RESTORE_VC_WARNING()
+ }
+
+ {
+ // EA_SIZEOF_MEMBER
+ const size_t s = EA_SIZEOF_MEMBER(SizeofMemberTestClass, mY);
+ EA_DISABLE_VC_WARNING(6326)
+ if(s != 4)
+ DoError(nErrorCount, "EA_SIZEOF_MEMBER test");
+ EA_RESTORE_VC_WARNING()
+
+ // There have been problems on some platforms (SNC version < 405) where extended sizeof was not properly
+ // supported when used within a member function, so we test for that here.
+
+ class TestClass
+ {
+ public:
+ void TestExtendedSizeof(int& nErrorCount)
+ {
+ EA_DISABLE_VC_WARNING(6326)
+ const size_t sizeOfmY = EA_SIZEOF_MEMBER(SizeofMemberTestClass, mY);
+ if(sizeOfmY != 4)
+ DoError(nErrorCount, "EA_SIZEOF_MEMBER test: within member function");
+ EA_RESTORE_VC_WARNING()
+ }
+ }tc;
+
+ tc.TestExtendedSizeof(nErrorCount);
+ }
+
+ { // EA_ALIGN_OF, EA_PREFIX_ALIGN, etc.
+ size_t a = EA_ALIGN_OF(int);
+ EA_PREFIX_ALIGN(4) int b = 5;
+ EA_ALIGN(8) int c;
+ int d EA_POSTFIX_ALIGN(8);
+ int e EA_POSTFIX_ALIGN(8) = 5;
+ int f EA_POSTFIX_ALIGN(8)(5);
+ struct EA_ALIGN(8) G { int x; };
+ struct EA_PREFIX_ALIGN(8) GG { int x; } EA_POSTFIX_ALIGN(8);
+ EA_ALIGNED(int, h, 8) = 5;
+ EA_ALIGNED(int, i, ALIGNMENT_AMOUNT_16)(5);
+ EA_ALIGNED(int, j[3], ALIGNMENT_AMOUNT_16);
+ EA_ALIGNED(int, k[3], ALIGNMENT_AMOUNT_16) = { 1, 2, 3 };
+ struct EA_ALIGN(8) L { int x; int y; };
+
+ EA_DISABLE_VC_WARNING(4359) // ARM64: C4359: 'TestEACompilerTraits::X': Alignment specifier is less than actual alignment (4), and will be ignored.
+ EA_ALIGN(ALIGNMENT_AMOUNT_32) struct X { int x; int y; } m;
+ EA_RESTORE_VC_WARNING()
+
+ //int N[3] EA_PACKED; // Some compilers (e.g. GCC) don't support this or ignore this and generate a warning.
+ struct P { int x EA_PACKED; int y EA_PACKED; };
+ struct Q { int x; int y; } EA_PACKED;
+ typedef EA_ALIGNED(int, r, ALIGNMENT_AMOUNT_16);
+ r rInstance;
+ typedef EA_ALIGNED(Q, X16, ALIGNMENT_AMOUNT_16);
+ X16 x16Instance;
+
+ char buffer[256];
+ sprintf(buffer, "%p %p %p %p %p %p %p %p %p %p %p %p %p", &a, &b, &c, &d, &e, &f, &h, &i, &j, &k, &m, &rInstance, &x16Instance);
+ }
+
+ { // Test EA_ALIGN_OF
+ if(EA_ALIGN_OF(int8_t) != sizeof(int8_t)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (int16_t)");
+
+ if(EA_ALIGN_OF(int16_t) != sizeof(int16_t)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (int16_t)");
+
+ if(EA_ALIGN_OF(int32_t) != sizeof(int32_t)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (int32_t)");
+
+ #if !defined(EA_ABI_ARM_APPLE)
+ if(EA_ALIGN_OF(int64_t) != sizeof(int64_t)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (int64_t)");
+ #endif
+
+ typedef void (*AlignTestFunctionType)();
+ if(EA_ALIGN_OF(AlignTestFunctionType) != sizeof(void*)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (AlignTestFunctionType)");
+ }
+
+ { // Test EA_ALIGN
+ #ifdef EA_ALIGN
+ char buffer[32];
+
+ EA_ALIGN(ALIGNMENT_AMOUNT_64) int x(0);
+ sprintf(buffer, "%d", x);
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_ALIGN test 1");
+ if((intptr_t)&x & (ALIGNMENT_AMOUNT_64 -1))
+ DoError(nErrorCount, "EA_ALIGN test 2");
+
+ EA_ALIGN(ALIGNMENT_AMOUNT_64) ClassWithDefaultCtor cdcA;
+ //EA_ALIGN(64) ClassWithoutDefaultCtor cwdcA;
+ if((intptr_t)&cdcA & (ALIGNMENT_AMOUNT_64 -1))
+ DoError(nErrorCount, "EA_ALIGN test 3");
+
+ EA_ALIGN(ALIGNMENT_AMOUNT_64) ClassWithDefaultCtor cdcB(3);
+ if((intptr_t)&cdcB & (ALIGNMENT_AMOUNT_64 -1))
+ DoError(nErrorCount, "EA_ALIGN test 4");
+
+ EA_ALIGN(ALIGNMENT_AMOUNT_64) ClassWithoutDefaultCtor cwdcB(3);
+ if((intptr_t)&cwdcB & (ALIGNMENT_AMOUNT_64 -1))
+ DoError(nErrorCount, "EA_ALIGN test 5");
+ #else
+ DoError(nErrorCount, "EA_ALIGN test 6");
+ #endif
+ }
+
+ { // Test EA_PREFIX_ALIGN
+ #ifdef EA_PREFIX_ALIGN
+ char buffer[32];
+ EA_PREFIX_ALIGN(ALIGNMENT_AMOUNT_64) int x(0);
+ sprintf(buffer, "%d", x);
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_PREFIX_ALIGN test 1");
+
+ EA_PREFIX_ALIGN(64) ClassWithDefaultCtor cdcA;
+ //EA_PREFIX_ALIGN(64) ClassWithoutDefaultCtor cwdcA;
+
+ EA_PREFIX_ALIGN(64) ClassWithDefaultCtor cdcB(3);
+ EA_PREFIX_ALIGN(64) ClassWithoutDefaultCtor cwdcB(3);
+ #else
+ DoError(nErrorCount, "EA_PREFIX_ALIGN test 2");
+ #endif
+ }
+
+
+ { // Test EA_POSTFIX_ALIGN
+ #ifdef EA_POSTFIX_ALIGN
+ char buffer[32];
+ int x EA_POSTFIX_ALIGN(ALIGNMENT_AMOUNT_64) = 0;
+ sprintf(buffer, "%d", x);
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_POSTFIX_ALIGN test 1");
+
+ ClassWithDefaultCtor cdcA EA_POSTFIX_ALIGN(ALIGNMENT_AMOUNT_64);
+ //ClassWithoutDefaultCtor cwdcA EA_POSTFIX_ALIGN(64);
+
+ ClassWithDefaultCtor cdcB EA_POSTFIX_ALIGN(ALIGNMENT_AMOUNT_64)(3);
+ ClassWithoutDefaultCtor cwdcB EA_POSTFIX_ALIGN(ALIGNMENT_AMOUNT_64)(3);
+ #else
+ DoError(nErrorCount, "EA_POSTFIX_ALIGN test 2");
+ #endif
+ }
+
+
+ { // Test EA_ALIGNED
+ #ifdef EA_ALIGNED
+ char buffer[64];
+
+ // Verify that a simple declaration works.
+ EA_ALIGNED(int, xA, ALIGNMENT_AMOUNT_64); xA = 0;
+ sprintf(buffer, "%d", xA);
+ if((intptr_t)&xA & (ALIGNMENT_AMOUNT_64 -1))
+ DoError(nErrorCount, "EA_ALIGNED test 1");
+
+ // Verify that a declaration with assignment works.
+ EA_ALIGNED(int, xB, ALIGNMENT_AMOUNT_64) = 0;
+ sprintf(buffer, "%d", xB);
+ if((intptr_t)&xB & (ALIGNMENT_AMOUNT_64 -1))
+ DoError(nErrorCount, "EA_ALIGNED test 2");
+
+ // Verify that a declaration with construction works.
+ EA_ALIGNED(int, xC, ALIGNMENT_AMOUNT_64)(0);
+ sprintf(buffer, "%d", xC);
+ if((intptr_t)&xC & (ALIGNMENT_AMOUNT_64 -1))
+ DoError(nErrorCount, "EA_ALIGNED test 3");
+
+ // Verify that a typedefd declaration works.
+ typedef EA_ALIGNED(int, int16, ALIGNMENT_AMOUNT_16);
+ int16 n16 = 0;
+ sprintf(buffer, "%p", &n16);
+ if((intptr_t)&n16 & (ALIGNMENT_AMOUNT_16 - 1))
+ DoError(nErrorCount, "EA_ALIGNED test 4");
+
+ // Verify that the following tests compile. These tests are here
+ // because the SN compiler (EDG front-end) has some problems with
+ // GCC compatibility related to the 'aligned' __attribute__.
+ ClassWithDefaultCtor cdc;
+ ClassWithoutDefaultCtor cwdc(3);
+ sprintf(buffer, "%p%p", &cdc, &cwdc);
+
+ // Verify that regular usage of EA_ALIGNED works.
+ EA_ALIGNED(ClassWithDefaultCtor, cdc16A, ALIGNMENT_AMOUNT_16);
+ //EA_ALIGNED(ClassWithoutDefaultCtor, cwdcA, 16); // Doesn't have a default ctor, so this can't be done.
+ sprintf(buffer, "%p%p", &cdc16A, (void*)NULL);
+
+ // Verify that argument usage of EA_ALIGNED works.
+ EA_ALIGNED(ClassWithDefaultCtor, cdcB, ALIGNMENT_AMOUNT_16)(3);
+ EA_ALIGNED(ClassWithoutDefaultCtor, cwdcB, ALIGNMENT_AMOUNT_16)(3);
+ sprintf(buffer, "%p%p", &cdcB, &cwdcB);
+
+ // Verify that usage of EA_ALIGNED works within a typedef.
+ typedef EA_ALIGNED(ClassWithDefaultCtor, ClassWithDefaultCtor16, ALIGNMENT_AMOUNT_16);
+ ClassWithDefaultCtor16 cdcC(3);
+ typedef EA_ALIGNED(ClassWithoutDefaultCtor, ClassWithoutDefaultCtor16, ALIGNMENT_AMOUNT_16);
+ ClassWithoutDefaultCtor16 cwdcC(3);
+ sprintf(buffer, "%p%p", &cdcC, &cwdcC);
+ #else
+ DoError(nErrorCount, "EA_ALIGNED test");
+ #endif
+ }
+
+
+ { // Test EA_NO_INLINE / EA_PREFIX_NO_INLINE / EA_POSTFIX_NO_INLINE
+ DoNothingInline();
+ DoNothingPrefixInline();
+ }
+
+
+ { // Test EA_FORCE_INLINE / EA_PREFIX_FORCE_INLINE / EA_POSTFIX_FORCE_INLINE
+ DoNothingForceInline();
+ DoNothingPrefixForceInline();
+ }
+
+ { // Test EA_FORCE_INLINE_LAMBDA
+ auto testLambda = []() EA_FORCE_INLINE_LAMBDA
+ {
+ };
+ testLambda();
+ }
+
+
+ { // Test EA_PACKED
+ #ifdef EA_PACKED
+ char buffer[32];
+ struct X { int x; } EA_PACKED;
+ X x = { 0 };
+ sprintf(buffer, "%d", x.x);
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_PACKED test");
+ #else
+ DoError(nErrorCount, "EA_PACKED test");
+ #endif
+ }
+
+
+ { // Test EA_LIKELY
+
+ if(EA_UNLIKELY(nErrorCount > 0))
+ {
+ if(EA_LIKELY(nErrorCount == 999999)) // Actually this isn't likely, but that's beside the point.
+ DoError(nErrorCount, "EA_LIKELY test");
+ }
+ }
+
+
+ { // Test EA_INIT_PRIORITY
+
+ // We don't test that the init priority succeeded in modifying the init priority.
+ // We merely test that this compiles on all platforms and assume the compiler's
+ // support of this is not broken.
+ if(gInitPriorityTestClass0.mX != 0)
+ DoError(nErrorCount, "EA_INIT_PRIORITY test.");
+
+ if(gInitPriorityTestClass1.mX != 1)
+ DoError(nErrorCount, "EA_INIT_PRIORITY test.");
+ }
+
+
+ { // Test EA_INIT_SEG
+ // We don't test that the init_seg succeeded in modifying the init priority.
+ // We merely test that this compiles on all platforms and assume the compiler's
+ // support of this is not broken.
+ if(gInitSegTestSection.mX != 2300)
+ DoError(nErrorCount, "EA_INIT_SEG test.");
+ }
+
+
+ { // Test EA_MAY_ALIAS
+ // We don't test that the init priority succeeded in modifying the init priority.
+ // We merely test that this compiles on all platforms and assume the compiler's
+ // support of this is not broken.
+ if(gPtr0 != NULL)
+ DoError(nErrorCount, "EA_MAY_ALIAS test.");
+
+ if(gPtr1 != NULL)
+ DoError(nErrorCount, "EA_MAY_ALIAS test.");
+ }
+
+
+ { // Test EA_ASSUME
+ switch (nErrorCount / (nErrorCount + 1))
+ {
+ case 0:
+ Stricmp("nop0", "nop0");
+ break;
+ case 1:
+ Stricmp("nop1", "nop1");
+ break;
+ default:
+ EA_ASSUME(0);
+ }
+ }
+
+
+ { // Test EA_PURE
+ if(!PureFunction())
+ DoError(nErrorCount, "EA_PURE test");
+ }
+
+
+ { // EA_WEAK
+ if(gWeakVariable != 1)
+ DoError(nErrorCount, "EA_WEAK test");
+ }
+
+
+ { // Test EA_NO_VTABLE
+ NoVTable1 nvt1;
+ NoVTable2 nvt2;
+ nvt1.InterfaceFunction();
+ nvt2.InterfaceFunction();
+ }
+
+ { // Test EA_WCHAR_SIZE
+ EA_DISABLE_VC_WARNING(6326)
+ #ifdef EA_WCHAR_SIZE
+ if((EA_WCHAR_SIZE != 1) && (EA_WCHAR_SIZE != 2) && (EA_WCHAR_SIZE != 4))
+ DoError(nErrorCount, "EA_WCHAR_SIZE test");
+ #else
+ DoError(nErrorCount, "EA_WCHAR_SIZE test");
+ #endif
+ EA_RESTORE_VC_WARNING()
+ }
+
+
+ { // Test EA_RESTRICT
+ struct TestRestrict{
+ static size_t Test(char* EA_RESTRICT p){ return sizeof(p); }
+ };
+ char* p = NULL;
+ if(TestRestrict::Test(p) == 0) // This isn't a real test. If there is a failure, it will happen at compile time.
+ DoError(nErrorCount, "EA_RESTRICT test");
+ }
+
+
+ { // Test EA_DEPRECATED
+ /* This causes warnings on compilers, so just disable it.
+ #if defined(EA_DEPRECATED) && (!defined(__GNUC__) || ((__GNUC__ * 100 + __GNUC_MINOR__) < 402)) // GCC 4.2+ is converting deprecated into an error instead of a warning.
+ char buffer[32];
+ EA_DEPRECATED int x(0);
+ sprintf(buffer, "%d", x); (void)x;
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_DEPRECATED test");
+ #elif !defined (EA_DEPRECATED)
+ DoError(nErrorCount, "EA_DEPRECATED test");
+ #endif
+ */
+ }
+
+
+ { // Test EA_PASCAL
+ #ifdef EA_PASCAL
+ struct X{ void EA_PASCAL DoNothing(){} };
+ X x;
+ x.DoNothing();
+ #else
+ DoError(nErrorCount, "EA_PASCAL test");
+ #endif
+ }
+
+
+ { // Test EA_PASCAL_FUNC
+ #ifdef EA_PASCAL_FUNC
+ struct X{ void EA_PASCAL_FUNC(DoNothing()){} };
+ X x;
+ x.DoNothing();
+ #else
+ DoError(nErrorCount, "EA_PASCAL_FUNC test");
+ #endif
+ }
+
+
+ // EA_SSE
+ // Not sure how to properly test at this time.
+
+ { // EA_FP16C
+ #if EA_FP16C
+ // For this test just try to call an intrinsic that is only
+ // available when FP16C is available. The test can make sure the
+ // platform actually supports FP16C when it claims to support it,
+ // but it can't verify a platform doesn't support FP16C.
+ _mm_cvtph_ps(_mm_set1_epi32(42));
+ #endif
+ }
+
+ { // EA_IMPORT
+ // Not possible to do this because import means it will come from outside.
+ //struct X{ EA_IMPORT void DoNothing(){} };
+ //X x;
+ //x.DoNothing();
+ }
+
+
+ { // EA_EXPORT
+ struct X{ EA_EXPORT void DoNothing(){} };
+ X x;
+ x.DoNothing();
+ }
+
+
+ // EA_PREPROCESSOR_JOIN
+ // EA_STRINGIFY
+ {
+ char buffer[32];
+ char bufferExpected[32];
+ const int line = (__LINE__ + 2);
+
+ sprintf(buffer, "%s %s", EA_STRINGIFY(EA_PREPROCESSOR_JOIN(test_, __LINE__)), EA_STRINGIFY(__LINE__));
+ sprintf(bufferExpected, "test_%d %d", line, line);
+
+ if(strcmp(buffer, bufferExpected) != 0)
+ DoError(nErrorCount, "EA_PREPROCESSOR_JOIN/EA_STRINGIFY test");
+ }
+
+
+ { // EAArrayCount
+ const int testArray[13] = { 0 };
+ const size_t arrayCount = EAArrayCount(testArray);
+
+ EA_DISABLE_VC_WARNING(6326)
+ if((arrayCount != 13) || (testArray[0] != 0))
+ DoError(nErrorCount, "EAArrayCount test");
+ EA_RESTORE_VC_WARNING()
+
+ const float testArray2[EAArrayCount(testArray)] = {};
+ static_assert(EAArrayCount(testArray2) == EAArrayCount(testArray), "Array counts should be equivalent.");
+ static_assert(EAArrayCount(testArray2) == 13, "Float array should have 13 elements.");
+
+ EA_DISABLE_VC_WARNING(6326)
+ if (EAArrayCount(testArray2) != EAArrayCount(testArray))
+ DoError(nErrorCount, "EAArrayCount - Array counts should be equivalent.");
+ EA_RESTORE_VC_WARNING()
+
+ EA_DISABLE_VC_WARNING(6326)
+ if (EAArrayCount(testArray2) != 13)
+ DoError(nErrorCount, "EAArrayCount - Float array should have 13 elements.");
+ EA_UNUSED(testArray2);
+ EA_RESTORE_VC_WARNING()
+
+ // Regresssion of user bug report that static_assert<member array> fails with some C++11 compilers.
+ // We revised the templated definition of EAArrayCount to deal with the failure.
+ struct Example
+ {
+ int32_t mItems[7];
+ Example()
+ { static_assert(EAArrayCount(mItems) == 7, "invalid size"); memset(mItems, 0x77, sizeof(mItems)); } // This was failing with the original templated version of EAArrayCount.
+ };
+
+ Example example;
+ EATEST_VERIFY(example.mItems[0] == 0x77777777);
+ }
+
+ { // static_assert
+
+ // Should succeed.
+ static_assert(sizeof(int32_t) == 4, "static_assert failure");
+
+ // Should fail.
+ //static_assert(sizeof(int32_t) == 8, "static_assert failure");
+ }
+
+ { // EA_OPTIMIZE_OFF / EA_OPTIMIZE_ON
+ int result = DisabledOptimizations(2);
+
+ if(result != 2*37)
+ DoError(nErrorCount, "EA_OPTIMIZE_OFF test");
+ }
+
+ { // EA_UNUSED
+ FunctionWithUnusedVariables(3);
+ }
+
+ { // EA_EXTERN_TEMPLATE
+
+ eabase_template<char> x;
+ x.value = 0;
+ if(x.GetValue() != 0)
+ DoError(nErrorCount, "EA_EXTERN_TEMPLATE test");
+ }
+
+ { // EA_FUNCTION_DELETE
+ EA_FUNCTION_DELETE_Test test(17);
+ EATEST_VERIFY(test.x == 17);
+ }
+
+ { // EA_NON_COPYABLE / EANonCopyable
+ NonCopyableA ncA1;
+ ncA1.x = 1;
+ //NonCopyableA ncA2(ncA1); // Both of these lines should result in
+ //ncA1 = ncA1; // compiler errors if enabled.
+ EA_UNUSED(ncA1);
+
+ NonCopyableB ncB1;
+ ncB1.x = 1;
+ //NonCopyableB ncB2(ncB1); // Both of these lines should result in
+ //ncB1 = ncB1; // compiler errors if enabled.
+ EA_UNUSED(ncB1);
+
+ NonCopyableSubclass ncs1(3);
+ //NonCopyableSubclass ncs2(ncs1); // Both of these lines should result in
+ //ncs2 = ncs2; // compiler errors if enabled.
+ EATEST_VERIFY(ncs1.mX == 3);
+
+ struct NonCopyableLocal
+ {
+ NonCopyableLocal(){}
+ int x;
+
+ EA_NON_COPYABLE(NonCopyableLocal)
+ };
+ NonCopyableLocal ncLocal1;
+ ncLocal1.x = 1;
+ //NonCopyableLocal ncLocal2(ncLocal1); // Both of these lines should result in
+ //ncLocal1 = ncLocal1; // compiler errors if enabled.
+ EA_UNUSED(ncLocal1);
+ }
+
+ return nErrorCount;
+}
+#if defined(EA_COMPILER_MSVC) && EA_COMPILER_VERSION >= 1900 // VS2015+
+ EA_RESTORE_VC_WARNING();
+#endif
+
+
+/////////////////////////////////////////////////
+// nullptr test
+/////////////////////////////////////////////////
+
+#if !defined(EA_HAVE_nullptr_IMPL)
+ #define EA_RTTI_ENABLED 0 // This is something that ideally would be defined in EABase.
+
+
+ int mfCCount = 0;
+ struct C
+ {
+ void mf()
+ {
+ mfCCount++;
+ }
+ };
+
+
+ int fDoubleCount = 0;
+ static void f(double*)
+ {
+ fDoubleCount++;
+ }
+
+
+ int fIntCount = 0;
+ static void f(int)
+ {
+ fIntCount++;
+ }
+
+
+ int gTCount = 0;
+ template<typename T>
+ void g(T*)
+ {
+ gTCount++;
+ }
+
+ int hTCount = 0;
+ template<typename T>
+ void h(T)
+ {
+ hTCount++;
+ }
+#endif
+
+
+static int TestNullPtr()
+{
+ int nErrorCount(0);
+
+ #if defined(EA_HAVE_nullptr_IMPL) // If the compiler provides a native version...
+ // Don't question it. VC++ nullptr -seems- to be not entirely conforming anyway.
+ #else
+ using namespace std;
+
+ // DoError("TestNullptr\n");
+ void* pv = nullptr; // OK
+ EATEST_VERIFY(pv == 0);
+ EATEST_VERIFY(pv == nullptr);
+ #ifndef __MWERKS__ // Currently the Metrowerks compiler crashes on this code.
+ EATEST_VERIFY(nullptr == pv);
+ #endif
+
+ pv = &pv; // OK
+ EATEST_VERIFY(pv != 0);
+ #if !defined(__GNUC__) || (__GNUC__ > 3)
+ EATEST_VERIFY(pv != nullptr);
+ #endif
+
+ const char* pc = nullptr; // OK
+ EATEST_VERIFY(pc == 0);
+ EATEST_VERIFY(pc == nullptr);
+ #ifndef __MWERKS__
+ EATEST_VERIFY(nullptr == pc);
+ #endif
+
+ C* pC = nullptr; // OK
+ EATEST_VERIFY(pC == 0);
+ EATEST_VERIFY(pC == nullptr);
+ #ifndef __MWERKS__
+ EATEST_VERIFY(nullptr == pC);
+ #endif
+
+ f(nullptr); // OK. Calls f(double*).
+ EATEST_VERIFY(fDoubleCount == 1);
+
+ f(0); // OK. Calls f(int)
+ EATEST_VERIFY(fIntCount == 1);
+
+ //g(nullptr); // Not OK. Can't deduce T
+
+ h(0); // OK. Deduces T = int
+ EATEST_VERIFY(hTCount == 1);
+
+ h(nullptr); // OK. Deduces T = nullptr_t
+ EATEST_VERIFY(hTCount == 2);
+
+ h((float*)nullptr); // OK. Deduces T = float*
+ EATEST_VERIFY(hTCount == 3);
+
+ void (C::*pmf)() = 0; // OK
+ EATEST_VERIFY(pmf == 0);
+
+ #if !defined(__GNUC__) || (__GNUC__ > 3)
+ void (C::*pmf2)() = nullptr; // OK
+ EA_UNUSED(pmf2);
+ #ifndef __MWERKS__ // CodeWarrior is not following the C++ Standard properly.
+ EATEST_VERIFY(pmf2 == 0);
+ EATEST_VERIFY(pmf2 == nullptr);
+ EATEST_VERIFY(nullptr == pmf2);
+ #endif
+ #endif
+
+ #if !defined(__GNUC__) || (__GNUC__ > 3)
+ void (C::*pmf3)() = &C::mf;
+ #ifndef __MWERKS__
+ EATEST_VERIFY(pmf3 != nullptr);
+ #endif
+ #endif
+
+ nullptr_t n1 = nullptr, n2 = nullptr; // OK
+ n1 = n2; // OK
+ h(n1);
+
+ //const int const0 = 0;
+ //if(const0 == nullptr) {} // Not OK.
+
+ //int n = 0;
+ //if(n == nullptr) {} // Not OK.
+
+ //nullptr_t* pN = &n1; // Not OK. Address can't be taken.
+
+ EATEST_VERIFY(!nullptr); // Supposedly OK, but VC++ doesn't accept it.
+ if(nullptr) // Supposedly OK, but VC++ doesn't accept it.
+ EATEST_VERIFY(false);
+
+ int val = 0;
+ char* ch3 = val ? nullptr : nullptr; // OK.
+ EATEST_VERIFY(ch3 == 0);
+
+ //char* ch4 = val ? 0 : nullptr; // Not OK. Types are not compatible.
+ //int n3 = val ? nullptr : nullptr; // Not OK. nullptr can't be converted to int.
+ //int n4 = val ? 0 : nullptr; // Not OK. Types are not compatible.
+
+ // void* p = 0;
+ // reinterpret_cast<nullptr>(p); // Not OK. But all compilers allow this. A reinterpret_cast cannot be used to convert a value of any type to the type std::nullptr_t.
+
+ //This is supposed to succeed, but we can't make it so, given the conflicting requirements of the C++ and nullptr standards.
+ //EATEST_VERIFY(sizeof(nullptr) == sizeof(void*)); // I don't currently have a means to make this work. See the class for why.
+
+ #ifndef __MWERKS__
+ nullptr_t n3 = nullptr, n4 = nullptr;
+ EATEST_VERIFY(n3 == n4);
+ EATEST_VERIFY(!(n3 != n4));
+ EATEST_VERIFY(n3 <= n4);
+ EATEST_VERIFY(n3 >= n4);
+ EATEST_VERIFY(!(n3 < n4));
+ EATEST_VERIFY(!(n3 > n4));
+ #endif
+
+
+ #if EA_RTTI_ENABLED
+ typeid(nullptr); // OK
+ #endif
+
+ #ifndef EA_COMPILER_NO_EXCEPTIONS
+ try{
+ pv = 0;
+ throw nullptr; // OK
+ }
+ catch(nullptr_t n)
+ {
+ EATEST_VERIFY(n == pv); // OK
+ h(n);
+ }
+ #endif
+ #endif // EA_HAVE_nullptr_IMPL
+
+ return nErrorCount;
+}
+
+
+static int TestEAHave()
+{
+ int nErrorCount(0);
+
+ // EA_HAVE_XXX_DECL
+ //
+ // We don't have a simple way to test these, as they indicate the presence of
+ // declarations and not necessarily the presence of implementations.
+ //
+ // EA_HAVE_mkstemps_DECL
+ // EA_HAVE_gettimeofday_DECL
+ // EA_HAVE_strcasecmp_DECL
+ // EA_HAVE_strncasecmp_DECL
+ // EA_HAVE_mmap_DECL
+ // EA_HAVE_fopen_DECL
+ // EA_HAVE_ISNAN(x)
+ // EA_HAVE_ISINF(x)
+ // EA_HAVE_itoa_DECL
+ // EA_HAVE_nanosleep_DECL
+ // EA_HAVE_utime_DECL
+ // EA_HAVE_ftruncate_DECL
+ // EA_HAVE_localtime_DECL
+ // EA_HAVE_pthread_getattr_np_DECL
+
+ #if defined(EA_HAVE_ISNAN)
+ EATEST_VERIFY(EA_HAVE_ISNAN(1.f) == 0);
+ #endif
+ #if defined(EA_HAVE_ISINF)
+ EATEST_VERIFY(EA_HAVE_ISINF(1.f) == 0);
+ #endif
+
+
+ // EA_HAVE_XXX_IMPL
+
+ #if defined(EA_HAVE_WCHAR_IMPL)
+ size_t wlen = wcslen(L"test");
+ EATEST_VERIFY(wlen == 4); // Expect success.
+ #endif
+
+ #if defined(EA_HAVE_getenv_IMPL)
+ char* p = getenv("nonexistent_asdf");
+ EATEST_VERIFY(!p); // Expect failure.
+ #endif
+
+ #if defined(EA_HAVE_setenv_IMPL)
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/setenv.html
+ // int setenv(const char *envname, const char *envval, int overwrite);
+ setenv("test_asdf", "value", 0); // We ignore the return value, as we can't tell if the platform allows it.
+ #endif
+
+ #if defined(EA_HAVE_unsetenv_IMPL)
+ unsetenv("test_asdf"); // Ignore the return value.
+ #endif
+
+ #if defined(EA_HAVE_putenv_IMPL)
+ // int putenv(char* string);
+ char str[] = "a=b";
+ #if defined(EA_PLATFORM_MICROSOFT) && defined(EA_COMPILER_MICROSOFT)
+ // Microsoft uses _putenv, while others use putenv.
+ int putenvSuccess = _putenv(str);
+ #else
+ int putenvSuccess = putenv(str);
+ #endif
+ EATEST_VERIFY(putenvSuccess == 0);
+ #endif
+
+ #if defined(EA_HAVE_time_IMPL)
+ time_t timeResult = time(NULL);
+ EATEST_VERIFY(timeResult != 0); // Expect success.
+ #endif
+
+ #if defined(EA_HAVE_clock_IMPL)
+ // http://www.cplusplus.com/reference/ctime/clock/
+ clock_t clockResult = clock();
+ EATEST_VERIFY(clockResult != (clock_t) -1); // Expect success.
+ #endif
+
+ #if defined(EA_HAVE_fopen_IMPL)
+ // We don't have a portable way of testing the success of this, as different platforms have different file systems and requirements.
+ // since we want this to fail, we will use a normal Windows path as some platforms /require/ a windows-like mount path else they call abort()
+ FILE* pFile = fopen("Q:\\nonexistent_pleasedontexist", "r");
+ EATEST_VERIFY(pFile == NULL); // Expect failure.
+ if(pFile)
+ fclose(pFile);
+ #endif
+
+ #if defined(EA_HAVE_inet_ntop_IMPL)
+ char inetResult[32];
+ const char* pInetNtopResult = inet_ntop(0, "", inetResult, (uint16_t)EAArrayCount(inetResult)); // Cast to uint16_t because different libraries declare this arg differently, and this is a lowest common denominator.
+ EATEST_VERIFY(pInetNtopResult == NULL); // Expect failure.
+ #endif
+
+ #if defined(EA_HAVE_inet_pton_IMPL)
+ char inetPtonResult[32];
+ int inetResultVal = inet_pton(0, "", inetPtonResult);
+ EATEST_VERIFY(inetResultVal <= 0); // Expect failure.
+ #endif
+
+ #if defined(EA_HAVE_clock_gettime_IMPL)
+ struct timespec tp;
+ int clockGettimeResult = clock_gettime(CLOCK_MONOTONIC, &tp);
+ EATEST_VERIFY(clockGettimeResult <= 0); // Expect success or error.
+ #endif
+
+ #if defined(EA_HAVE_getcwd_IMPL)
+ {
+ char cwdBuffer[1];
+ char *result = getcwd(cwdBuffer, EAArrayCount(cwdBuffer));
+ EA_UNUSED(result);
+ }
+ #endif
+
+ #if defined(EA_HAVE_tmpnam_IMPL)
+ {
+ char tmpnamBuffer[L_tmpnam];
+ char *result = tmpnam(tmpnamBuffer);
+ EA_UNUSED(result);
+ }
+ #endif
+
+ #if defined(EA_HAVE_nullptr_IMPL)
+ // This is exercised elsewhere in this test.
+ #endif
+
+ #if defined(EA_HAVE_std_terminate_IMPL)
+ if(nErrorCount == INT_MIN) // This is impossible.
+ std::terminate();
+ #endif
+
+ #if defined(EA_HAVE_CPP11_ITERATOR_IMPL)
+ // <iterator>: std::begin, std::end, std::prev, std::next, std::move_iterator.
+ #if defined(EA_HAVE_CPP11_INITIALIZER_LIST)
+ eastl::vector<int> intArray;
+ EATEST_VERIFY(std::begin(intArray) == std::end(intArray));
+ #endif
+
+ char charArray[16] = { 0 };
+ EATEST_VERIFY(std::begin(charArray) != std::end(charArray));
+ #endif
+
+ #if defined(EA_HAVE_CPP11_SMART_POINTER_IMPL)
+ // std::weak_ptr, std::shared_ptr, std::unique_ptr, std::bad_weak_ptr
+ std::shared_ptr<int> spInt;
+ std::weak_ptr<int> wpInt;
+ std::unique_ptr<int> upInt;
+ //std::bad_weak_ptr<int> bwpInt;
+ #endif
+
+ #if defined(EA_HAVE_CPP11_FUNCTIONAL_IMPL) && !defined(EA_PLATFORM_ANDROID) // Our Android build system is failing to link _1, _2, etc.
+ // function, mem_fn, bad_function_call, is_bind_expression, is_placeholder, reference_wrapper, hash, bind, ref, cref.
+ // It turns out that all compiler/library combinations that support this also support C++11 auto, so we can use it.
+
+ #if !defined(EA_PLATFORM_ANDROID) // Our Android build system is failing to link _1, _2, etc.
+ using namespace std::placeholders; //for _1, _2, _3...
+
+ int n = 7;
+ auto f = std::bind(BindTestFunction, _2, _1, 42, std::cref(n), n);
+ f(1, 2, 1001); // 1 is bound by _2, 2 is bound by _1, 1001 is unused
+
+ BindTestStruct bts;
+ auto f2 = std::bind(&BindTestStruct::Test, bts, 95, _1);
+ f2(5);
+ #endif
+
+ std::hash<uint32_t> hash32;
+ EATEST_VERIFY(hash32(37) == hash32(37));
+ #endif
+
+ #if defined(EA_HAVE_CPP11_EXCEPTION_IMPL)
+ // current_exception, rethrow_exception, exception_ptr, make_exception_ptr
+ #if !defined(EA_COMPILER_NO_EXCEPTIONS)
+ EA_DISABLE_VC_WARNING(4571)
+
+ if(nErrorCount == 9999999) // This will never be true.
+ {
+ std::exception_ptr ep = std::make_exception_ptr(std::logic_error("logic_error"));
+
+ try {
+ std::rethrow_exception(ep);
+ }
+ catch (...) {
+ ep = std::current_exception();
+ std::rethrow_exception(ep);
+ }
+ }
+ EA_RESTORE_VC_WARNING()
+ #endif
+ #endif
+
+ #if defined(EA_HAVE_CPP11_TYPE_TRAITS)
+ // Some form of type traits have been supported by compilers since well before C++11. But C++11 introduced
+ // a number of type traits that weren't previously supported by compilers. We require that full C++11 type
+ // traits be supported. See the C++11 Standard, section 20.9.2.
+
+ // We currently test a sampling of specific traits that didn't exist in preliminary standard library versions.
+ bool ttResult = std::is_nothrow_move_constructible<int>::value;
+ EATEST_VERIFY(ttResult);
+
+ ttResult = std::is_standard_layout<int>::value;
+ EATEST_VERIFY(ttResult);
+ #endif
+
+ return nErrorCount;
+}
+
+
+static int TestEAAlignment()
+{
+ // This test does a couple of allocations and for each allocation it determines
+ // the minimal alignment. If this (local) minimum is less than the global minimum
+ // then the global minimum is updated. After all the allocation sizes and iterations
+ // it checks this minimum to make sure that the EABase EA_PLATFORM_MIN_MALLOC_ALIGNMENT
+ // is at least that number, since you would never want to ask for finer grained
+ // allocations as malloc can't give them.
+
+ int nErrorCount(0);
+
+ const size_t MAX_SIZE = 128;
+ const size_t NUM_ITERATIONS = 32;
+
+ size_t minAlignment = MAX_SIZE;
+
+ for(size_t size = 1; size <= MAX_SIZE; ++size)
+ {
+
+ for(size_t iteration = 0; iteration < NUM_ITERATIONS; ++iteration)
+ {
+ void* ptr = malloc(size);
+ size_t address = static_cast<size_t>(reinterpret_cast<uintptr_t>(ptr));
+
+ size_t alignment = MAX_SIZE;
+
+ do
+ {
+ if((address & (alignment - 1)) == 0)
+ {
+ break;
+ }
+ else
+ {
+ alignment >>= 1;
+ }
+
+ } while(alignment > 0);
+
+ if(alignment < minAlignment)
+ minAlignment = alignment;
+
+ free(ptr);
+ }
+ }
+
+ EATEST_VERIFY_F(EA_PLATFORM_MIN_MALLOC_ALIGNMENT <= minAlignment,
+ "'EA_PLATFORM_MIN_MALLOC_ALIGNMENT=%d' <= 'minAlignment=%d' failure on '%s'",
+ EA_PLATFORM_MIN_MALLOC_ALIGNMENT, minAlignment, EA_PLATFORM_DESCRIPTION);
+
+ return nErrorCount;
+}
+
+
+
+
+#include <EABase/eastdarg.h>
+#include <EAStdC/EASprintf.h>
+#include <EAStdC/EAString.h>
+
+
+static void TestEAStdargReferenceHelp(char* p, va_list_reference args)
+{
+ EA::StdC::Sprintf(p, "%d", va_arg(args, int));
+}
+
+static void TestEAStdargReference(char* p1, char* p2, ...) // Must be called with two ints for ...
+{
+ va_list args;
+ va_start(args, p2);
+ TestEAStdargReferenceHelp(p1, args); // We pass args to TestEAStdargReferenceHelp by reference, which results in args being
+ TestEAStdargReferenceHelp(p2, args); // modified upon return. So upon this second call args should have used the first int arg.
+ va_end(args);
+}
+
+
+static void TestEAStdargCopy(char* p1, char* p2, ...) // Must be called with two ints for ...
+{
+ va_list args, argsCopy;
+ va_start(args, p2);
+ va_copy(argsCopy, args);
+ EA::StdC::Vsprintf(p1, "%d", args);
+ EA::StdC::Vsprintf(p2, "%d", argsCopy);
+ va_end(args);
+ va_end(argsCopy);
+}
+
+
+
+static int TestEAStdarg()
+{
+ int nErrorCount(0);
+
+ // VA_ARG_COUNT
+ static_assert(VA_ARG_COUNT() == 0, "VA_ARG_COUNT()");
+ static_assert(VA_ARG_COUNT(1) == 1, "VA_ARG_COUNT(1)");
+ static_assert(VA_ARG_COUNT(2, 2) == 2, "VA_ARG_COUNT(2)");
+ static_assert(VA_ARG_COUNT(3, 3, 3) == 3, "VA_ARG_COUNT(3)");
+ static_assert(VA_ARG_COUNT(4, 4, 4, 4) == 4, "VA_ARG_COUNT(4)");
+ static_assert(VA_ARG_COUNT(5, 5, 5, 5, 5) == 5, "VA_ARG_COUNT(5)");
+
+
+ char buffer1[64];
+ char buffer2[64];
+
+ // va_copy
+ TestEAStdargCopy(buffer1, buffer2, 17, 99);
+ EATEST_VERIFY((EA::StdC::AtoI32(buffer1) == 17) && (EA::StdC::AtoI32(buffer2) == 17));
+
+ // va_list_reference
+ TestEAStdargReference(buffer1, buffer2, 17, 99);
+ EATEST_VERIFY((EA::StdC::AtoI32(buffer1) == 17) && (EA::StdC::AtoI32(buffer2) == 99));
+
+ return nErrorCount;
+}
+
+
+
+static int TestEAUnits()
+{
+ int nErrorCount(0);
+
+ static_assert(EA_BYTE(64) == 64, "SI units mismatch");
+ static_assert(EA_BYTE(1000) == 1000, "SI units mismatch");
+
+ static_assert(EA_KILOBYTE(1) != EA_KIBIBYTE(1), "SI units mismatch");
+ static_assert(EA_MEGABYTE(1) != EA_MEBIBYTE(1), "SI units mismatch");
+ static_assert(EA_GIGABYTE(1) != EA_GIBIBYTE(1), "SI units mismatch");
+
+ static_assert((4 % EA_BYTE(4*10)) == 4, "Order of operations error"); //If unit macros aren't enclosed in parentheses, this will cause order of operation problems in this situation.
+ static_assert((4 % EA_MEBIBYTE(4)) == 4, "Order of operations error"); //If unit macros aren't enclosed in parentheses, this will cause order of operation problems in this situation.
+#ifndef EA_PROCESSOR_X86
+ static_assert(EA_TERABYTE(1) != EA_TEBIBYTE(1), "SI units mismatch");
+ static_assert(EA_PETABYTE(1) != EA_PEBIBYTE(1), "SI units mismatch");
+ static_assert(EA_EXABYTE(1) != EA_EXBIBYTE(1), "SI units mismatch");
+#endif
+
+ static_assert(EA_KILOBYTE(1) == 1000, "SI units mismatch");
+ static_assert(EA_MEGABYTE(2) == EA_KILOBYTE(2) * 1000, "SI units mismatch");
+ static_assert(EA_GIGABYTE(3) == EA_MEGABYTE(3) * 1000, "SI units mismatch");
+#ifndef EA_PROCESSOR_X86
+ static_assert(EA_TERABYTE(4) == EA_GIGABYTE(4) * 1000, "SI units mismatch");
+ static_assert(EA_PETABYTE(5) == EA_TERABYTE(5) * 1000, "SI units mismatch");
+ static_assert(EA_EXABYTE(6) == EA_PETABYTE(6) * 1000, "SI units mismatch");
+#endif
+
+ static_assert(EA_KIBIBYTE(1) == 1024, "SI units mismatch");
+ static_assert(EA_MEBIBYTE(2) == EA_KIBIBYTE(2) * 1024, "SI units mismatch");
+ static_assert(EA_GIBIBYTE(3) == EA_MEBIBYTE(3) * 1024, "SI units mismatch");
+#ifndef EA_PROCESSOR_X86
+ static_assert(EA_TEBIBYTE(4) == EA_GIBIBYTE(4) * 1024, "SI units mismatch");
+ static_assert(EA_PEBIBYTE(5) == EA_TEBIBYTE(5) * 1024, "SI units mismatch");
+ static_assert(EA_EXBIBYTE(6) == EA_PEBIBYTE(6) * 1024, "SI units mismatch");
+#endif
+
+ return nErrorCount;
+}
+
+
+
+template<typename Int128T, typename MakeInt128T>
+static void TestInt128T(MakeInt128T MakeInt128, const char* errorMsg, int& nErrorCount)
+{
+ auto VERIFY = [&](bool result) { if(!result) DoError(nErrorCount, errorMsg); };
+ const auto TestValue = MakeInt128(0x1234567812345678, 0x1234567812345678);
+ const Int128T zero = MakeInt128(0, 0);
+ const Int128T one = MakeInt128(0, 1);
+ const Int128T two = MakeInt128(0, 2);
+ const Int128T big = MakeInt128(0x1234567812345678, 0x1234567812345678);;
+ const Int128T negative_one = MakeInt128(0xffffffffffffffff, 0xffffffffffffffff);
+ const Int128T half_range = MakeInt128(0x0, 0xffffffffffffffff);
+
+ {
+ // Int128T a1 = 42.f;
+ // Int128T a2 = 42.0f;
+ // Int128T a3 = 42;
+ // Int128T a4 = 42u;
+ // Int128T a5 = 42ul;
+ // Int128T a6 = 42ull;
+ }
+
+ // default ctor
+ {
+ { Int128T a; (void)a; }
+ { Int128T a{}; (void)a; }
+
+ static_assert(eastl::is_trivially_default_constructible_v<Int128T>, "128-bit integer failure");
+ }
+
+ // operator-
+ {
+ VERIFY(negative_one == -one);
+ }
+
+ // operator~
+ {
+ auto not_one = ~one;
+ VERIFY(not_one == MakeInt128(0xffffffffffffffff, 0xfffffffffffffffe));
+ }
+
+ // operator+
+ {
+ VERIFY(zero == +zero);
+ VERIFY(one == +one);
+ VERIFY(big == +big);
+ }
+
+ // operator+
+ // operator-
+ // operator*
+ // operator/
+ // operator%
+ {
+ auto i = MakeInt128(42, 42);
+
+ i = i + one;
+ VERIFY(i == MakeInt128(42, 43));
+
+ i = i - one;
+ VERIFY(i == MakeInt128(42, 42));
+
+ i = i * two;
+ VERIFY(i == MakeInt128(84, 84));
+
+ i = i / two;
+ VERIFY(i == MakeInt128(42, 42));
+ }
+
+ // operator== / operator!=
+ {
+ VERIFY(TestValue == MakeInt128(0x1234567812345678, 0x1234567812345678));
+ VERIFY(TestValue == TestValue);
+ VERIFY(MakeInt128(0x1, 0x1) != MakeInt128(0x1, 0x2));
+ }
+
+ // operator<
+ {
+ VERIFY(zero < one);
+ VERIFY(one < two);
+ VERIFY(zero < two);
+ VERIFY(zero < big);
+ VERIFY(one < big);
+
+ VERIFY(MakeInt128(123, 122) < MakeInt128(123, 123));
+ VERIFY(MakeInt128(122, 123) < MakeInt128(123, 123));
+ }
+
+ // operator> / operator>=
+ {
+ VERIFY(TestValue > MakeInt128(0, 0x1234567812345678));
+ VERIFY(TestValue >= MakeInt128(0, 0x1234567812345678));
+ VERIFY(TestValue >= TestValue);
+ VERIFY(TestValue >= TestValue);
+ }
+
+ // operator< / operator<=
+ {
+ VERIFY(MakeInt128(0, 0x1234567812345678) < TestValue);
+ VERIFY(MakeInt128(0, 0x1234567812345678) <= TestValue);
+ VERIFY(TestValue <= TestValue);
+ VERIFY(TestValue <= TestValue);
+ }
+
+ // operator++
+ {
+ auto i = MakeInt128(0, 0);
+ VERIFY(i++ == MakeInt128(0, 0));
+ VERIFY(++i == MakeInt128(0, 2));
+ VERIFY(++i == MakeInt128(0, 3));
+ VERIFY(i++ == MakeInt128(0, 3));
+
+ {
+ auto n1 = half_range;
+ VERIFY(++n1 == MakeInt128(1, 0));
+ }
+ }
+
+ // operator--
+ {
+ auto i = MakeInt128(0, 5);
+ VERIFY(i-- == MakeInt128(0, 4));
+ VERIFY(--i == MakeInt128(0, 4));
+ VERIFY(--i == MakeInt128(0, 3));
+ VERIFY(i-- == MakeInt128(0, 2));
+
+ {
+ auto n1 = MakeInt128(1, 0);
+ VERIFY(n1-- == half_range);
+ }
+ }
+
+ // operator+=
+ // operator-=
+ // operator*=
+ // operator/=
+ // operator%=
+ {
+ auto n = MakeInt128(0, 5);
+
+ n += MakeInt128(0, 15);
+ VERIFY(n == MakeInt128(0, 20));
+
+ n -= MakeInt128(0, 18);
+ VERIFY(n == MakeInt128(0, 2));
+
+ n *= MakeInt128(0, 2);
+ VERIFY(n == MakeInt128(0, 4));
+
+ n /= MakeInt128(0, 2);
+ VERIFY(n == MakeInt128(0, 2));
+
+ n %= MakeInt128(0, 2);
+ VERIFY(n == MakeInt128(0, 0));
+ }
+
+ // operator>>
+ // operator<<
+ // operator>>=
+ // operator<<=
+ {
+ auto n = MakeInt128(0, 0x4);
+
+ {
+ auto a = n >> 1;
+ VERIFY(a == MakeInt128(0, 0x2));
+
+ a >>= 1;
+ VERIFY(a == MakeInt128(0, 0x1));
+ }
+
+ {
+ auto a = n << 1;
+ VERIFY(a == MakeInt128(0, 0x8));
+
+ a <<= 1;
+ VERIFY(a == MakeInt128(0, 0x10));
+ }
+
+ {
+ auto a = half_range;
+
+ a <<= 1;
+ VERIFY(a == MakeInt128(0x1, 0xfffffffffffffffe));
+ }
+
+ {
+ auto a = half_range;
+ a >>= 1;
+ VERIFY(a == MakeInt128(0x0, 0x7fffffffffffffff));
+ }
+ }
+
+ // operator^
+ // operator|
+ // operator&
+ // operator^=
+ // operator|=
+ // operator&=
+ {
+ const auto n1 = MakeInt128(0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA);
+ const auto n2 = MakeInt128(0x5555555555555555, 0x5555555555555555);
+
+ {
+ auto i = n1 ^ n2;
+ VERIFY(i == negative_one);
+
+ auto n3 = n1;
+ n3 ^= n2;
+ VERIFY(n3 == negative_one);
+ }
+
+ {
+ auto i = n1 | n2;
+ VERIFY(i == negative_one);
+
+ auto n3 = n1;
+ n3 |= n2;
+ VERIFY(n3 == negative_one);
+ }
+
+ {
+ auto i = n1 & n2;
+ VERIFY(i == zero);
+
+ auto n3 = n1;
+ n3 &= n2;
+ VERIFY(n3 == zero);
+ }
+
+ }
+
+ // Test loop counter
+ {
+ {
+ int counter = 0;
+ Int128T i = MakeInt128(0,0);
+
+ for (; i < MakeInt128(0,10); i++)
+ counter++;
+
+ VERIFY(i == MakeInt128(0, counter));
+ }
+
+ {
+ // int counter = 0;
+
+ // for (Int128T i = 0; i < 10; i++)
+ // counter++;
+
+ // VERIFY(i == counter);
+ }
+ }
+}
+
+
+
+static int TestEAInt128_t()
+{
+ int nErrorCount(0);
+
+ TestInt128T<uint128_t>(UINT128_C, "uint128_t test failure", nErrorCount);
+ TestInt128T<int128_t>(INT128_C, "int128_t test failure", nErrorCount);
+
+ return nErrorCount;
+}
+
+
+
+// EA_WCHAR_UNIQUE
+template <typename T>
+struct wchar_unique { enum { value = 1 }; }; // if wchar_t is unique then wchar_unique<wchar_t>::value should be 1
+template <> struct wchar_unique<char8_t> { enum { value = 0 }; }; // if wchar_unique is not unique then it should match one of the specializations and the value will be 0.
+template <> struct wchar_unique<char16_t> { enum { value = 0 }; };
+template <> struct wchar_unique<char32_t> { enum { value = 0 }; };
+#if EA_WCHAR_UNIQUE
+ static_assert( wchar_unique<wchar_t>::value == 1, "WCHAR_UNIQUE appears to be incorrectly defined to 1 by EABase" );
+#else
+ static_assert( wchar_unique<wchar_t>::value == 0, "WCHAR_UNIQUE appears to be incorrectly defined to 0 by EABase" );
+#endif
+
+
+//
+// Tests for EA_IS_ENABLED
+//
+#define EABASE_TEST_FEATURE_A EA_ENABLED
+#if EA_IS_ENABLED(EABASE_TEST_FEATURE_A)
+ // Feature A is enabled
+#else
+ #error Error EABASE_TEST_FEATURE_A should be enabled.
+#endif
+// Make sure it is possible to successfully negate the test.
+#if !EA_IS_ENABLED(EABASE_TEST_FEATURE_A)
+ #error Error EABASE_TEST_FEATURE_A should be enabled.
+#endif
+
+#define EABASE_TEST_FEATURE_B EA_DISABLED
+#if EA_IS_ENABLED(EABASE_TEST_FEATURE_B)
+ #error Error EABASE_TEST_FEATURE_B should be disabled.
+#endif
+// Make sure it is possible to successfully negate the test.
+#if !EA_IS_ENABLED(EABASE_TEST_FEATURE_B)
+ // Feature B is not enabled
+#else
+ #error Error EABASE_TEST_FEATURE_B should be disabled.
+#endif
+
+// The test below should cause compilation to fail if it is uncommented. However we can't
+// obviously enable the test because it will break the build. It should be tested manually
+// if changes to EA_IS_ENABLED are made.
+//
+// #if EA_IS_ENABLED(EABASE_TEST_FEATURE_WITH_NO_DEFINE)
+// #endif
+
+
+int EAMain(int, char**)
+{
+ int nErrorCount = 0;
+ int nTotalErrorCount = 0;
+
+
+ nErrorCount = TestEABase();
+ EA::EAMain::Report("EABase test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEAResult();
+ EA::EAMain::Report("EAResult test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEAPlatform();
+ EA::EAMain::Report("EAPlatform test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEACompiler();
+ EA::EAMain::Report("EACompiler test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEACompilerTraits();
+ EA::EAMain::Report("EACompilerTraits test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestNullPtr();
+ EA::EAMain::Report("nullptr test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEAHave();
+ EA::EAMain::Report("EAHave test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEAAlignment();
+ EA::EAMain::Report("EAAlignment test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEAStdarg();
+ EA::EAMain::Report("EAStdarg test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEAUnits();
+ EA::EAMain::Report("EAUnits test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEAInt128_t();
+ EA::EAMain::Report("EAInt128_t test failure count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+
+ if (nTotalErrorCount == 0)
+ EA::EAMain::Report("\nAll tests completed successfully.\n");
+ else
+ EA::EAMain::Report("\nTests failed. Total error count: %d\n", nTotalErrorCount);
+
+ return nTotalErrorCount;
+}
+
+EA_RESTORE_VC_WARNING() // for the following from above: EA_DISABLE_VC_WARNING(4265 4296 4310 4350 4481 4530 4625 4626 4996)
diff --git a/EASTL/test/packages/EABase/test/source/TestEABase.h b/EASTL/test/packages/EABase/test/source/TestEABase.h
new file mode 100644
index 0000000..20c3bf6
--- /dev/null
+++ b/EASTL/test/packages/EABase/test/source/TestEABase.h
@@ -0,0 +1,40 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include <EABase/eabase.h>
+#include <EABase/version.h>
+
+
+// What we do here is verify that EA_PRAGMA_ONCE_SUPPORTED works as intended.
+// This header file should be #included two times by TestEABase.cpp
+// in order to test this.
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+ const int EABaseOncePerTranslationUnitTestVariable = 0; // This should get compiled only once ever for a compilation unit.
+#else
+ // Just implement a classic manual header include guard.
+ // In this case we aren't really testing anything.
+ #ifndef TESTEABASE_H
+ #define TESTEABASE_H
+ const int EABaseOncePerTranslationUnitTestVariable = 0;
+ #endif
+#endif
+
+
+
+// EA_EXTERN_TEMPLATE / EA_COMPILER_NO_EXTERN_TEMPLATE
+
+#if defined(__cplusplus)
+ template <typename T>
+ struct eabase_template
+ {
+ T value;
+ T GetValue() const { return value; }
+ };
+
+ EA_EXTERN_TEMPLATE(struct eabase_template<char>);
+#endif
+
+
diff --git a/EASTL/test/packages/EABase/test/source/TestEABaseC.c b/EASTL/test/packages/EABase/test/source/TestEABaseC.c
new file mode 100644
index 0000000..71f492f
--- /dev/null
+++ b/EASTL/test/packages/EABase/test/source/TestEABaseC.c
@@ -0,0 +1,1213 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "TestEABase.h"
+#include "TestEABase.h" // Intentionally double-include the same header file, to test it.
+#include <EABase/eabase.h>
+#include <EABase/eahave.h>
+
+#if defined(_MSC_VER)
+ #pragma warning(disable: 4296 4310 4255) // expression is always true, cast truncates const value.
+#endif
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <ctype.h>
+#include <assert.h>
+#include <math.h>
+
+#if defined(EA_COMPILER_MSVC) && defined(EA_PLATFORM_MICROSOFT)
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #if defined(EA_PLATFORM_XENON)
+ #define NOD3D
+ #define NONET
+ #include <Xtl.h>
+ #else
+ #pragma warning(disable: 28252)
+ #pragma warning(disable: 28253)
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+ #include <Windows.h>
+ #endif
+ EA_RESTORE_ALL_VC_WARNINGS()
+#elif defined(EA_PLATFORM_ANDROID)
+ #include <android/log.h>
+#endif
+
+
+// Some CPU/Compiler combinations don't support arbitrary alignment declarations.
+// In particular some ARM compilers often don't. You can use EAStdC's EAAlignment to
+// achieve arbitrary alignment if EA_ALIGN doesn't work.
+#if (EA_ALIGN_MAX_AUTOMATIC < 64)
+ #define ALIGNMENT_AMOUNT_64 EA_ALIGN_MAX_AUTOMATIC
+#else
+ #define ALIGNMENT_AMOUNT_64 64
+#endif
+
+#if (EA_ALIGN_MAX_AUTOMATIC < 32)
+ #define ALIGNMENT_AMOUNT_32 EA_ALIGN_MAX_AUTOMATIC
+#else
+ #define ALIGNMENT_AMOUNT_32 32
+#endif
+
+#if (EA_ALIGN_MAX_AUTOMATIC < 16)
+ #define ALIGNMENT_AMOUNT_16 EA_ALIGN_MAX_AUTOMATIC
+#else
+ #define ALIGNMENT_AMOUNT_16 16
+#endif
+
+
+
+#if defined(__cplusplus)
+struct ClassWithDefaultCtor
+{
+ ClassWithDefaultCtor(int x = 0)
+ { char buffer[16]; sprintf(buffer, "%d", x); }
+};
+
+
+struct ClassWithoutDefaultCtor
+{
+ ClassWithoutDefaultCtor(int x)
+ { char buffer[16]; sprintf(buffer, "%d", x); }
+};
+#endif
+
+
+// Forward declarations
+int Stricmp(const char* pString1, const char* pString2);
+int TestEABase(void);
+int TestEAResult(void);
+int TestEAPlatform(void);
+int TestNU(void);
+int TestEACompiler(void);
+int TestEACompilerTraits(void);
+int Verify(int bTest, const char* pMessage);
+
+#define DoError(nErrorCount, pMessage) DoErrorC(&nErrorCount, pMessage)
+void DoErrorC(int* nErrorCount, const char* pMessage);
+
+
+
+/* Test EA_DISABLE_WARNING */
+EA_DISABLE_VC_WARNING(4548 4127)
+EA_DISABLE_ALL_VC_WARNINGS()
+EA_RESTORE_ALL_VC_WARNINGS()
+
+EA_DISABLE_GCC_WARNING(-Wuninitialized)
+
+EA_DISABLE_SN_WARNING(1787)
+EA_DISABLE_ALL_SN_WARNINGS()
+EA_RESTORE_ALL_SN_WARNINGS()
+
+EA_DISABLE_GHS_WARNING(123)
+
+EA_DISABLE_EDG_WARNING(193)
+
+EA_DISABLE_CW_WARNING(10317)
+EA_DISABLE_ALL_CW_WARNINGS()
+EA_RESTORE_ALL_CW_WARNINGS()
+
+static void Printf(const char8_t* pFormat, ...)
+{
+ #if defined(_MSC_VER)
+ #define vsnprintf _vsnprintf
+ #endif
+
+ if(pFormat)
+ {
+ char pMessage[512];
+ int nReturnValue;
+
+ va_list arguments;
+ va_start(arguments, pFormat);
+ nReturnValue = vsnprintf(pMessage, EAArrayCount(pMessage), pFormat, arguments);
+ va_end(arguments);
+
+ if(nReturnValue > 0)
+ {
+ #if defined(EA_PLATFORM_ANDROID)
+ __android_log_write(ANDROID_LOG_INFO, "EABase.Printf", pMessage);
+ #else
+ fputs(pMessage, stdout);
+ #endif
+
+ #if defined(EA_COMPILER_MSVC) && defined(EA_PLATFORM_MICROSOFT)
+ OutputDebugStringA(pMessage);
+ #endif
+ }
+ }
+}
+
+/* Test EA_DISABLE_WARNING */
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_GCC_WARNING()
+EA_RESTORE_SN_WARNING()
+EA_RESTORE_GHS_WARNING()
+EA_RESTORE_EDG_WARNING()
+EA_RESTORE_CW_WARNING(10317)
+
+
+
+int Verify(int bTest, const char* pMessage)
+{
+ if(!bTest)
+ {
+ if(pMessage)
+ Printf("Test warning: %s\n", pMessage);
+ }
+
+ return bTest ? 0 : 1;
+}
+
+
+void DoErrorC(int* nErrorCount, const char* pMessage)
+{
+ ++*nErrorCount;
+ if(pMessage)
+ Printf("Test error: %s\n", pMessage);
+}
+
+
+int Stricmp(const char* pString1, const char* pString2)
+{
+ char c1, c2;
+
+ while((c1 = (char)tolower((int)*pString1++)) == (c2 = (char)tolower((int)*pString2++)))
+ {
+ if(c1 == 0)
+ return 0;
+ }
+
+ return (c1 - c2);
+}
+
+
+// GCC requires that function attributes be declared in the function
+// declaration and not in the function definition.
+// RVCT seems to require attributes to be in both the declaration
+// and definition otherwise you get a "declaration is incompatible" error
+int PureFunction(void) EA_PURE;
+
+int PureFunction(void)
+#if defined(EA_COMPILER_RVCT)
+EA_PURE
+#endif
+{
+ return (strlen("abc") == 3);
+}
+
+
+struct InitPriorityTestClass
+{
+ int mX;
+};
+
+
+
+// EA_MAY_ALIAS
+void* EA_MAY_ALIAS gPtr0 = NULL;
+
+typedef void* EA_MAY_ALIAS pvoid_may_alias;
+pvoid_may_alias gPtr1 = NULL;
+
+
+
+// static_asset at global scope
+// Should succeed.
+static_assert(sizeof(int32_t) == 4, "static_assert failure");
+// Should fail.
+//static_assert(sizeof(int32_t) == 8, "static_assert failure");
+
+
+
+static size_t RestrictTest(char* EA_RESTRICT p)
+{
+ return sizeof(p);
+}
+
+
+// EA_OPTIMIZE_OFF / EA_OPTIMIZE_ON
+EA_OPTIMIZE_OFF()
+static EA_NO_INLINE int DisabledOptimizations(int x)
+{
+ return x * 37;
+}
+EA_OPTIMIZE_ON()
+
+
+// EA_UNUSED
+static void FunctionWithUnusedVariables(int x)
+{
+ int y = 0;
+ EA_UNUSED(x);
+ EA_UNUSED(y);
+}
+
+
+
+int TestEABase(void)
+{
+ int nErrorCount = 0;
+
+
+ // Test NULL
+ {
+ EA_DISABLE_VC_WARNING(6326)
+ Verify(NULL == (void*)0, "unspecified test");
+ EA_RESTORE_VC_WARNING()
+ }
+
+ // Verify sized type sizes
+ {
+ const ssize_t ss = 1; // Verify that ssize_t is a signed type.
+
+ Verify(sizeof(int8_t) == 1, "int8_t size test");
+ Verify(sizeof(uint8_t) == 1, "uint8_t size test");
+ Verify(sizeof(int16_t) == 2, "int16_t size test");
+ Verify(sizeof(uint16_t) == 2, "uint16_t size test");
+ Verify(sizeof(int32_t) == 4, "int32_t size test");
+ Verify(sizeof(uint32_t) == 4, "uint32_t size test");
+ Verify(sizeof(int64_t) == 8, "int64_t size test");
+ Verify(sizeof(uint64_t) == 8, "uint64_t size test");
+
+ #if !defined(FLT_EVAL_METHOD)
+ #error EABase should always define FLT_EVAL_METHOD
+ Verify(0, "FLT_EVAL_METHOD test: not defined.");
+ #else
+ #if !defined(__GNUC__) || defined(__USE_ISOC99) // GCC doesn't define float_t/double_t unless __USE_ISOC99 is defined (compiled with -std=c99)
+ #if (FLT_EVAL_METHOD == -1)
+ // In this case the C99 standard states that the
+ // precision of float_t and double_t is indeterminable.
+ #elif (FLT_EVAL_METHOD == 0)
+ Verify(sizeof(float_t) == sizeof(float), "float_t size test");
+ Verify(sizeof(double_t) == sizeof(double), "double_t size test");
+ #elif (FLT_EVAL_METHOD == 1)
+ Verify(sizeof(float_t) == sizeof(double), "float_t size test");
+ Verify(sizeof(double_t) == sizeof(double), "double_t size test");
+ #elif (FLT_EVAL_METHOD == 2)
+ Verify(sizeof(float_t) == sizeof(long double), "float_t size test");
+ Verify(sizeof(double_t) == sizeof(long double), "double_t size test");
+ #else
+ DoError(nErrorCount, "FLT_EVAL_METHOD test: invalid value.");
+ #endif
+ #endif
+ #endif
+
+ Verify(sizeof(bool8_t) == 1, "bool8_t size test");
+ Verify(sizeof(intptr_t) == sizeof(void*), "intptr_t size test");
+ Verify(sizeof(uintptr_t) == sizeof(void*), "uintptr_t size test");
+ Verify(sizeof(ssize_t) == sizeof(size_t), "ssize_t size test");
+ Verify((ssize_t)((ss ^ ss) - 1) < 0, "ssize_t sign test");
+ Verify(sizeof(char8_t) == 1, "char8_t size test");
+ Verify(sizeof(char16_t) == 2, "char16_t size test");
+ Verify(sizeof(char32_t) == 4, "char32_t test");
+
+ #if (EA_WCHAR_SIZE == 2) || (EA_WCHAR_SIZE == 4)
+ Verify(sizeof(wchar_t) == EA_WCHAR_SIZE, "EA_WCHAR_SIZE test");
+ #else
+ Verify(0, "EA_WCHAR_SIZE test");
+ #endif
+ }
+
+ // Test char8_t, char16_t, char32_t string literals.
+ {
+ const char8_t* p8 = "abc";
+ const char8_t c8 = 'a';
+
+ #ifdef EA_CHAR16
+ const char16_t* p16 = EA_CHAR16("abc"); // Under GCC, this assumes compiling with -fshort-wchar
+ const char16_t c16 = EA_CHAR16('\x3001');
+ #else
+ const char16_t* p16 = NULL;
+ const char16_t c16 = (char16_t)'X';
+ #endif
+
+ #if EA_CHAR16_NATIVE
+ const char32_t* p32 = EA_CHAR32("abc");
+ const char32_t c32 = EA_CHAR32('\x3001');
+ #else
+ const char32_t p32[] = { 'a', 'b', 'c', '\0' }; // Microsoft doesn't support 32 bit strings here, and GCC doesn't use them when we compile with -fshort-wchar (which we do).
+ #ifdef EA_CHAR16
+ const char32_t c32 = EA_CHAR16('\x3001'); // 16 bit should silently convert to 32 bit.
+ #else
+ const char32_t c32 = (char16_t)'X'; // 16 bit should silently convert to 32 bit.
+ #endif
+ #endif
+
+ char buffer[128];
+ sprintf(buffer, "%p %p %p %p %p %p", &p8, &c8, &p16, &c16, &p32, &c32); // Make possible compiler warnings about unused variables go away.
+ }
+
+ #if 0
+
+ // Verify sized type signs
+ {
+ int8_t i8(1);
+ if(int8_t((i8 ^ i8) - 1) >= 0)
+ DoError(nErrorCount, "int8_t sign test");
+
+ uint8_t u8(1);
+ if(uint8_t((u8 ^ u8) - 1) <= 0)
+ DoError(nErrorCount, "uint8_t sign test");
+
+ int16_t i16(1);
+ if(int16_t((i16 ^ i16) - 1) >= 0)
+ DoError(nErrorCount, "int16_t sign test");
+
+ uint16_t u16(1);
+ if(uint16_t((u16 ^ u16) - 1) <= 0)
+ DoError(nErrorCount, "uint16_t sign test");
+
+ int32_t i32(1);
+ if(int32_t((i32 ^ i32) - 1) >= 0)
+ DoError(nErrorCount, "int32_t sign test");
+
+ uint32_t u32(1);
+ if(uint32_t((u32 ^ u32) - 1) <= 0)
+ DoError(nErrorCount, "uint32_t sign test");
+
+ int64_t i64(1);
+ if(int64_t((i64 ^ i64) - 1) >= 0)
+ DoError(nErrorCount, "int64_t sign test");
+
+ uint64_t u64(1);
+ if(uint64_t((u64 ^ u64) - 1) <= 0)
+ DoError(nErrorCount, "uint64_t sign test");
+
+
+
+ intptr_t ip(1);
+ if(intptr_t((ip ^ ip) - 1) >= 0)
+ DoError(nErrorCount, "intptr_t sign test");
+
+ uintptr_t up(1);
+ if(uintptr_t((up ^ up) - 1) <= 0)
+ DoError(nErrorCount, "uintptr_t sign test");
+
+
+ // The following sign tests have been disabled, as the determination of
+ // the sign of type char and wchar_t are in the hands of the compiler and
+ // the user's configuration of that compiler.
+
+ //char8_t c8(1); // We expect it to be signed, though the need for such a requirement is debateable.
+ //if(char8_t((c8 ^ c8) - 1) >= 0)
+ // DoError(nErrorCount, "char8_t sign test");
+
+ //char16_t c16(1); // We expect it to be unsigned
+ //if(char16_t((c16 ^ c16) - 1) <= 0)
+ // DoError(nErrorCount, "char16_t sign test");
+
+ //char32_t c32(1); // We expect it to be unsigned
+ //if(char32_t((c32 ^ c32) - 1) <= 0)
+ // DoError(nErrorCount, "char32_t sign test");
+ }
+
+
+
+ //Test Constant macros
+ {
+ char buffer[256];
+
+ const int8_t i8Min = INT8_C(-128); // Strictly speaking, the C language standard allows this to be -127 as well.
+ const int8_t i8Max = INT8_C(127);
+
+ const uint8_t u8Min = UINT8_C(0);
+ const uint8_t u8Max = UINT8_C(255);
+
+ const int16_t i16Min = INT16_C(-32767) - 1;
+ const int16_t i16Max = INT16_C( 32767);
+
+ const uint16_t u16Min = UINT16_C(0);
+ const uint16_t u16Max = UINT16_C(65535);
+
+ const int32_t i32Min = INT32_C(-2147483647) - 1;
+ const int32_t i32Max = INT32_C( 2147483647);
+
+ const uint32_t u32Min = UINT32_C(0);
+ const uint32_t u32Max = UINT32_C(4294967295);
+
+ #if defined(__GNUC__) && (__GNUC__ < 4) // If using a broken version of UINT64_C/INT64_C macros...
+ const int64_t i64Min = -9223372036854775807LL - 1;
+ const int64_t i64Max = 9223372036854775807LL;
+
+ const uint64_t u64Min = UINT64_C(0);
+ const uint64_t u64Max = 18446744073709551615ULL;
+ #else
+ const int64_t i64Min = INT64_C(-9223372036854775807) - 1;
+ const int64_t i64Max = INT64_C( 9223372036854775807);
+
+ const uint64_t u64Min = UINT64_C(0);
+ const uint64_t u64Max = UINT64_C(18446744073709551615);
+ #endif
+
+ sprintf(buffer, "%d %d %u %u %d %d %u %u %d %d %u %u %"SCNd64" %"SCNd64" %"SCNu64" %"SCNu64,
+ (int)i8Min, (int)i8Max, (unsigned)u8Min, (unsigned)u8Max,
+ (int)i16Min, (int)i16Max, (unsigned)u16Min, (unsigned)u16Max,
+ (int)i32Min, (int)i32Max, (unsigned)u32Min, (unsigned)u32Max,
+ i64Min, i64Max, u64Min, u64Max);
+ if(strcmp(buffer, "-128 127 0 255 -32768 32767 0 65535 -2147483648 2147483647 0 4294967295 -9223372036854775808 9223372036854775807 0 18446744073709551615"))
+ DoError(nErrorCount, "INT_C test");
+
+ // Verify the use of hex numbers with INT64_C
+ const int64_t i64Hex = INT64_C(0x1111111122222222);
+ if(i64Hex != INT64_C(1229782938533634594))
+ DoError(nErrorCount, "INT64_C hex error");
+
+ // Verify the use of hex numbers with UINT64_C
+ const uint64_t u64Hex = UINT64_C(0xaaaaaaaabbbbbbbb);
+
+ #if defined(__GNUC__) && (__GNUC__ < 4) // If using a broken version of UINT64_C/INT64_C macros...
+ const uint64_t temp = 12297829382759365563ULL;
+ #else
+ const uint64_t temp = UINT64_C(12297829382759365563);
+ #endif
+
+ if(u64Hex != temp)
+ DoError(nErrorCount, "UINT64_C hex error");
+
+ // Verify that the compiler both allows division with uint64_t but
+ // also that it allows it via UINT64_MAX. A bad implementation of
+ // UINT64_MAX would cause the code below to mis-execute or not compile.
+ const uint64_t resultUint64 = UINT64_MAX / 2;
+ if(resultUint64 != UINT64_C(9223372036854775807))
+ DoError(nErrorCount, "UINT64_MAX error");
+ }
+
+ //Test sized Printf format specifiers
+ {
+ char buffer[256];
+
+ int8_t d8(INT8_MAX), i8(INT8_MIN), o8(INT8_MAX);
+ uint8_t u8(UINT8_MAX), x8(UINT8_MAX), X8(UINT8_MAX);
+ sprintf(buffer, "%" PRId8 " %" PRIi8 " %" PRIo8 " %" PRIu8 " %" PRIx8 " %" PRIX8, d8, i8, o8, u8, x8, X8);
+
+ #ifdef EA_COMPILER_GNUC
+ if(Stricmp(buffer, "127 -128 177 255 ff FF"))
+ DoError(nErrorCount, "PRI8 test"); // This is known to fail with compilers such as VC++ which don't support %hh.
+ #endif
+
+ int16_t d16(INT16_MAX), i16(INT16_MIN), o16(INT16_MAX);
+ uint16_t u16(UINT16_MAX), x16(UINT16_MAX), X16(UINT16_MAX);
+ sprintf(buffer, "%" PRId16 " %" PRIi16 " %" PRIo16 " %" PRIu16 " %" PRIx16 " %" PRIX16, d16, i16, o16, u16, x16, X16);
+ if(Stricmp(buffer, "32767 -32768 77777 65535 ffff FFFF"))
+ DoError(nErrorCount, "PRI16 test");
+
+ int32_t d32(INT32_MAX), i32(INT32_MIN), o32(INT32_MAX);
+ uint32_t u32(UINT32_MAX), x32(UINT32_MAX), X32(UINT32_MAX);
+ sprintf(buffer, "%" PRId32 " %" PRIi32 " %" PRIo32 " %" PRIu32 " %" PRIx32 " %" PRIX32, d32, i32, o32, u32, x32, X32);
+ if(Stricmp(buffer, "2147483647 -2147483648 17777777777 4294967295 ffffffff FFFFFFFF"))
+ DoError(nErrorCount, "PRI32 test");
+
+ int64_t d64(INT64_MAX), i64(INT64_MIN), o64(INT64_MAX);
+ uint64_t u64(UINT64_MAX), x64(UINT64_MAX), X64(UINT64_MAX);
+ sprintf(buffer, "%" PRId64 " %" PRIi64 " %" PRIo64 " %" PRIu64 " %" PRIx64 " %" PRIX64, d64, i64, o64, u64, x64, X64);
+ if(Stricmp(buffer, "9223372036854775807 -9223372036854775808 777777777777777777777 18446744073709551615 ffffffffffffffff FFFFFFFFFFFFFFFF"))
+ DoError(nErrorCount, "PRI64 test");
+
+ // Many compilers give warnings for the following code because they
+ // recognize that a pointer is being formatted as an integer.
+ // This is what we want to do and what the C99 standard intends here.
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300)
+ #pragma warning(disable: 4313) // Warning C4313: 'sprintf' : '%d' in format string conflicts with argument 1 of type 'void *'
+ #endif
+
+ void *dPtr = (void*)INT32_MAX, *iPtr = (void*)INT32_MIN, *oPtr = (void*)INT32_MAX, *uPtr = (void*)(uintptr_t)UINT64_MAX, *xPtr = (void*)(uintptr_t)UINT64_MAX, *XPtr = (void*)(uintptr_t)UINT64_MAX;
+ sprintf(buffer, "%" PRIdPTR " %" PRIiPTR " %" PRIoPTR " %" PRIuPTR " %" PRIxPTR " %" PRIXPTR, (intptr_t)dPtr, (intptr_t)iPtr, (uintptr_t)oPtr, (uintptr_t)uPtr, (uintptr_t)xPtr, (uintptr_t)XPtr);
+
+ #if (EA_PLATFORM_PTR_SIZE == 4)
+ if(Stricmp(buffer, "2147483647 -2147483648 17777777777 4294967295 ffffffff FFFFFFFF"))
+ DoError(nErrorCount, "PRIPTR test");
+ #else // EA_PLATFORM_PTR_SIZE == 8
+ if(Stricmp(buffer, "2147483647 -2147483648 17777777777 18446744073709551615 ffffffffffffffff FFFFFFFFFFFFFFFF"))
+ DoError(nErrorCount, "PRIPTR test");
+ #endif
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300)
+ #pragma warning(default: 4313)
+ #endif
+ }
+
+ //Test sized scanf format specifiers
+ {
+ #ifdef EA_COMPILER_IS_C99 // Enabled for C99 only because this code will simply crash on many platforms if the format specifiers aren't supported.
+ int8_t d8, i8, o8;
+ uint8_t u8, x8;
+ sscanf("127 -127 177 255 ff", "%"SCNd8 " %"SCNi8 " %"SCNo8 " %"SCNu8 " %"SCNx8, &d8, &i8, &o8, &u8, &x8);
+ if((d8 != 127) || (i8 != -127) || (o8 != 127) || (u8 != 255) || (x8 != 255))
+ DoError(nErrorCount, "SCN8 test"); // This is known to fail with compilers such as VC++ which don't support %hh.
+ #endif
+
+ int16_t d16, i16, o16;
+ uint16_t u16, x16;
+ sscanf("32767 -32768 77777 65535 ffff", "%"SCNd16 " %"SCNi16 " %"SCNo16 " %"SCNu16 " %"SCNx16, &d16, &i16, &o16, &u16, &x16);
+ if((d16 != 32767) || (i16 != -32768) || (o16 != 32767) || (u16 != 65535) || (x16 != 65535))
+ DoError(nErrorCount, "SCN16 test");
+
+ int32_t d32, i32, o32;
+ uint32_t u32, x32;
+ sscanf("2147483647 -2147483648 17777777777 4294967295 ffffffff", "%"SCNd32 " %"SCNi32 " %"SCNo32 " %"SCNu32 " %"SCNx32, &d32, &i32, &o32, &u32, &x32);
+ if((d32 != INT32_MAX) || (i32 != INT32_MIN) || (o32 != INT32_MAX) || (u32 != UINT32_MAX) || (x32 != UINT32_MAX))
+ DoError(nErrorCount, "SCN32 test");
+
+ int64_t d64, i64, o64;
+ uint64_t u64, x64;
+ sscanf("9223372036854775807 -9223372036854775808 777777777777777777777 18446744073709551615 ffffffffffffffff", "%"SCNd64 " %"SCNi64 " %"SCNo64 " %"SCNu64 " %"SCNx64, &d64, &i64, &o64, &u64, &x64);
+ if((d64 != INT64_MAX) || (i64 != INT64_MIN) || (o64 != INT64_MAX) || (u64 != UINT64_MAX) || (x64 != UINT64_MAX))
+ DoError(nErrorCount, "SCN64 test");
+
+ // Many compilers give warnings for the following code because they
+ // recognize that a pointer is being formatted as an integer.
+ // This is what we want to do and what the C99 standard intends here.
+ void *dPtr, *iPtr, *oPtr, *uPtr, *xPtr;
+
+ #if (EA_PLATFORM_PTR_SIZE == 4)
+ sscanf("2147483647 -2147483648 17777777777 4294967295 ffffffff", "%"SCNdPTR " %"SCNiPTR " %"SCNoPTR " %"SCNuPTR " %"SCNxPTR, (intptr_t*)&dPtr, (intptr_t*)&iPtr, (uintptr_t*)&oPtr, (uintptr_t*)&uPtr, (uintptr_t*)&xPtr);
+ #else // EA_PLATFORM_PTR_SIZE == 8
+ sscanf("2147483647 -2147483648 17777777777 18446744073709551615 ffffffffffffffff", "%"SCNdPTR " %"SCNiPTR " %"SCNoPTR " %"SCNuPTR " %"SCNxPTR, (intptr_t*)&dPtr,(intptr_t*)&iPtr, (uintptr_t*)&oPtr, (uintptr_t*)&uPtr, (uintptr_t*)&xPtr);
+ #endif
+
+ if((dPtr != (void*)INT32_MAX) || (iPtr != (void*)INT32_MIN) || (oPtr != (void*)INT32_MAX) || (uPtr != (void*)(uintptr_t)UINT64_MAX) || (xPtr != (void*)(uintptr_t)UINT64_MAX))
+ DoError(nErrorCount, "SCNPTR test");
+ }
+
+
+ // Test min/max
+ {
+ // The C standard allows INT8_MIN to be either -127 or -128. So in order to be able
+ // to test for this in a portable way, we do the logic below whereby we test for
+ // -127 (which all compiles should support) or -127 - 1 which all compilers should
+ // support if INT8_MIN isn't -127.
+ if(!Verify(INT8_MIN == INT8_C(-127)) && !Verify(INT8_MIN == INT8_C(-127) - 1))
+ DoError(nErrorCount, "INT8_MIN test");
+ if(!Verify(INT8_MAX == INT8_C(127)))
+ DoError(nErrorCount, "INT8_MAX test");
+ if(!Verify(UINT8_MAX == UINT8_C(255)))
+ DoError(nErrorCount, "UINT8_MAX test");
+
+ if(!Verify(INT16_MIN == INT16_C(-32767)) && !Verify(INT16_MIN == INT16_C(-32767) - 1))
+ DoError(nErrorCount, "INT16_MIN test");
+ if(!Verify(INT16_MAX == INT16_C(32767)))
+ DoError(nErrorCount, "INT16_MAX test");
+ if(!Verify(UINT16_MAX == UINT16_C(65535)))
+ DoError(nErrorCount, "UINT16_MAX test");
+
+ if(!Verify(INT32_MIN == INT32_C(-2147483647)) && !Verify(INT32_MIN == INT32_C(-2147483647) - 1))
+ DoError(nErrorCount, "INT32_MIN test");
+ if(!Verify(INT32_MAX == INT32_C(2147483647)))
+ DoError(nErrorCount, "INT32_MAX test");
+ if(!Verify(UINT32_MAX == UINT32_C(4294967295)))
+ DoError(nErrorCount, "UINT32_MAX test");
+
+ if(!Verify(INT64_MIN == INT64_C(-9223372036854775807)) && !Verify(INT64_MIN == INT64_C(-9223372036854775807) - 1))
+ DoError(nErrorCount, "INT64_MIN test");
+ if(!Verify(INT64_MAX == INT64_C(9223372036854775807)))
+ DoError(nErrorCount, "INT64_MAX test");
+
+ #if defined(__GNUC__) && (__GNUC__ < 4) // If using a broken version of UINT64_C/INT64_C macros...
+ const uint64_t temp = 18446744073709551615ULL;
+ #else
+ const uint64_t temp = UINT64_C(18446744073709551615);
+ #endif
+
+ if(!Verify(UINT64_MAX == temp))
+ DoError(nErrorCount, "UINT64_MAX test");
+ }
+
+ #endif
+
+ return nErrorCount;
+}
+
+
+
+
+int TestEAPlatform(void)
+{
+ int nErrorCount = 0;
+
+ // Test EA_PLATFORM_PTR_SIZE
+ {
+ #ifdef EA_PLATFORM_PTR_SIZE
+ Verify(EA_PLATFORM_PTR_SIZE == sizeof(void*), "EA_PLATFORM_PTR_SIZE test");
+ #else
+ DoError(nErrorCount, "EA_PLATFORM_PTR_SIZE test");
+ #endif
+ }
+
+
+ // Test EA_PLATFORM_NAME
+ {
+ #ifdef EA_PLATFORM_NAME
+ char buffer[256];
+ sprintf(buffer, "TestEAPlatform: EA_PLATFORM_NAME: %s\n", EA_PLATFORM_NAME);
+ #else
+ DoError(nErrorCount, "EA_PLATFORM_NAME test");
+ #endif
+ }
+
+
+ // Test EA_PLATFORM_DESCRIPTION
+ {
+ #ifdef EA_PLATFORM_DESCRIPTION
+ char buffer[256];
+ sprintf(buffer, "TestEAPlatform: EA_PLATFORM_DESCRIPTION: %s\n", EA_PLATFORM_DESCRIPTION);
+ #else
+ DoError(nErrorCount, "EA_PLATFORM_DESCRIPTION test");
+ #endif
+ }
+
+
+ // Test EA_SYSTEM_LITTLE_ENDIAN / EA_SYSTEM_BIG_ENDIAN
+ {
+ uint32_t kValue = 0x12345678;
+ uint8_t* pValue = (uint8_t*)&kValue;
+
+ #ifdef EA_SYSTEM_LITTLE_ENDIAN
+ Verify(pValue[0] == 0x78, "EA_SYSTEM_ENDIAN test");
+ #elif defined(EA_SYSTEM_BIG_ENDIAN)
+ Verify(pValue[0] == 0x12, "EA_SYSTEM_ENDIAN test");
+ #else
+ Verify(0, "EA_SYSTEM_ENDIAN test");
+ #endif
+ }
+
+
+ // Test EA_ASM_STYLE
+ {
+ #if defined(EA_PROCESSOR_X86)
+ #if defined(EA_ASM_STYLE_ATT)
+ asm volatile ("nop");
+ #elif defined(EA_ASM_STYLE_INTEL)
+ __asm nop
+ #endif
+ #else
+ // Add other processors here.
+ #endif
+ }
+
+
+ return nErrorCount;
+}
+
+
+
+// Test compiler limitations
+// Easiest way to come up with tests for some of the more complicated versions
+// of these is to look at the Boost /libs/config/test/*.cxx files. Many of the
+// Boost compiler limitation defines are similar or match exactly to those
+// defined by EABase. See http://www.boost.org if you want to check this out.
+
+#ifndef EA_COMPILER_NO_STATIC_CONSTANTS // If class member static constants are allowed...
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_MEMBER_TEMPLATES
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_TEMPLATE_TEMPLATES
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_VOID_RETURNS
+#endif
+
+#ifndef EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_DEDUCED_TYPENAME
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP
+ // Todo
+#endif
+
+// Not applicable to C:
+//#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+// #include <vector>
+//#endif
+
+#ifndef EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ // Todo
+#endif
+
+#ifndef EA_COMPILER_NO_VARIADIC_MACROS
+ #define MY_PRINTF(format, ...) Printf(format, __VA_ARGS__)
+#endif
+
+
+
+
+int TestEACompiler(void)
+{
+ int nErrorCount = 0;
+
+ #if 0
+
+ // As of this writing, eacompiler.h defines at least the following compilers:
+ // EA_COMPILER_GNUC
+ // EA_COMPILER_BORLANDC
+ // EA_COMPILER_INTEL
+ // EA_COMPILER_METROWERKS
+ // EA_COMPILER_MSVC, EA_COMPILER_MSVC6, EA_COMPILER_MSVC7, EA_COMPILER_MSVC7_1
+
+
+ // Test EA_COMPILER_NAME
+ {
+ #ifdef EA_COMPILER_NAME
+ char buffer[256];
+ sprintf(buffer, "TestEACompiler: EA_COMPILER_NAME: %s\n", EA_COMPILER_NAME);
+ #else
+ DoError(nErrorCount, "EA_COMPILER_NAME test");
+ #endif
+ }
+
+
+ // Test EA_COMPILER_VERSION
+ {
+ #ifdef EA_COMPILER_VERSION
+ char buffer[256];
+ sprintf(buffer, "TestEACompiler: EA_COMPILER_VERSION: %d\n", EA_COMPILER_VERSION);
+ #else
+ DoError(nErrorCount, "EA_COMPILER_VERSION test");
+ #endif
+ }
+
+
+ // Test EA_COMPILER_STRING
+ {
+ #ifdef EA_COMPILER_STRING
+ char buffer[256];
+ sprintf(buffer, "TestEACompiler: EA_COMPILER_STRING: %s\n", EA_COMPILER_STRING);
+ #else
+ DoError(nErrorCount, "EA_COMPILER_STRING test");
+ #endif
+ }
+
+
+ // Test EA_COMPILER_NO_STATIC_CONSTANTS
+ {
+ char buffer[256];
+ sprintf(buffer, "%d", (int)NSC::x);
+ if(buffer[0] != '1')
+ DoError(nErrorCount, "EA_COMPILER_NO_STATIC_CONSTANTS test");
+ }
+
+
+ // Test EA_COMPILER_NO_VOID_RETURNS
+ #ifndef EA_COMPILER_NO_VOID_RETURNS
+ TestNVR1(); // Nothing to test for except successful compilation.
+ #endif
+
+
+ // Test EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ #ifndef EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ TestNESN();
+ #endif
+
+
+ // Test EA_COMPILER_NO_EXCEPTIONS
+ #ifndef EA_COMPILER_NO_EXCEPTIONS
+ if(!TestNE())
+ DoError(nErrorCount, "EA_COMPILER_NO_EXCEPTIONS test");
+ #endif
+
+
+ // Test EA_COMPILER_NO_UNWIND
+ if(!TestNU())
+ DoError(nErrorCount, "EA_COMPILER_NO_UNWIND test");
+
+ #endif
+
+ return nErrorCount;
+}
+
+
+int TestEACompilerTraits(void)
+{
+ int nErrorCount = 0;
+
+ // EA_COMPILER_IS_ANSIC
+ // EA_COMPILER_IS_C99
+ // EA_COMPILER_IS_CPLUSPLUS
+ // EA_COMPILER_MANAGED_CPP
+
+ { // Test EA_ALIGN_OF
+ typedef void (*AlignTestFunctionType)(void);
+ if(EA_ALIGN_OF(AlignTestFunctionType) != sizeof(void*)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (AlignTestFunctionType)");
+
+ if(EA_ALIGN_OF(int8_t) != sizeof(int8_t)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (int16_t)");
+
+ if(EA_ALIGN_OF(int16_t) != sizeof(int16_t)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (int16_t)");
+
+ if(EA_ALIGN_OF(int32_t) != sizeof(int32_t)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (int32_t)");
+
+ #if !defined (EA_ABI_ARM_APPLE)
+ if(EA_ALIGN_OF(int64_t) != sizeof(int64_t)) // This may not be a kosher test.
+ DoError(nErrorCount, "EA_ALIGN_OF test (int64_t)");
+ #endif
+ }
+
+ { // Test EA_PREFIX_ALIGN
+ #ifdef EA_PREFIX_ALIGN
+ char buffer[32];
+ EA_PREFIX_ALIGN(64) int x = 0;
+ sprintf(buffer, "%d", x);
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_PREFIX_ALIGN test");
+
+ #if defined(__cplusplus)
+ EA_PREFIX_ALIGN(64) ClassWithDefaultCtor cdcA;
+ //EA_PREFIX_ALIGN(64) ClassWithoutDefaultCtor cwdcA;
+
+ EA_PREFIX_ALIGN(64) ClassWithDefaultCtor cdcB(3);
+ EA_PREFIX_ALIGN(64) ClassWithoutDefaultCtor cwdcB(3);
+ #endif
+
+ #else
+ DoError(nErrorCount, "EA_PREFIX_ALIGN test");
+ #endif
+ }
+
+
+ { // Test EA_POSTFIX_ALIGN
+ #ifdef EA_POSTFIX_ALIGN
+ char buffer[32];
+ int x EA_POSTFIX_ALIGN(ALIGNMENT_AMOUNT_64) = 0;
+ sprintf(buffer, "%d", x);
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_POSTFIX_ALIGN test");
+ #else
+ DoError(nErrorCount, "EA_POSTFIX_ALIGN test");
+ #endif
+ }
+
+
+ { // Test EA_ALIGNED
+ #ifdef EA_ALIGNED
+ char buffer[32];
+
+ // Verify that a simple declaration works.
+ EA_ALIGNED(int, xA, ALIGNMENT_AMOUNT_64);
+
+ // Verify that a declaration with assignment works.
+ EA_ALIGNED(int, xB, ALIGNMENT_AMOUNT_64) = 0;
+
+ // Verify that a typedefd declaration works.
+ typedef EA_ALIGNED(int, int16, ALIGNMENT_AMOUNT_16);
+ int16 n16 = 0;
+
+ // Verify that a declaration with construction works.
+ #if defined(__cplusplus)
+ EA_ALIGNED(int, xC, ALIGNMENT_AMOUNT_64)(0);
+ #endif
+
+ xA = 0;
+ sprintf(buffer, "%d", xA);
+ sprintf(buffer, "%d", xB);
+ sprintf(buffer, "%p", &n16);
+ #if defined(__cplusplus)
+ sprintf(buffer, "%d", xC);
+ #endif
+
+ #if defined(__cplusplus)
+ // Verify that the following tests compile. These tests are here
+ // because the SN compiler (EDG front-end) has some problems with
+ // GCC compatibility related to the 'aligned' __attribute__.
+ ClassWithDefaultCtor cdc;
+ ClassWithoutDefaultCtor cwdc(3);
+ sprintf(buffer, "%p%p", &cdc, &cwdc);
+
+ // Verify that regular usage of EA_ALIGNED works.
+ EA_ALIGNED(ClassWithDefaultCtor, cdc16A, ALIGNMENT_AMOUNT_16);
+ //EA_ALIGNED(ClassWithoutDefaultCtor, cwdcA, 16); // Doesn't have a default ctor, so this can't be done.
+ sprintf(buffer, "%p%p", &cdc16A, (void*)NULL);
+
+ // Verify that argument usage of EA_ALIGNED works.
+ EA_ALIGNED(ClassWithDefaultCtor, cdcB, ALIGNMENT_AMOUNT_16)(3);
+ EA_ALIGNED(ClassWithoutDefaultCtor, cwdcB, ALIGNMENT_AMOUNT_16)(3);
+ sprintf(buffer, "%p%p", &cdcB, &cwdcB);
+
+ // Verify that usage of EA_ALIGNED works within a typedef.
+ typedef EA_ALIGNED(ClassWithDefaultCtor, ClassWithDefaultCtor16, ALIGNMENT_AMOUNT_16);
+ ClassWithDefaultCtor16 cdcC(3);
+ typedef EA_ALIGNED(ClassWithoutDefaultCtor, ClassWithoutDefaultCtor16, ALIGNMENT_AMOUNT_16);
+ ClassWithoutDefaultCtor16 cwdcC(3);
+ sprintf(buffer, "%p%p", &cdcC, &cwdcC);
+ #endif
+ #else
+ DoError(nErrorCount, "EA_ALIGNED test");
+ #endif
+ }
+
+
+ { // Test EA_PACKED
+ #if defined(__cplusplus)
+ #ifdef EA_PACKED
+ char buffer[32];
+ struct X { int x; } EA_PACKED;
+ X x = { 0 };
+ sprintf(buffer, "%d", x.x);
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_PACKED test");
+ #else
+ DoError(nErrorCount, "EA_PACKED test");
+ #endif
+ #endif
+ }
+
+
+ { // Test EA_LIKELY
+
+ if(EA_UNLIKELY(nErrorCount > 0))
+ {
+ if(EA_LIKELY(nErrorCount == 999999)) // Actually this isn't likely, but that's beside the point.
+ DoError(nErrorCount, "EA_LIKELY test");
+ }
+ }
+
+
+
+ { // Test EA_ASSUME
+ switch (nErrorCount / (nErrorCount + 1))
+ {
+ case 0:
+ Stricmp("nop0", "nop0");
+ break;
+ case 1:
+ Stricmp("nop1", "nop1");
+ break;
+ default:
+ EA_ASSUME(0);
+ }
+ }
+
+
+ { // Test EA_PURE
+ if(!PureFunction())
+ DoError(nErrorCount, "EA_PURE test");
+ }
+
+
+ { // Test EA_WCHAR_SIZE
+ EA_DISABLE_VC_WARNING(6326)
+ #ifdef EA_WCHAR_SIZE
+ if((EA_WCHAR_SIZE != 1) && (EA_WCHAR_SIZE != 2) && (EA_WCHAR_SIZE != 4))
+ DoError(nErrorCount, "EA_WCHAR_SIZE test");
+ #else
+ DoError(nErrorCount, "EA_WCHAR_SIZE test");
+ #endif
+ EA_RESTORE_VC_WARNING()
+ }
+
+
+ { // Test EA_RESTRICT
+ char* p = NULL;
+ if(RestrictTest(p) == 0) // This isn't a real test. If there is a failure, it will happen at compile time.
+ DoError(nErrorCount, "EA_RESTRICT test");
+ }
+
+
+ { // Test EA_DEPRECATED
+ /* This causes warnings on compilers, so just disable it.
+ #if defined(EA_DEPRECATED) && (!defined(__GNUC__) || ((__GNUC__ * 100 + __GNUC_MINOR__) < 402)) // GCC 4.2+ is converting deprecated into an error instead of a warning.
+ char buffer[32];
+ EA_DEPRECATED int x = 0;
+ sprintf(buffer, "%d", x); (void)x;
+ if(buffer[0] != '0')
+ DoError(nErrorCount, "EA_DEPRECATED test");
+ #else
+ DoError(nErrorCount, "EA_DEPRECATED test");
+ #endif
+ */
+ }
+
+
+ { // Test EA_PASCAL
+ #if defined(__cplusplus)
+ #ifdef EA_PASCAL
+ struct X{ void EA_PASCAL DoNothing(void){} };
+ X x;
+ x.DoNothing();
+ #else
+ DoError(nErrorCount, "EA_PASCAL test");
+ #endif
+ #endif
+ }
+
+
+ { // Test EA_PASCAL_FUNC
+ #if defined(__cplusplus)
+ #ifdef EA_PASCAL_FUNC
+ struct X{ void EA_PASCAL_FUNC(DoNothing()){} };
+ X x;
+ x.DoNothing();
+ #else
+ DoError(nErrorCount, "EA_PASCAL_FUNC test");
+ #endif
+ #endif
+ }
+
+
+ // EA_SSE
+ // Not sure how to properly test at this time.
+
+
+ { // EA_IMPORT
+ // Not possible to do this because import means it will come from outside.
+ //struct X{ EA_IMPORT void DoNothing(){} };
+ //X x;
+ //x.DoNothing();
+ }
+
+
+ { // EA_EXPORT
+ #if defined(__cplusplus)
+ struct X{ EA_EXPORT void DoNothing(){} };
+ X x;
+ x.DoNothing();
+ #endif
+ }
+
+
+ // EA_PREPROCESSOR_JOIN
+ // EA_STRINGIFY
+ {
+ char buffer[32];
+ char bufferExpected[32];
+ const int line = (__LINE__ + 2);
+
+ sprintf(buffer, "%s %s", EA_STRINGIFY(EA_PREPROCESSOR_JOIN(test_, __LINE__)), EA_STRINGIFY(__LINE__));
+ sprintf(bufferExpected, "test_%d %d", line, line);
+
+ if(strcmp(buffer, bufferExpected) != 0)
+ DoError(nErrorCount, "EA_PREPROCESSOR_JOIN/EA_STRINGIFY test");
+ }
+
+
+ { // EAArrayCount
+ const int testArray[13] = { 0 };
+ const size_t arrayCount = EAArrayCount(testArray);
+
+ if((arrayCount != 13) || (testArray[0] != 0))
+ DoError(nErrorCount, "EAArrayCount test");
+ }
+
+ { // static_assert
+
+ // Should succeed.
+ static_assert(sizeof(int32_t) == 4, "static_assert failure");
+
+ // Should fail.
+ //static_assert(sizeof(int32_t) == 8, "static_assert failure");
+ }
+
+ { // EA_OPTIMIZE_OFF / EA_OPTIMIZE_ON
+ int result = DisabledOptimizations(2);
+
+ if(result != 2*37)
+ DoError(nErrorCount, "EA_OPTIMIZE_OFF test");
+ }
+
+ { // EA_UNUSED
+ FunctionWithUnusedVariables(3);
+ }
+
+ return nErrorCount;
+}
+
+
+//
+// Tests for EA_IS_ENABLED
+//
+#define EABASE_TEST_FEATURE_A EA_ENABLED
+#if EA_IS_ENABLED(EABASE_TEST_FEATURE_A)
+ // Feature A is enabled
+#else
+ #error Error EABASE_TEST_FEATURE_A should be enabled.
+#endif
+// Make sure it is possible to successfully negate the test.
+#if !EA_IS_ENABLED(EABASE_TEST_FEATURE_A)
+ #error Error EABASE_TEST_FEATURE_A should be enabled.
+#endif
+
+#define EABASE_TEST_FEATURE_B EA_DISABLED
+#if EA_IS_ENABLED(EABASE_TEST_FEATURE_B)
+ #error Error EABASE_TEST_FEATURE_B should be disabled.
+#endif
+// Make sure it is possible to successfully negate the test.
+#if !EA_IS_ENABLED(EABASE_TEST_FEATURE_B)
+ // Feature B is not enabled
+#else
+ #error Error EABASE_TEST_FEATURE_B should be disabled.
+#endif
+
+// The test below should cause compilation to fail if it is uncommented. However we can't
+// obviously enable the test because it will break the build. It should be tested manually
+// if changes to EA_IS_ENABLED are made.
+//
+// #if EA_IS_ENABLED(EABASE_TEST_FEATURE_WITH_NO_DEFINE)
+// #endif
+
+
+int EAMain(int argc, char** argv)
+{
+ int nErrorCount = 0, nTotalErrorCount = 0;
+
+ (void)argc;
+ (void)argv;
+
+ nErrorCount = TestEABase();
+ Printf("EABase test error count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEAPlatform();
+ Printf("EAPlatform test error count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEACompiler();
+ Printf("EACompiler test error count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ nErrorCount = TestEACompilerTraits();
+ Printf("EACompilerTraits test error count: %d\n\n", nErrorCount);
+ nTotalErrorCount += nErrorCount;
+
+ if (nTotalErrorCount == 0)
+ Printf("\nAll tests completed successfully.\n");
+ else
+ Printf("\nTests failed. Total error count: %d\n", nTotalErrorCount);
+
+ return nTotalErrorCount;
+}
diff --git a/EASTL/test/packages/EABase/test/source/TestEABaseSeparate.cpp b/EASTL/test/packages/EABase/test/source/TestEABaseSeparate.cpp
new file mode 100644
index 0000000..ca709a0
--- /dev/null
+++ b/EASTL/test/packages/EABase/test/source/TestEABaseSeparate.cpp
@@ -0,0 +1,34 @@
+// The purpose of this compilation unit is to test EABase in the absence of other system headers.
+// For example TestEABase.cpp directly includes system headers like ctype.h, stddef.h, stdarg, etc.
+// However, these headers make it impossible to verify that certain definitions are being provided
+// by EABase instead of the system headers being included directly.
+
+#include <EABase/eabase.h>
+
+// This structure tests that EABase types are properly defined.
+struct EABaseDefinedTypesStruct
+{
+ char8_t mChar8_t;
+ char16_t mChar16_t;
+ char32_t mChar32_t;
+ wchar_t mWchar_t;
+ bool8_t mBool8_t;
+ int8_t mInt8_t;
+ int16_t mInt16_t;
+ int32_t mInt32_t;
+ int64_t mInt64_t;
+ uint8_t mUint8_t;
+ uint16_t mUint16_t;
+ uint32_t mUint32_t;
+ uint64_t mUint64_t;
+ intmax_t mIntmax_t;
+ uintmax_t mUintmax_t;
+ size_t mSize_t;
+ ssize_t mSsize_t;
+ float_t mFloat_t;
+ double_t mDouble_t;
+ intptr_t mIntptr_t;
+ uintptr_t mUintptr_t;
+ ptrdiff_t mPtrdiff_t;
+};
+
diff --git a/EASTL/test/source/ConceptImpls.h b/EASTL/test/source/ConceptImpls.h
new file mode 100644
index 0000000..e6c20ef
--- /dev/null
+++ b/EASTL/test/source/ConceptImpls.h
@@ -0,0 +1,192 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef CONCEPTSIMPLS_H
+#define CONCEPTSIMPLS_H
+
+#include <EASTL/type_traits.h>
+
+#if !defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) && !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+
+#define EASTL_TEST_CONCEPT_IMPLS
+
+// This header provides a variety of helper classes that have interfaces corresponding to the concepts used to specify
+// requirements of many STL containers and algorithms. These helper classes are used in tests to verify that containers
+// and algorithms do not impose stricter requirements than specified by the standard on their arguments.
+
+// Destructible - only valid operation on this class is to destroy it.
+
+class Destructible
+{
+public:
+ ~Destructible() = default;
+
+ Destructible() = delete;
+ Destructible(const Destructible&) = delete;
+ Destructible(Destructible&&) = delete;
+ Destructible& operator=(const Destructible&) = delete;
+ Destructible& operator=(Destructible&&) = delete;
+};
+
+// Unfortunately not all compilers handle type_traits reliably correctly currently so we can't straightforwardly
+// static_assert everything that should be true of this class
+static_assert(eastl::is_destructible<Destructible>::value, "eastl::is_destructible<Destructible>::value");
+// static_assert(!eastl::is_default_constructible<Destructible>::value,
+// "!eastl::is_default_constructible<Destructible>::value");
+// static_assert(!is_copy_constructible<Destructible>::value, "!eastl::is_copy_constructible<Destructible>::value");
+static_assert(!eastl::is_copy_assignable<Destructible>::value, "!eastl::is_copy_assignable<Destructible>::value");
+// static_assert(!eastl::is_move_constructible<Destructible>::value,
+// "!eastl::is_move_constructible<Destructible>::value");
+static_assert(!eastl::is_move_assignable<Destructible>::value, "!eastl::is_move_assignable<Destructible>::value");
+
+class DefaultConstructible
+{
+public:
+ static const int defaultValue = 42;
+
+ DefaultConstructible() : value(defaultValue) {}
+ ~DefaultConstructible() = default;
+
+ DefaultConstructible(const DefaultConstructible&) = delete;
+ DefaultConstructible(DefaultConstructible&&) = delete;
+ DefaultConstructible& operator=(const DefaultConstructible&) = delete;
+ DefaultConstructible& operator=(DefaultConstructible&&) = delete;
+
+ const int value;
+};
+
+
+struct NotDefaultConstructible
+{
+ NotDefaultConstructible() = delete;
+};
+static_assert(!eastl::is_default_constructible<NotDefaultConstructible>::value, "'NotDefaultConstructible' is default constructible.");
+
+
+class CopyConstructible
+{
+public:
+ static const int defaultValue = 42;
+ static CopyConstructible Create()
+ {
+ CopyConstructible x;
+ return x;
+ }
+
+ CopyConstructible(const CopyConstructible&) = default;
+ ~CopyConstructible() = default;
+
+ CopyConstructible& operator=(const CopyConstructible&) = delete;
+ CopyConstructible& operator=(CopyConstructible&&) = delete;
+
+ const int value;
+
+private:
+ CopyConstructible() : value(defaultValue) {}
+};
+
+// Unfortunately not all compilers handle type_traits reliably correctly currently so we can't straightforwardly
+// static_assert everything that should be true of this class
+static_assert(eastl::is_destructible<CopyConstructible>::value, "eastl::is_destructible<CopyConstructible>::value");
+// static_assert(!eastl::is_default_constructible<CopyConstructible>::value,
+// "!eastl::is_default_constructible<CopyConstructible>::value");
+// static_assert(is_copy_constructible<CopyConstructible>::value, "is_copy_constructible<CopyConstructible>::value");
+static_assert(eastl::is_copy_constructible<CopyConstructible>::value,
+ "eastl::is_copy_constructible<CopyConstructible>::value");
+static_assert(!eastl::is_copy_assignable<CopyConstructible>::value,
+ "!eastl::is_copy_assignable<CopyConstructible>::value");
+// static_assert(!eastl::is_move_constructible<CopyConstructible>::value,
+// "!eastl::is_move_constructible<CopyConstructible>::value");
+static_assert(!eastl::is_move_assignable<CopyConstructible>::value,
+ "!eastl::is_move_assignable<CopyConstructible>::value");
+
+class MoveConstructible
+{
+public:
+ static const int defaultValue = 42;
+ static MoveConstructible Create()
+ {
+ return MoveConstructible{};
+ }
+
+ MoveConstructible(MoveConstructible&& x) : value(x.value) {}
+ ~MoveConstructible() = default;
+
+ MoveConstructible(const MoveConstructible&) = delete;
+ MoveConstructible& operator=(const MoveConstructible&) = delete;
+ MoveConstructible& operator=(MoveConstructible&&) = delete;
+
+ const int value;
+
+private:
+ MoveConstructible() : value(defaultValue) {}
+};
+
+class MoveAssignable
+{
+public:
+ static const int defaultValue = 42;
+ static MoveAssignable Create()
+ {
+ return MoveAssignable{};
+ }
+
+ MoveAssignable(MoveAssignable&& x) : value(x.value) {}
+ MoveAssignable& operator=(MoveAssignable&& x)
+ {
+ value = x.value;
+ return *this;
+ }
+ ~MoveAssignable() = default;
+
+ MoveAssignable(const MoveAssignable&) = delete;
+ MoveAssignable& operator=(const MoveAssignable&) = delete;
+
+ int value;
+
+private:
+ MoveAssignable() : value(defaultValue) {}
+};
+
+struct MoveAndDefaultConstructible
+{
+ static const int defaultValue = 42;
+
+ MoveAndDefaultConstructible() : value(defaultValue) {}
+ MoveAndDefaultConstructible(MoveAndDefaultConstructible&& x) : value(x.value) {}
+ ~MoveAndDefaultConstructible() = default;
+
+ MoveAndDefaultConstructible(const MoveAndDefaultConstructible&) = delete;
+ MoveAndDefaultConstructible& operator=(const MoveAndDefaultConstructible&) = delete;
+ MoveAndDefaultConstructible& operator=(MoveAndDefaultConstructible&&) = delete;
+
+ const int value;
+};
+
+struct MissingMoveConstructor
+{
+ MissingMoveConstructor() {}
+ MissingMoveConstructor(const MissingMoveConstructor&) {}
+ MissingMoveConstructor& operator=(MissingMoveConstructor&&) { return *this; }
+ MissingMoveConstructor& operator=(const MissingMoveConstructor&) { return *this; }
+ bool operator<(const MissingMoveConstructor&) const { return true; }
+};
+
+struct MissingMoveAssignable
+{
+ MissingMoveAssignable() {}
+ MissingMoveAssignable(const MissingMoveAssignable&) {}
+ MissingMoveAssignable(MissingMoveAssignable&&) {}
+ MissingMoveAssignable& operator=(const MissingMoveAssignable&) { return *this; }
+ bool operator<(const MissingMoveAssignable&) const { return true; }
+};
+
+struct MissingEquality
+{
+ MissingEquality& operator==(const MissingEquality&) = delete;
+};
+
+#endif // !defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) && !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+
+#endif // CONCEPTSIMPLS_H
diff --git a/EASTL/test/source/EASTLTest.cpp b/EASTL/test/source/EASTLTest.cpp
new file mode 100644
index 0000000..389de4c
--- /dev/null
+++ b/EASTL/test/source/EASTLTest.cpp
@@ -0,0 +1,273 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include <EABase/eabase.h>
+#include <EASTL/version.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <new>
+#if !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ #include <vector> // Used to detect the C++ Standard Library version.
+#endif
+
+#ifndef EA_PLATFORM_PLAYSTSATION2
+ #include <wchar.h>
+#endif
+#if defined(EA_PLATFORM_WINDOWS)
+ #include <Windows.h>
+#elif defined(EA_PLATFORM_ANDROID)
+ #include <android/log.h>
+#endif
+#if defined(_MSC_VER) && defined(EA_PLATFORM_MICROSOFT)
+ #include <crtdbg.h>
+#endif
+
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+#include "EASTLTestAllocator.h"
+#include "EASTLTest.h" // Include this last, as it enables compiler warnings.
+
+///////////////////////////////////////////////////////////////////////////////
+// init_seg
+//
+#ifdef _MSC_VER
+ // Set initialization order between init_seg(compiler) (.CRT$XCC) and
+ // init_seg(lib) (.CRT$XCL). The linker sorts the .CRT sections
+ // alphabetically so we simply need to pick a name that is between
+ // XCC and XCL.
+ #pragma warning(disable: 4075) // warning C4075: initializers put in unrecognized initialization area
+ #pragma init_seg(".CRT$XCF")
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_INIT_PRIORITY
+//
+// This is simply a wrapper for the GCC init_priority attribute that allows
+// multiplatform code to be easier to read.
+//
+// Example usage:
+// SomeClass gSomeClass EA_INIT_PRIORITY(2000);
+//
+#if !defined(EA_INIT_PRIORITY)
+ #if defined(__GNUC__)
+ #define EA_INIT_PRIORITY(x) __attribute__ ((init_priority (x)))
+ #else
+ #define EA_INIT_PRIORITY(x)
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestObject
+//
+int64_t TestObject::sTOCount = 0;
+int64_t TestObject::sTOCtorCount = 0;
+int64_t TestObject::sTODtorCount = 0;
+int64_t TestObject::sTODefaultCtorCount = 0;
+int64_t TestObject::sTOArgCtorCount = 0;
+int64_t TestObject::sTOCopyCtorCount = 0;
+int64_t TestObject::sTOMoveCtorCount = 0;
+int64_t TestObject::sTOCopyAssignCount = 0;
+int64_t TestObject::sTOMoveAssignCount = 0;
+int TestObject::sMagicErrorCount = 0;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// MallocAllocator
+//
+int MallocAllocator::mAllocCountAll = 0;
+int MallocAllocator::mFreeCountAll = 0;
+size_t MallocAllocator::mAllocVolumeAll = 0;
+void* MallocAllocator::mpLastAllocation = NULL;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// InstanceAllocator
+//
+int InstanceAllocator::mMismatchCount = 0;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// CountingAllocator
+//
+uint64_t CountingAllocator::activeAllocCount = 0;
+uint64_t CountingAllocator::totalAllocCount = 0;
+uint64_t CountingAllocator::totalDeallocCount = 0;
+uint64_t CountingAllocator::totalCtorCount = 0;
+uint64_t CountingAllocator::defaultCtorCount = 0;
+uint64_t CountingAllocator::copyCtorCount = 0;
+uint64_t CountingAllocator::assignOpCount = 0;
+uint64_t CountingAllocator::totalAllocatedMemory = 0;
+uint64_t CountingAllocator::activeAllocatedMemory = 0;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// gEASTL_TestLevel
+//
+int gEASTL_TestLevel = 0;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTLTest_CheckMemory_Imp
+//
+int EASTLTest_CheckMemory_Imp(const char* pFile, int nLine)
+{
+ int nErrorCount(0);
+ bool bMemoryOK(true);
+
+ #if defined(_DEBUG) && (defined(EA_COMPILER_MSVC) && defined(EA_PLATFORM_MICROSOFT))
+ if(!_CrtCheckMemory())
+ bMemoryOK = false;
+ #endif
+
+ #ifdef EA_DEBUG
+ if(!EASTLTest_ValidateHeap())
+ bMemoryOK = false;
+ #endif
+
+ if(!bMemoryOK)
+ {
+ nErrorCount++;
+ EASTLTest_Printf("Memory check failure:\n%s: line %d\n\n", pFile, nLine);
+ }
+
+ return nErrorCount;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// GetStdSTLType
+//
+StdSTLType GetStdSTLType()
+{
+ #if defined(_STLPORT_VERSION)
+ return kSTLPort; // Descendent of the old HP / SGI STL.
+ #elif defined(_RWSTD_VER_STR)
+ return kSTLApache; // a.k.a. Rogue Wave, which is a descendent of the old HP / SGI STL.
+ #elif defined(_CPPLIB_VER)
+ return kSTLDinkumware; // Indicated by the presence of the central yvals.h header.
+ #elif defined(_LIBCPP_VECTOR)
+ return kSTLClang;
+ #elif defined(_GLIBCXX_VECTOR)
+ return kSTLGCC; // a.k.a. libstdc++
+ #elif defined(_MSC_VER)
+ return kSTLMS; // This is a tweaked version of Dinkumware.
+ #else
+ return kSTLUnknown;
+ #endif
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// GetStdSTLName
+//
+const char* GetStdSTLName()
+{
+ // We may need to tweak this function over time.
+ // Theoretically it is possible to have multiple standard
+ // STL versions active, as some of them have options to
+ // put themselves in different namespaces.
+
+ // Tests for specific STL versions directly.
+ #if defined(_STLPORT_VERSION)
+ return "STLPort";
+ #elif defined(__SGI_STL_VECTOR)
+ return "SGI";
+ #elif defined(_RWSTD_VER_STR)
+ return "Apache";
+
+ // Tests for specific platforms that have specific STL versions.
+ #elif defined(EA_PLATFORM_SONY)
+ return "Sony Dinkumware";
+
+ // Special case for Dinkumware.
+ #elif defined(_CPPLIB_VER)
+ #if defined(_MSC_VER)
+ return "VC++ Dinkumware";
+ #else
+ return "Dinkumware";
+ #endif
+
+ // Tests for specific compilers as a fallback.
+ #elif defined(_MSC_VER)
+ return "VC++ ???";
+ #elif defined(_LIBCPP_VECTOR)
+ return "clang libc++";
+ #elif defined(__GNUC__) || defined(_GLIBCXX_VECTOR)
+ return "GCC (or emulated GCC) libstdc++";
+ #else
+ #error Need to be able to identify the standard STL version.
+ #endif
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// MallocAllocator
+///////////////////////////////////////////////////////////////////////////////
+
+// The following function is defined here instead of in the header because GCC
+// generates a bogus warning about freeing a non-heap pointer when this function
+// is declared inline.
+
+void MallocAllocator::deallocate(void *p, size_t n)
+{
+ ++mFreeCount;
+ mAllocVolume -= n;
+ ++mFreeCountAll;
+ mAllocVolumeAll -= n;
+
+ return free(p);
+}
+
+void* MallocAllocator::allocate(size_t n, int)
+{
+ ++mAllocCount; mAllocVolume += n; ++mAllocCountAll; mAllocVolumeAll += n; mpLastAllocation = malloc(n); return mpLastAllocation;
+}
+
+void* MallocAllocator::allocate(size_t n, size_t, size_t, int)
+{
+ ++mAllocCount; mAllocVolume += n; ++mAllocCountAll; mAllocVolumeAll += n; mpLastAllocation = malloc(n); return mpLastAllocation;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// CustomAllocator
+///////////////////////////////////////////////////////////////////////////////
+
+void* CustomAllocator::allocate(size_t n, int flags)
+{
+ return ::operator new[](n, get_name(), flags, 0, __FILE__, __LINE__);
+}
+
+void* CustomAllocator::allocate(size_t n, size_t alignment, size_t offset, int flags)
+{
+ return ::operator new[](n, alignment, offset, get_name(), flags, 0, __FILE__, __LINE__);
+}
+
+void CustomAllocator::deallocate(void* p, size_t /*n*/)
+{
+ ::operator delete((char*)p);
+}
+
+
+
+
+
+
diff --git a/EASTL/test/source/EASTLTest.h b/EASTL/test/source/EASTLTest.h
new file mode 100644
index 0000000..fca6b2c
--- /dev/null
+++ b/EASTL/test/source/EASTLTest.h
@@ -0,0 +1,1588 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTLTEST_H
+#define EASTLTEST_H
+
+
+#include <EABase/eabase.h>
+#include <EATest/EATest.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <vector> // For the STD_STL_TYPE defines below.
+#if EASTL_EXCEPTIONS_ENABLED
+ #include <stdexcept>
+ #include <new>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS();
+
+
+int TestAlgorithm();
+int TestAllocator();
+int TestAny();
+int TestArray();
+int TestBitVector();
+int TestBitset();
+int TestCharTraits();
+int TestChrono();
+int TestCppCXTypeTraits();
+int TestDeque();
+int TestExtra();
+int TestFinally();
+int TestFixedFunction();
+int TestFixedHash();
+int TestFixedList();
+int TestFixedMap();
+int TestFixedSList();
+int TestFixedSet();
+int TestFixedString();
+int TestFixedTupleVector();
+int TestFixedVector();
+int TestFunctional();
+int TestHash();
+int TestHeap();
+int TestIntrusiveHash();
+int TestIntrusiveList();
+int TestIntrusiveSDList();
+int TestIntrusiveSList();
+int TestIterator();
+int TestList();
+int TestListMap();
+int TestLruCache();
+int TestMap();
+int TestMemory();
+int TestMeta();
+int TestNumericLimits();
+int TestOptional();
+int TestRandom();
+int TestRatio();
+int TestRingBuffer();
+int TestSList();
+int TestSegmentedVector();
+int TestSet();
+int TestSmartPtr();
+int TestSort();
+int TestSpan();
+int TestString();
+int TestStringHashMap();
+int TestStringMap();
+int TestStringView();
+int TestTuple();
+int TestTupleVector();
+int TestTypeTraits();
+int TestUtility();
+int TestVariant();
+int TestVector();
+int TestVectorMap();
+int TestVectorSet();
+int TestAtomicBasic();
+int TestAtomicAsm();
+int TestBitcast();
+
+
+// Now enable warnings as desired.
+#ifdef _MSC_VER
+ #pragma warning(disable: 4324) // 'struct_name' : structure was padded due to __declspec(align())
+ //#pragma warning(disable: 4512) // 'class' : assignment operator could not be generated
+ //#pragma warning(disable: 4100) // 'identifier' : unreferenced formal parameter
+ //#pragma warning(disable: 4706) // assignment within conditional expression
+
+ #pragma warning(default: 4056) // Floating-point constant arithmetic generates a result that exceeds the maximum allowable value
+ #pragma warning(default: 4061) // The enumerate has no associated handler in a switch statement
+ #pragma warning(default: 4062) // The enumerate has no associated handler in a switch statement, and there is no default label
+ #pragma warning(default: 4191) // Calling this function through the result pointer may cause your program to crash
+ #pragma warning(default: 4217) // Member template functions cannot be used for copy-assignment or copy-construction
+ //#pragma warning(default: 4242) // 'variable' : conversion from 'type' to 'type', possible loss of data
+ #pragma warning(default: 4254) // 'operator' : conversion from 'type1' to 'type2', possible loss of data
+ #pragma warning(default: 4255) // 'function' : no function prototype given: converting '()' to '(void)'
+ #pragma warning(default: 4263) // 'function' : member function does not override any base class virtual member function
+ #pragma warning(default: 4264) // 'virtual_function' : no override available for virtual member function from base 'class'; function is hidden
+ #pragma warning(default: 4287) // 'operator' : unsigned/negative constant mismatch
+ #pragma warning(default: 4289) // Nonstandard extension used : 'var' : loop control variable declared in the for-loop is used outside the for-loop scope
+ #pragma warning(default: 4296) // 'operator' : expression is always false
+ #pragma warning(default: 4302) // 'conversion' : truncation from 'type 1' to 'type 2'
+ #pragma warning(default: 4339) // 'type' : use of undefined type detected in CLR meta-data - use of this type may lead to a runtime exception
+ #pragma warning(default: 4347) // Behavior change: 'function template' is called instead of 'function'
+ //#pragma warning(default: 4514) // unreferenced inline/local function has been removed
+ #pragma warning(default: 4529) // 'member_name' : forming a pointer-to-member requires explicit use of the address-of operator ('&') and a qualified name
+ #pragma warning(default: 4545) // Expression before comma evaluates to a function which is missing an argument list
+ #pragma warning(default: 4546) // Function call before comma missing argument list
+ #pragma warning(default: 4547) // 'operator' : operator before comma has no effect; expected operator with side-effect
+ //#pragma warning(default: 4548) // expression before comma has no effect; expected expression with side-effect
+ #pragma warning(default: 4549) // 'operator' : operator before comma has no effect; did you intend 'operator'?
+ #pragma warning(default: 4536) // 'type name' : type-name exceeds meta-data limit of 'limit' characters
+ #pragma warning(default: 4555) // Expression has no effect; expected expression with side-effect
+ #pragma warning(default: 4557) // '__assume' contains effect 'effect'
+ //#pragma warning(default: 4619) // #pragma warning : there is no warning number 'number'
+ #pragma warning(default: 4623) // 'derived class' : default constructor could not be generated because a base class default constructor is inaccessible
+ //#pragma warning(default: 4625) // 'derived class' : copy constructor could not be generated because a base class copy constructor is inaccessible
+ //#pragma warning(default: 4626) // 'derived class' : assignment operator could not be generated because a base class assignment operator is inaccessible
+ #pragma warning(default: 4628) // Digraphs not supported with -Ze. Character sequence 'digraph' not interpreted as alternate token for 'char'
+ #pragma warning(default: 4640) // 'instance' : construction of local static object is not thread-safe
+ #pragma warning(default: 4668) // 'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
+ #pragma warning(default: 4682) // 'parameter' : no directional parameter attribute specified, defaulting to [in]
+ #pragma warning(default: 4686) // 'user-defined type' : possible change in behavior, change in UDT return calling convention
+ //#pragma warning(default: 4710) // 'function' : function not inlined
+ //#pragma warning(default: 4786) // 'identifier' : identifier was truncated to 'number' characters in the debug information
+ #pragma warning(default: 4793) // Native code generated for function 'function': 'reason'
+ //#pragma warning(default: 4820) // 'bytes' bytes padding added after member 'member'
+ #pragma warning(default: 4905) // Wide string literal cast to 'LPSTR'
+ #pragma warning(default: 4906) // String literal cast to 'LPWSTR'
+ #pragma warning(default: 4917) // 'declarator' : a GUID cannot only be associated with a class, interface or namespace
+ #pragma warning(default: 4928) // Illegal copy-initialization; more than one user-defined conversion has been implicitly applied
+ #pragma warning(default: 4931) // We are assuming the type library was built for number-bit pointers
+ #pragma warning(default: 4946) // reinterpret_cast used between related classes: 'class1' and 'class2'
+
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL includes
+//
+// Intentionally keep these includes below the warning settings specified above.
+//
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+
+
+
+
+/// EASTL_TestLevel
+///
+/// Defines how extensive our testing is. A low level is for a desktop or
+/// nightly build in which the test can run quickly but still hit the
+/// majority of functionality. High level is for heavy testing and internal
+/// validation which may take numerous hours to run.
+///
+enum EASTL_TestLevel
+{
+ kEASTL_TestLevelLow = 1, /// ~10 seconds for test completion.
+ kEASTL_TestLevelHigh = 10 /// Numerous hours for test completion.
+};
+
+extern int gEASTL_TestLevel;
+
+
+
+/// EASTLTest_CheckMemory
+///
+/// Does a global memory heap validation check. Returns 0 if OK and
+/// an error count if there is a problem.
+///
+/// Example usage:
+/// EASTLTest_CheckMemory();
+///
+int EASTLTest_CheckMemory_Imp(const char* pFile, int nLine);
+#define EASTLTest_CheckMemory() EASTLTest_CheckMemory_Imp(__FILE__, __LINE__)
+
+
+
+// EASTLTEST_STD_STL_VER
+//
+#if defined(_STLPORT_VERSION)
+ #define EASTLTEST_STD_STL_VER_STLPORT
+#elif defined(_RWSTD_VER_STR) || defined(_RWSTD_NAMESPACE_END)
+ #define EASTLTEST_STD_STL_VER_APACHE
+#elif defined(_CPPLIB_VER)
+ #define EASTLTEST_STD_STL_VER_DINKUMWARE
+#elif defined(__GNUC__) && defined(_CXXCONFIG)
+ #define EASTLTEST_STD_STL_VER_GCC
+#else
+ #define EASTLTEST_STD_STL_VER_UNKNOWN
+#endif
+
+
+
+/// StdSTLType
+///
+enum StdSTLType
+{
+ kSTLUnknown, // Unknown type
+ kSTLPort, // STLPort. Descendent of the old HP / SGI STL.
+ kSTLApache, // Apache stdcxx (previously RogueWave), which is a descendent of the old HP / SGI STL.
+ kSTLClang, // Clang native. a.k.a. libc++
+ kSTLGCC, // GCC native. a.k.a. libstdc++
+ kSTLMS, // Microsoft. Tweaked version of Dinkumware.
+ kSTLDinkumware // Generic Dinkumware
+};
+
+StdSTLType GetStdSTLType();
+
+
+
+
+/// GetStdSTLName
+///
+/// Returns the name of the std C++ STL available to the current build.
+/// The returned value will be one of:
+/// "STLPort"
+/// "GCC"
+/// "VC++"
+// "Apache" // Previously RogueWave
+///
+const char* GetStdSTLName();
+
+
+/// gEASTLTest_AllocationCount
+///
+extern int gEASTLTest_AllocationCount;
+extern int gEASTLTest_TotalAllocationCount;
+
+
+
+// For backwards compatibility:
+#define EASTLTest_Printf EA::UnitTest::Report
+#define VERIFY EATEST_VERIFY
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// EASTLTest_Rand
+///
+/// Implements a basic random number generator for EASTL unit tests. It's not
+/// intended to be a robust random number generator (though it is decent),
+/// but rather is present so the unit tests can have a portable random number
+/// generator they can rely on being present.
+///
+/// Example usage:
+/// EASTLTest_Rand rng;
+/// eastl_size_t x = rng(); // Generate value in range of [0, 0xffffffff] (i.e. generate any uint32_t)
+/// eastl_ssize_t y = rng.Rand(1000); // Generate value in range of [0, 1000)
+/// eastl_ssize_t z = rng.RandRange(-50, +30); // Generate value in range of [-50, +30)
+///
+/// Example usage in the random_shuffle algorithm:
+/// EASTLTest_Rand rng;
+/// random_shuffle(first, last, rnd);
+///
+class EASTLTest_Rand
+{
+public:
+ EASTLTest_Rand(eastl_size_t nSeed) // The user must supply a seed; we don't provide default values.
+ : mnSeed(nSeed) { }
+
+ eastl_size_t Rand()
+ {
+ // This is not designed to be a high quality random number generator.
+ if(mnSeed == 0)
+ mnSeed = UINT64_C(0xfefefefefefefefe); // Can't have a seed of zero.
+
+ const uint64_t nResult64A = ((mnSeed * UINT64_C(6364136223846793005)) + UINT64_C(1442695040888963407));
+ const uint64_t nResult64B = ((nResult64A * UINT64_C(6364136223846793005)) + UINT64_C(1442695040888963407));
+
+ mnSeed = (nResult64A >> 32) ^ nResult64B;
+
+ return (eastl_size_t)mnSeed; // For eastl_size_t == uint32_t, this is a chop.
+ }
+
+ eastl_size_t operator()() // Returns a pseudorandom value in range of [0, 0xffffffffffffffff)] (i.e. generate any eastl_size_t)
+ { return Rand(); }
+
+ eastl_size_t operator()(eastl_size_t n) // Returns a pseudorandom value in range of [0, n)
+ { return RandLimit(n); }
+
+ eastl_size_t RandLimit(eastl_size_t nLimit) // Returns a pseudorandom value in range of [0, nLimit)
+ {
+ // Can't do the following correct solution because we don't have a portable int128_t to work with.
+ // We could implement a 128 bit multiply manually. See EAStdC/int128_t.cpp.
+ // return (eastl_size_t)((Rand() * (uint128_t)nLimit) >> 64);
+
+ return (Rand() % nLimit); // This results in an imperfect distribution, especially for the case of nLimit being high relative to eastl_size_t.
+ }
+
+ eastl_ssize_t RandRange(eastl_ssize_t nBegin, eastl_ssize_t nEnd) // Returns a pseudorandom value in range of [nBegin, nEnd)
+ { return nBegin + (eastl_ssize_t)RandLimit((eastl_size_t)(nEnd - nBegin)); }
+
+protected:
+ uint64_t mnSeed;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// RandGenT
+///
+/// A wrapper for EASTLTest_Rand which generates values of the given integral
+/// data type. This is mostly useful for clearnly avoiding compiler warnings,
+/// as we intentionally enable the highest warning levels in these tests.
+///
+template <typename Integer>
+struct RandGenT
+{
+ RandGenT(eastl_size_t nSeed)
+ : mRand(nSeed) { }
+
+ Integer operator()()
+ { return (Integer)mRand.Rand(); }
+
+ Integer operator()(eastl_size_t n)
+ { return (Integer)mRand.RandLimit(n); }
+
+ EASTLTest_Rand mRand;
+};
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// kMagicValue
+///
+/// Used as a unique integer. We assign this to TestObject in its constructor
+/// and verify in the TestObject destructor that the value is unchanged.
+/// This can be used to tell, for example, if an invalid object is being
+/// destroyed.
+///
+const uint32_t kMagicValue = 0x01f1cbe8;
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// TestObject
+///
+/// Implements a generic object that is suitable for use in container tests.
+/// Note that we choose a very restricted set of functions that are available
+/// for this class. Do not add any additional functions, as that would
+/// compromise the intentions of the unit tests.
+///
+struct TestObject
+{
+ int mX; // Value for the TestObject.
+ bool mbThrowOnCopy; // Throw an exception of this object is copied, moved, or assigned to another.
+ int64_t mId; // Unique id for each object, equal to its creation number. This value is not coped from other TestObjects during any operations, including moves.
+ uint32_t mMagicValue; // Used to verify that an instance is valid and that it is not corrupted. It should always be kMagicValue.
+ static int64_t sTOCount; // Count of all current existing TestObjects.
+ static int64_t sTOCtorCount; // Count of times any ctor was called.
+ static int64_t sTODtorCount; // Count of times dtor was called.
+ static int64_t sTODefaultCtorCount; // Count of times the default ctor was called.
+ static int64_t sTOArgCtorCount; // Count of times the x0,x1,x2 ctor was called.
+ static int64_t sTOCopyCtorCount; // Count of times copy ctor was called.
+ static int64_t sTOMoveCtorCount; // Count of times move ctor was called.
+ static int64_t sTOCopyAssignCount; // Count of times copy assignment was called.
+ static int64_t sTOMoveAssignCount; // Count of times move assignment was called.
+ static int sMagicErrorCount; // Number of magic number mismatch errors.
+
+ explicit TestObject(int x = 0, bool bThrowOnCopy = false)
+ : mX(x), mbThrowOnCopy(bThrowOnCopy), mMagicValue(kMagicValue)
+ {
+ ++sTOCount;
+ ++sTOCtorCount;
+ ++sTODefaultCtorCount;
+ mId = sTOCtorCount;
+ }
+
+ // This constructor exists for the purpose of testing variadiac template arguments, such as with the emplace container functions.
+ TestObject(int x0, int x1, int x2, bool bThrowOnCopy = false)
+ : mX(x0 + x1 + x2), mbThrowOnCopy(bThrowOnCopy), mMagicValue(kMagicValue)
+ {
+ ++sTOCount;
+ ++sTOCtorCount;
+ ++sTOArgCtorCount;
+ mId = sTOCtorCount;
+ }
+
+ TestObject(const TestObject& testObject)
+ : mX(testObject.mX), mbThrowOnCopy(testObject.mbThrowOnCopy), mMagicValue(testObject.mMagicValue)
+ {
+ ++sTOCount;
+ ++sTOCtorCount;
+ ++sTOCopyCtorCount;
+ mId = sTOCtorCount;
+ if(mbThrowOnCopy)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw "Disallowed TestObject copy";
+ #endif
+ }
+ }
+
+ // Due to the nature of TestObject, there isn't much special for us to
+ // do in our move constructor. A move constructor swaps its contents with
+ // the other object, whhich is often a default-constructed object.
+ TestObject(TestObject&& testObject)
+ : mX(testObject.mX), mbThrowOnCopy(testObject.mbThrowOnCopy), mMagicValue(testObject.mMagicValue)
+ {
+ ++sTOCount;
+ ++sTOCtorCount;
+ ++sTOMoveCtorCount;
+ mId = sTOCtorCount; // testObject keeps its mId, and we assign ours anew.
+ testObject.mX = 0; // We are swapping our contents with the TestObject, so give it our "previous" value.
+ if(mbThrowOnCopy)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw "Disallowed TestObject copy";
+ #endif
+ }
+ }
+
+ TestObject& operator=(const TestObject& testObject)
+ {
+ ++sTOCopyAssignCount;
+
+ if(&testObject != this)
+ {
+ mX = testObject.mX;
+ // Leave mId alone.
+ mMagicValue = testObject.mMagicValue;
+ mbThrowOnCopy = testObject.mbThrowOnCopy;
+ if(mbThrowOnCopy)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw "Disallowed TestObject copy";
+ #endif
+ }
+ }
+ return *this;
+ }
+
+ TestObject& operator=(TestObject&& testObject)
+ {
+ ++sTOMoveAssignCount;
+
+ if(&testObject != this)
+ {
+ eastl::swap(mX, testObject.mX);
+ // Leave mId alone.
+ eastl::swap(mMagicValue, testObject.mMagicValue);
+ eastl::swap(mbThrowOnCopy, testObject.mbThrowOnCopy);
+
+ if(mbThrowOnCopy)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw "Disallowed TestObject copy";
+ #endif
+ }
+ }
+ return *this;
+ }
+
+ ~TestObject()
+ {
+ if(mMagicValue != kMagicValue)
+ ++sMagicErrorCount;
+ mMagicValue = 0;
+ --sTOCount;
+ ++sTODtorCount;
+ }
+
+ static void Reset()
+ {
+ sTOCount = 0;
+ sTOCtorCount = 0;
+ sTODtorCount = 0;
+ sTODefaultCtorCount = 0;
+ sTOArgCtorCount = 0;
+ sTOCopyCtorCount = 0;
+ sTOMoveCtorCount = 0;
+ sTOCopyAssignCount = 0;
+ sTOMoveAssignCount = 0;
+ sMagicErrorCount = 0;
+ }
+
+ static bool IsClear() // Returns true if there are no existing TestObjects and the sanity checks related to that test OK.
+ {
+ return (sTOCount == 0) && (sTODtorCount == sTOCtorCount) && (sMagicErrorCount == 0);
+ }
+};
+
+// Operators
+// We specifically define only == and <, in order to verify that
+// our containers and algorithms are not mistakenly expecting other
+// operators for the contained and manipulated classes.
+inline bool operator==(const TestObject& t1, const TestObject& t2)
+ { return t1.mX == t2.mX; }
+
+inline bool operator<(const TestObject& t1, const TestObject& t2)
+ { return t1.mX < t2.mX; }
+
+
+// TestObject hash
+// Normally you don't want to put your hash functions in the eastl namespace, as that namespace is owned by EASTL.
+// However, these are the EASTL unit tests and we can say that they are also owned by EASTL.
+namespace eastl
+{
+ template <>
+ struct hash<TestObject>
+ {
+ size_t operator()(const TestObject& a) const
+ { return static_cast<size_t>(a.mX); }
+ };
+}
+
+
+// use_mX
+// Used for printing TestObject contents via the PrintSequence function,
+// which is defined below. See the PrintSequence function for documentation.
+// This function is an analog of the eastl::use_self and use_first functions.
+// We declare this all in one line because the user should never need to
+// debug usage of this function.
+template <typename T> struct use_mX { int operator()(const T& t) const { return t.mX; } };
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SizedPOD
+//
+// Exists for the purpose testing PODs that are larger than built-in types.
+//
+template <size_t kSize>
+struct SizedPOD
+{
+ char memory[kSize];
+};
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// ConstType
+///
+/// Used to test const type containers (e.g. vector<const ConstType>).
+///
+class ConstType
+{
+public:
+ ConstType(int value) : mDummy(value) {};
+ int mDummy;
+};
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// TestObjectHash
+///
+/// Implements a manually specified hash function for TestObjects.
+///
+struct TestObjectHash
+{
+ size_t operator()(const TestObject& t) const
+ {
+ return (size_t)t.mX;
+ }
+};
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// Align16
+///
+
+#if defined(EA_PROCESSOR_ARM)
+ #define kEASTLTestAlign16 8 //ARM processors can only align to 8
+#else
+ #define kEASTLTestAlign16 16
+#endif
+
+
+EA_PREFIX_ALIGN(kEASTLTestAlign16)
+struct Align16
+{
+ explicit Align16(int x = 0) : mX(x) {}
+ int mX;
+} EA_POSTFIX_ALIGN(kEASTLTestAlign16);
+
+inline bool operator==(const Align16& a, const Align16& b)
+ { return (a.mX == b.mX); }
+
+inline bool operator<(const Align16& a, const Align16& b)
+ { return (a.mX < b.mX); }
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// Align32
+///
+#if defined(EA_PROCESSOR_ARM)
+ #define kEASTLTestAlign32 8 //ARM processors can only align to 8
+#elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) < 400) // GCC 2.x, 3.x
+ #define kEASTLTestAlign32 16 // Some versions of GCC fail to support any alignment beyond 16.
+#else
+ #define kEASTLTestAlign32 32
+#endif
+
+EA_PREFIX_ALIGN(kEASTLTestAlign32)
+struct Align32
+{
+ explicit Align32(int x = 0) : mX(x) {}
+ int mX;
+} EA_POSTFIX_ALIGN(kEASTLTestAlign32);
+
+inline bool operator==(const Align32& a, const Align32& b)
+ { return (a.mX == b.mX); }
+
+inline bool operator<(const Align32& a, const Align32& b)
+ { return (a.mX < b.mX); }
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// Align64
+///
+/// Used for testing of alignment.
+///
+#if defined(EA_PROCESSOR_ARM)
+ #define kEASTLTestAlign64 8
+#elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) < 400) // GCC 2.x, 3.x
+ #define kEASTLTestAlign64 16 // Some versions of GCC fail to support any alignment beyond 16.
+#else
+ #define kEASTLTestAlign64 64
+#endif
+
+EA_PREFIX_ALIGN(kEASTLTestAlign64)
+struct Align64
+{
+ explicit Align64(int x = 0) : mX(x) {}
+ int mX;
+} EA_POSTFIX_ALIGN(kEASTLTestAlign64);
+
+inline bool operator==(const Align64& a, const Align64& b)
+ { return (a.mX == b.mX); }
+
+inline bool operator<(const Align64& a, const Align64& b)
+ { return (a.mX < b.mX); }
+
+namespace eastl
+{
+ template <>
+ struct hash < Align64 >
+ {
+ size_t operator()(const Align64& a) const
+ {
+ return static_cast<size_t>(a.mX);
+ }
+ };
+}
+
+
+
+
+
+/// test_use_self
+///
+/// Intentionally avoiding a dependency on eastl::use_self.
+///
+template <typename T>
+struct test_use_self
+{
+ const T& operator()(const T& x) const
+ { return x; }
+};
+
+
+
+/// GenerateIncrementalIntegers
+///
+/// Used to seed containers with incremental values based on integers.
+///
+/// Example usage:
+/// vector<int> v(10, 0);
+/// generate(v.begin(), v.end(), GenerateIncrementalIntegers<int>());
+/// // v will now have 0, 1, 2, ... 8, 9.
+///
+/// generate_n(intArray.begin(), 10, GenerateIncrementalIntegers<int>());
+/// // v will now have 0, 1, 2, ... 8, 9.
+///
+/// vector<TestObject> vTO(10, 0);
+/// generate(vTO.begin(), vTO.end(), GenerateIncrementalIntegers<TestObject>());
+/// // vTO will now have 0, 1, 2, ... 8, 9.
+///
+template <typename T>
+struct GenerateIncrementalIntegers
+{
+ int mX;
+
+ GenerateIncrementalIntegers(int x = 0)
+ : mX(x) { }
+
+ void reset(int x = 0)
+ { mX = x; }
+
+ T operator()()
+ { return T(mX++); }
+};
+
+
+
+/// SetIncrementalIntegers
+///
+/// Used to seed containers with incremental values based on integers.
+///
+/// Example usage:
+/// vector<int> v(10, 0);
+/// for_each(v.begin(), v.end(), SetIncrementalIntegers<int>());
+/// // v will now have 0, 1, 2, ... 8, 9.
+///
+template <typename T>
+struct SetIncrementalIntegers
+{
+ int mX;
+
+ SetIncrementalIntegers(int x = 0)
+ : mX(x) { }
+
+ void reset(int x = 0)
+ { mX = x; }
+
+ void operator()(T& t)
+ { t = T(mX++); }
+};
+
+
+
+/// CompareContainers
+///
+/// Does a comparison between the contents of two containers.
+///
+/// Specifically tests for the following properties:
+/// empty() is the same for both
+/// size() is the same for both
+/// iteration through both element by element yields equal values.
+///
+template <typename T1, typename T2, typename ExtractValue1, typename ExtractValue2>
+int CompareContainers(const T1& t1, const T2& t2, const char* ppName,
+ ExtractValue1 ev1 = test_use_self<T1>(), ExtractValue2 ev2 = test_use_self<T2>())
+{
+ int nErrorCount = 0;
+
+ // Compare emptiness.
+ VERIFY(t1.empty() == t2.empty());
+
+ // Compare sizes.
+ const size_t nSize1 = t1.size();
+ const size_t nSize2 = t2.size();
+
+ VERIFY(nSize1 == nSize2);
+ if(nSize1 != nSize2)
+ EASTLTest_Printf("%s: Container size difference: %u, %u\n", ppName, (unsigned)nSize1, (unsigned)nSize2);
+
+ // Compare values.
+ if(nSize1 == nSize2)
+ {
+ // Test iteration
+ typename T1::const_iterator it1 = t1.begin();
+ typename T2::const_iterator it2 = t2.begin();
+
+ for(unsigned j = 0; it1 != t1.end(); ++it1, ++it2, ++j)
+ {
+ const typename T1::value_type& v1 = *it1;
+ const typename T2::value_type& v2 = *it2;
+
+ VERIFY(ev1(v1) == ev2(v2));
+ if(!(ev1(v1) == ev2(v2)))
+ {
+ EASTLTest_Printf("%s: Container iterator difference at index %d\n", ppName, j);
+ break;
+ }
+ }
+
+ VERIFY(it1 == t1.end());
+ VERIFY(it2 == t2.end());
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+/// VerifySequence
+///
+/// Allows the user to specify that a container has a given set of values.
+///
+/// Example usage:
+/// vector<int> v;
+/// v.push_back(1); v.push_back(3); v.push_back(5);
+/// VerifySequence(v.begin(), v.end(), int(), "v.push_back", 1, 3, 5, -1);
+///
+/// Note: The StackValue template argument is a hint to the compiler about what type
+/// the passed vararg sequence is.
+///
+template <typename InputIterator, typename StackValue>
+bool VerifySequence(InputIterator first, InputIterator last, StackValue /*unused*/, const char* pName, ...)
+{
+ typedef typename eastl::iterator_traits<InputIterator>::value_type value_type;
+
+ int argIndex = 0;
+ int seqIndex = 0;
+ bool bReturnValue = true;
+ StackValue next;
+
+ va_list args;
+ va_start(args, pName);
+
+ for( ; first != last; ++first, ++argIndex, ++seqIndex)
+ {
+ next = va_arg(args, StackValue);
+
+ if((next == StackValue(-1)) || !(value_type(next) == *first))
+ {
+ if(pName)
+ EASTLTest_Printf("[%s] Mismatch at index %d\n", pName, argIndex);
+ else
+ EASTLTest_Printf("Mismatch at index %d\n", argIndex);
+ bReturnValue = false;
+ }
+ }
+
+ for(; first != last; ++first)
+ ++seqIndex;
+
+ if(bReturnValue)
+ {
+ next = va_arg(args, StackValue);
+
+ if(!(next == StackValue(-1)))
+ {
+ do {
+ ++argIndex;
+ next = va_arg(args, StackValue);
+ } while(!(next == StackValue(-1)));
+
+ if(pName)
+ EASTLTest_Printf("[%s] Too many elements: expected %d, found %d\n", pName, argIndex, seqIndex);
+ else
+ EASTLTest_Printf("Too many elements: expected %d, found %d\n", argIndex, seqIndex);
+ bReturnValue = false;
+ }
+ }
+
+ va_end(args);
+
+ return bReturnValue;
+}
+
+
+
+
+/// PrintSequence
+///
+/// Allows the user to print a sequence of values.
+///
+/// Example usage:
+/// vector<int> v;
+/// PrintSequence(v.begin(), v.end(), use_self<int>(), 100, "vector", 1, 3, 5, -1);
+///
+/// Example usage:
+/// template <typename T> struct use_mX { int operator()(const T& t) const { return t.mX; } };
+/// vector<TestObject> v;
+/// PrintSequence(v.begin(), v.end(), use_mX<TestObject>(), 100, "vector", 1, 3, 5, -1);
+///
+template <typename InputIterator, typename ExtractInt>
+void PrintSequence(InputIterator first, InputIterator last, ExtractInt extractInt, int nMaxCount, const char* pName, ...)
+{
+ if(pName)
+ EASTLTest_Printf("[%s]", pName);
+
+ for(int i = 0; (i < nMaxCount) && (first != last); ++i, ++first)
+ {
+ EASTLTest_Printf("%d ", (int)extractInt(*first));
+ }
+
+ EASTLTest_Printf("\n");
+}
+
+
+
+
+/// demoted_iterator
+///
+/// Converts an iterator into a demoted category. For example, you can convert
+/// an iterator of type bidirectional_iterator_tag to forward_iterator_tag.
+/// The following is a list of iterator types. A demonted iterator can be demoted
+/// only to a lower iterator category (earlier in the following list):
+/// input_iterator_tag
+/// forward_iterator_tag
+/// bidirectional_iterator_tag
+/// random_access_iterator_tag
+/// contiguous_iterator_tag
+///
+/// Converts something which can be iterated into a formal input iterator.
+/// This class is useful for testing functions and algorithms that expect
+/// InputIterators, which are the lowest and 'weakest' form of iterators.
+///
+/// Key traits of InputIterators:
+/// Algorithms on input iterators should never attempt to pass
+/// through the same iterator twice. They should be single pass
+/// algorithms. value_type T is not required to be an lvalue type.
+///
+/// Example usage:
+/// typedef demoted_iterator<int*, eastl::bidirectional_iterator_tag> PointerAsBidirectionalIterator;
+/// typedef demoted_iterator<MyVector::iterator, eastl::forward_iterator_tag> VectorIteratorAsForwardIterator;
+///
+/// Example usage:
+/// IntVector v;
+/// comb_sort(to_forward_iterator(v.begin()), to_forward_iterator(v.end()));
+///
+template <typename Iterator, typename IteratorCategory>
+class demoted_iterator
+{
+protected:
+ Iterator mIterator;
+
+public:
+ typedef demoted_iterator<Iterator, IteratorCategory> this_type;
+ typedef Iterator iterator_type;
+ typedef IteratorCategory iterator_category;
+ typedef typename eastl::iterator_traits<Iterator>::value_type value_type;
+ typedef typename eastl::iterator_traits<Iterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<Iterator>::reference reference;
+ typedef typename eastl::iterator_traits<Iterator>::pointer pointer;
+
+ demoted_iterator()
+ : mIterator() { }
+
+ explicit demoted_iterator(const Iterator& i)
+ : mIterator(i) { }
+
+ demoted_iterator(const this_type& x)
+ : mIterator(x.mIterator) { }
+
+ this_type& operator=(const Iterator& i)
+ { mIterator = i; return *this; }
+
+ this_type& operator=(const this_type& x)
+ { mIterator = x.mIterator; return *this; }
+
+ reference operator*() const
+ { return *mIterator; }
+
+ pointer operator->() const
+ { return mIterator; }
+
+ this_type& operator++()
+ { ++mIterator; return *this; }
+
+ this_type operator++(int)
+ { return this_type(mIterator++); }
+
+ this_type& operator--()
+ { --mIterator; return *this; }
+
+ this_type operator--(int)
+ { return this_type(mIterator--); }
+
+ reference operator[](const difference_type& n) const
+ { return mIterator[n]; }
+
+ this_type& operator+=(const difference_type& n)
+ { mIterator += n; return *this; }
+
+ this_type operator+(const difference_type& n) const
+ { return this_type(mIterator + n); }
+
+ this_type& operator-=(const difference_type& n)
+ { mIterator -= n; return *this; }
+
+ this_type operator-(const difference_type& n) const
+ { return this_type(mIterator - n); }
+
+ const iterator_type& base() const
+ { return mIterator; }
+
+}; // class demoted_iterator
+
+template<typename Iterator1, typename IteratorCategory1, typename Iterator2, typename IteratorCategory2>
+inline bool
+operator==(const demoted_iterator<Iterator1, IteratorCategory1>& a, const demoted_iterator<Iterator2, IteratorCategory2>& b)
+ { return a.base() == b.base(); }
+
+template<typename Iterator1, typename IteratorCategory1, typename Iterator2, typename IteratorCategory2>
+inline bool
+operator!=(const demoted_iterator<Iterator1, IteratorCategory1>& a, const demoted_iterator<Iterator2, IteratorCategory2>& b)
+ { return !(a == b); }
+
+template<typename Iterator1, typename IteratorCategory1, typename Iterator2, typename IteratorCategory2>
+inline bool
+operator<(const demoted_iterator<Iterator1, IteratorCategory1>& a, const demoted_iterator<Iterator2, IteratorCategory2>& b)
+ { return a.base() < b.base(); }
+
+template<typename Iterator1, typename IteratorCategory1, typename Iterator2, typename IteratorCategory2>
+inline bool
+operator<=(const demoted_iterator<Iterator1, IteratorCategory1>& a, const demoted_iterator<Iterator2, IteratorCategory2>& b)
+ { return !(b < a); }
+
+template<typename Iterator1, typename IteratorCategory1, typename Iterator2, typename IteratorCategory2>
+inline bool
+operator>(const demoted_iterator<Iterator1, IteratorCategory1>& a, const demoted_iterator<Iterator2, IteratorCategory2>& b)
+ { return b < a; }
+
+template<typename Iterator1, typename IteratorCategory1, typename Iterator2, typename IteratorCategory2>
+inline bool
+operator>=(const demoted_iterator<Iterator1, IteratorCategory1>& a, const demoted_iterator<Iterator2, IteratorCategory2>& b)
+ { return !(a < b); }
+
+template<typename Iterator1, typename IteratorCategory1, typename Iterator2, typename IteratorCategory2>
+inline demoted_iterator<Iterator1, IteratorCategory1>
+operator-(const demoted_iterator<Iterator1, IteratorCategory1>& a, const demoted_iterator<Iterator2, IteratorCategory2>& b)
+ { return demoted_iterator<Iterator1, IteratorCategory1>(a.base() - b.base()); }
+
+template<typename Iterator1, typename IteratorCategory1>
+inline demoted_iterator<Iterator1, IteratorCategory1>
+operator+(typename demoted_iterator<Iterator1, IteratorCategory1>::difference_type n, const demoted_iterator<Iterator1, IteratorCategory1>& a)
+ { return a + n; }
+
+
+// to_xxx_iterator
+//
+// Returns a demoted iterator
+//
+template <typename Iterator>
+inline demoted_iterator<Iterator, EASTL_ITC_NS::input_iterator_tag>
+to_input_iterator(const Iterator& i)
+ { return demoted_iterator<Iterator, EASTL_ITC_NS::input_iterator_tag>(i); }
+
+template <typename Iterator>
+inline demoted_iterator<Iterator, EASTL_ITC_NS::forward_iterator_tag>
+to_forward_iterator(const Iterator& i)
+ { return demoted_iterator<Iterator, EASTL_ITC_NS::forward_iterator_tag>(i); }
+
+template <typename Iterator>
+inline demoted_iterator<Iterator, EASTL_ITC_NS::bidirectional_iterator_tag>
+to_bidirectional_iterator(const Iterator& i)
+ { return demoted_iterator<Iterator, EASTL_ITC_NS::bidirectional_iterator_tag>(i); }
+
+template <typename Iterator>
+inline demoted_iterator<Iterator, EASTL_ITC_NS::random_access_iterator_tag>
+to_random_access_iterator(const Iterator& i)
+ { return demoted_iterator<Iterator, EASTL_ITC_NS::random_access_iterator_tag>(i); }
+
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// MallocAllocator
+//
+// Implements an EASTL allocator that uses malloc/free as opposed to
+// new/delete or PPMalloc Malloc/Free. This is useful for testing
+// allocator behaviour of code.
+//
+// Example usage:
+// vector<int, MallocAllocator> intVector;
+//
+class MallocAllocator
+{
+public:
+ MallocAllocator(const char* = EASTL_NAME_VAL("MallocAllocator"))
+ : mAllocCount(0), mFreeCount(0), mAllocVolume(0) {}
+
+ MallocAllocator(const MallocAllocator& x)
+ : mAllocCount(x.mAllocCount), mFreeCount(x.mFreeCount), mAllocVolume(x.mAllocVolume) {}
+
+ MallocAllocator(const MallocAllocator& x, const char*) : MallocAllocator(x) {}
+
+ MallocAllocator& operator=(const MallocAllocator& x)
+ {
+ mAllocCount = x.mAllocCount;
+ mFreeCount = x.mFreeCount;
+ mAllocVolume = x.mAllocVolume;
+ return *this;
+ }
+
+ void* allocate(size_t n, int = 0);
+ void* allocate(size_t n, size_t, size_t, int = 0); // We don't support alignment, so you can't use this class where alignment is required.
+ void deallocate(void* p, size_t n);
+
+ const char* get_name() const { return "MallocAllocator"; }
+ void set_name(const char*) {}
+
+ static void reset_all()
+ {
+ mAllocCountAll = 0;
+ mFreeCountAll = 0;
+ mAllocVolumeAll = 0;
+ mpLastAllocation = NULL;
+ }
+
+public:
+ int mAllocCount;
+ int mFreeCount;
+ size_t mAllocVolume;
+
+ static int mAllocCountAll;
+ static int mFreeCountAll;
+ static size_t mAllocVolumeAll;
+ static void* mpLastAllocation;
+};
+
+inline bool operator==(const MallocAllocator&, const MallocAllocator&) { return true; }
+inline bool operator!=(const MallocAllocator&, const MallocAllocator&) { return false; }
+
+
+///////////////////////////////////////////////////////////////////////////////
+// CustomAllocator
+//
+// Implements an allocator that works just like eastl::allocator but is defined
+// within this test as opposed to within EASTL.
+//
+// Example usage:
+// vector<int, CustomAllocator> intVector;
+//
+class CustomAllocator
+{
+public:
+ CustomAllocator(const char* = NULL) {}
+ CustomAllocator(const CustomAllocator&) {}
+ CustomAllocator(const CustomAllocator&, const char*) {}
+ CustomAllocator& operator=(const CustomAllocator&) { return *this; }
+
+ void* allocate(size_t n, int flags = 0);
+ void* allocate(size_t n, size_t, size_t, int flags = 0);
+ void deallocate(void* p, size_t n);
+
+ const char* get_name() const { return "CustomAllocator"; }
+ void set_name(const char*) {}
+};
+
+inline bool operator==(const CustomAllocator&, const CustomAllocator&) { return true; }
+inline bool operator!=(const CustomAllocator&, const CustomAllocator&) { return false; }
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// UnequalAllocator
+///
+/// Acts the same as eastl::allocator, but always compares as unequal to an
+/// instance of itself.
+///
+class UnequalAllocator
+{
+public:
+ EASTL_ALLOCATOR_EXPLICIT UnequalAllocator(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME))
+ : mAllocator(pName) {}
+
+ UnequalAllocator(const UnequalAllocator& x) : mAllocator(x.mAllocator) {}
+ UnequalAllocator(const UnequalAllocator& x, const char* pName) : mAllocator(x.mAllocator) { set_name(pName); }
+ UnequalAllocator& operator=(const UnequalAllocator& x)
+ {
+ mAllocator = x.mAllocator;
+ return *this;
+ }
+
+ void* allocate(size_t n, int flags = 0) { return mAllocator.allocate(n, flags); }
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0) { return mAllocator.allocate(n, alignment, offset, flags); }
+ void deallocate(void* p, size_t n) { return mAllocator.deallocate(p, n); }
+
+ const char* get_name() const { return mAllocator.get_name(); }
+ void set_name(const char* pName) { mAllocator.set_name(pName); }
+
+protected:
+ eastl::allocator mAllocator;
+};
+
+inline bool operator==(const UnequalAllocator&, const UnequalAllocator&) { return false; }
+inline bool operator!=(const UnequalAllocator&, const UnequalAllocator&) { return true; }
+
+
+///////////////////////////////////////////////////////////////////////////////
+/// CountingAllocator
+///
+/// Counts allocation events allowing unit tests to validate assumptions.
+///
+class CountingAllocator : public eastl::allocator
+{
+public:
+ using base_type = eastl::allocator;
+
+ EASTL_ALLOCATOR_EXPLICIT CountingAllocator(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME))
+ : base_type(pName)
+ {
+ totalCtorCount++;
+ defaultCtorCount++;
+ }
+
+ CountingAllocator(const CountingAllocator& x) : base_type(x)
+ {
+ totalCtorCount++;
+ copyCtorCount++;
+ }
+
+ CountingAllocator(const CountingAllocator& x, const char* pName) : base_type(x)
+ {
+ totalCtorCount++;
+ copyCtorCount++;
+ set_name(pName);
+ }
+
+ CountingAllocator& operator=(const CountingAllocator& x)
+ {
+ base_type::operator=(x);
+ assignOpCount++;
+ return *this;
+ }
+
+ virtual void* allocate(size_t n, int flags = 0)
+ {
+ activeAllocCount++;
+ totalAllocCount++;
+ totalAllocatedMemory += n;
+ activeAllocatedMemory += n;
+ return base_type::allocate(n, flags);
+ }
+
+ virtual void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+ {
+ activeAllocCount++;
+ totalAllocCount++;
+ totalAllocatedMemory += n;
+ activeAllocatedMemory += n;
+ return base_type::allocate(n, alignment, offset, flags);
+ }
+
+ void deallocate(void* p, size_t n)
+ {
+ activeAllocCount--;
+ totalDeallocCount--;
+ activeAllocatedMemory -= n;
+ return base_type::deallocate(p, n);
+ }
+
+ const char* get_name() const { return base_type::get_name(); }
+ void set_name(const char* pName) { base_type::set_name(pName); }
+
+ static auto getTotalAllocationCount() { return totalAllocCount; }
+ static auto getTotalAllocationSize() { return totalAllocatedMemory; }
+ static auto getActiveAllocationSize() { return activeAllocatedMemory; }
+ static auto getActiveAllocationCount() { return activeAllocCount; }
+ static auto neverUsed() { return totalAllocCount == 0; }
+
+ static void resetCount()
+ {
+ activeAllocCount = 0;
+ totalAllocCount = 0;
+ totalDeallocCount = 0;
+ totalCtorCount = 0;
+ defaultCtorCount = 0;
+ copyCtorCount = 0;
+ assignOpCount = 0;
+ totalAllocatedMemory = 0;
+ activeAllocatedMemory = 0;
+ }
+
+ virtual ~CountingAllocator() = default;
+
+ static uint64_t activeAllocCount;
+ static uint64_t totalAllocCount;
+ static uint64_t totalDeallocCount;
+ static uint64_t totalCtorCount;
+ static uint64_t defaultCtorCount;
+ static uint64_t copyCtorCount;
+ static uint64_t assignOpCount;
+ static uint64_t totalAllocatedMemory; // the total amount of memory allocated
+ static uint64_t activeAllocatedMemory; // currently allocated memory by allocator
+};
+
+inline bool operator==(const CountingAllocator& rhs, const CountingAllocator& lhs) { return operator==(CountingAllocator::base_type(rhs), CountingAllocator::base_type(lhs)); }
+inline bool operator!=(const CountingAllocator& rhs, const CountingAllocator& lhs) { return !(rhs == lhs); }
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// InstanceAllocator
+//
+// Implements an allocator which has a instance id that makes it different
+// from other InstanceAllocators of a different id. Allocations between
+// InstanceAllocators of different ids are incompatible. An allocation done
+// by an InstanceAllocator of id=0 cannot be freed by an InstanceAllocator
+// of id=1.
+//
+// Example usage:
+// InstanceAllocator ia0((uint8_t)0);
+// InstanceAllocator ia1((uint8_t)1);
+//
+// eastl::list<int, InstanceAllocator> list0(1, ia0);
+// eastl::list<int, InstanceAllocator> list1(1, ia1);
+//
+// list0 = list1; // list0 cannot free it's current contents with list1's allocator, and InstanceAllocator's purpose is to detect if it mistakenly does so.
+//
+class InstanceAllocator
+{
+public:
+ enum
+ {
+ kMultiplier = 16
+ }; // Use 16 because it's the highest currently known platform alignment requirement.
+
+ InstanceAllocator(const char* = NULL, uint8_t instanceId = 0) : mInstanceId(instanceId) {}
+ InstanceAllocator(uint8_t instanceId) : mInstanceId(instanceId) {}
+ InstanceAllocator(const InstanceAllocator& x) : mInstanceId(x.mInstanceId) {}
+ InstanceAllocator(const InstanceAllocator& x, const char*) : mInstanceId(x.mInstanceId) {}
+
+ InstanceAllocator& operator=(const InstanceAllocator& x)
+ {
+ mInstanceId = x.mInstanceId;
+ return *this;
+ }
+
+ void* allocate(size_t n, int = 0)
+ { // +1 so that we always have space to write mInstanceId.
+ uint8_t* p8 =
+ static_cast<uint8_t*>(malloc(n + (kMultiplier * (mInstanceId + 1)))); // We make allocations between
+ // different instances incompatible by
+ // tweaking their return values.
+ eastl::fill(p8, p8 + kMultiplier, 0xff);
+ EA_ANALYSIS_ASSUME(p8 != NULL);
+ *p8 = mInstanceId;
+ return p8 + (kMultiplier * (mInstanceId + 1));
+ }
+
+ void* allocate(size_t n, size_t, size_t, int = 0)
+ { // +1 so that we always have space to write mInstanceId.
+ uint8_t* p8 =
+ static_cast<uint8_t*>(malloc(n + (kMultiplier * (mInstanceId + 1)))); // We make allocations between
+ // different instances incompatible by
+ // tweaking their return values.
+ eastl::fill(p8, p8 + kMultiplier, 0xff);
+ EA_ANALYSIS_ASSUME(p8 != NULL);
+ *p8 = mInstanceId;
+ return p8 + (kMultiplier * (mInstanceId + 1));
+ }
+
+ void deallocate(void* p, size_t /*n*/)
+ {
+ uint8_t* p8 = static_cast<uint8_t*>(p) - (kMultiplier * (mInstanceId + 1));
+ EASTL_ASSERT(*p8 == mInstanceId); // mInstanceId must match the id used in allocate(), otherwise the behavior is
+ // undefined (probably a heap assert).
+ if (*p8 == mInstanceId) // It's possible that *p8 coincidentally matches mInstanceId if p8 is offset into memory
+ // we don't control.
+ free(p8);
+ else
+ ++mMismatchCount;
+ }
+
+ const char* get_name()
+ {
+ sprintf(mName, "InstanceAllocator %u", mInstanceId);
+ return mName;
+ }
+
+ void set_name(const char*) {}
+
+ static void reset_all() { mMismatchCount = 0; }
+
+public:
+ uint8_t mInstanceId;
+ char mName[32];
+
+ static int mMismatchCount;
+};
+
+inline bool operator==(const InstanceAllocator& a, const InstanceAllocator& b) { return (a.mInstanceId == b.mInstanceId); }
+inline bool operator!=(const InstanceAllocator& a, const InstanceAllocator& b) { return (a.mInstanceId != b.mInstanceId); }
+
+
+///////////////////////////////////////////////////////////////////////////////
+// ThrowingAllocator
+//
+// Implements an EASTL allocator that uses malloc/free as opposed to
+// new/delete or PPMalloc Malloc/Free. This is useful for testing
+// allocator behaviour of code.
+//
+// Example usage:
+// vector<int, ThrowingAllocator< false<> > intVector;
+//
+template <bool initialShouldThrow = true>
+class ThrowingAllocator
+{
+public:
+ ThrowingAllocator(const char* = EASTL_NAME_VAL("ThrowingAllocator")) : mbShouldThrow(initialShouldThrow) {}
+ ThrowingAllocator(const ThrowingAllocator& x) : mbShouldThrow(x.mbShouldThrow) {}
+ ThrowingAllocator(const ThrowingAllocator& x, const char*) : mbShouldThrow(x.mbShouldThrow) {}
+
+ ThrowingAllocator& operator=(const ThrowingAllocator& x)
+ {
+ mbShouldThrow = x.mbShouldThrow;
+ return *this;
+ }
+
+ void* allocate(size_t n, int = 0)
+ {
+#if EASTL_EXCEPTIONS_ENABLED
+ if (mbShouldThrow)
+ throw std::bad_alloc();
+#endif
+ return malloc(n);
+ }
+
+ void* allocate(size_t n, size_t, size_t, int = 0)
+ {
+#if EASTL_EXCEPTIONS_ENABLED
+ if (mbShouldThrow)
+ throw std::bad_alloc();
+#endif
+ return malloc(n); // We don't support alignment, so you can't use this class where alignment is required.
+ }
+
+ void deallocate(void* p, size_t) { free(p); }
+
+ const char* get_name() const { return "ThrowingAllocator"; }
+ void set_name(const char*) {}
+
+ void set_should_throw(bool shouldThrow) { mbShouldThrow = shouldThrow; }
+ bool get_should_throw() const { return mbShouldThrow; }
+
+protected:
+ bool mbShouldThrow;
+};
+
+template <bool initialShouldThrow>
+inline bool operator==(const ThrowingAllocator<initialShouldThrow>&, const ThrowingAllocator<initialShouldThrow>&)
+{
+ return true;
+}
+
+template <bool initialShouldThrow>
+inline bool operator!=(const ThrowingAllocator<initialShouldThrow>&, const ThrowingAllocator<initialShouldThrow>&)
+{
+ return false;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Helper utility that does a case insensitive string comparsion with two sets of overloads
+//
+struct TestStrCmpI_2
+{
+ bool operator()(const char* pCStr, const eastl::string& str) const { return str.comparei(pCStr) == 0; }
+ bool operator()(const eastl::string& str, const char* pCStr) const { return str.comparei(pCStr) == 0; }
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// StompDetectAllocator
+//
+// An allocator that has sentinal values surrounding its allocator in an
+// effort to detected if its internal memory has been stomped.
+//
+static uint64_t STOMP_MAGIC_V1 = 0x0101DEC1A551F1ED;
+static uint64_t STOMP_MAGIC_V2 = 0x12345C1A551F1ED5;
+
+struct StompDetectAllocator
+{
+ StompDetectAllocator() { Validate(); }
+ ~StompDetectAllocator() { Validate(); }
+
+ StompDetectAllocator(const char*) { Validate(); }
+
+ void* allocate(size_t n, int = 0) { return mMallocAllocator.allocate(n); }
+ void* allocate(size_t n, size_t, size_t, int = 0) { return mMallocAllocator.allocate(n); }
+ void deallocate(void* p, size_t n) { mMallocAllocator.deallocate(p, n); }
+
+ const char* get_name() const { return "FatAllocator"; }
+ void set_name(const char*) {}
+
+ void Validate() const
+ {
+ EASTL_ASSERT(mSentinal1 == STOMP_MAGIC_V1);
+ EASTL_ASSERT(mSentinal2 == STOMP_MAGIC_V2);
+ }
+
+ uint64_t mSentinal1 = STOMP_MAGIC_V1;
+ MallocAllocator mMallocAllocator;
+ uint64_t mSentinal2 = STOMP_MAGIC_V2;
+};
+
+inline bool operator==(const StompDetectAllocator& a, const StompDetectAllocator& b)
+{
+ a.Validate();
+ b.Validate();
+
+ return (a.mMallocAllocator == b.mMallocAllocator);
+}
+
+inline bool operator!=(const StompDetectAllocator& a, const StompDetectAllocator& b)
+{
+ a.Validate();
+ b.Validate();
+
+ return (a.mMallocAllocator != b.mMallocAllocator);
+}
+
+
+// Commonly used free-standing functions to test callables
+inline int ReturnVal(int param) { return param; }
+inline int ReturnZero() { return 0; }
+inline int ReturnOne() { return 1; }
+
+
+// ValueInit
+template<class T>
+struct ValueInitOf
+{
+ ValueInitOf() : mV() {}
+ ~ValueInitOf() = default;
+
+ ValueInitOf(const ValueInitOf&) = default;
+ ValueInitOf(ValueInitOf&&) = default;
+
+ ValueInitOf& operator=(const ValueInitOf&) = default;
+ ValueInitOf& operator=(ValueInitOf&&) = default;
+
+ T get() { return mV; }
+
+ T mV;
+};
+
+// MoveOnlyType - useful for verifying containers that may hold, e.g., unique_ptrs to make sure move ops are implemented
+struct MoveOnlyType
+{
+ MoveOnlyType() = delete;
+ MoveOnlyType(int val) : mVal(val) {}
+ MoveOnlyType(const MoveOnlyType&) = delete;
+ MoveOnlyType(MoveOnlyType&& x) : mVal(x.mVal) { x.mVal = 0; }
+ MoveOnlyType& operator=(const MoveOnlyType&) = delete;
+ MoveOnlyType& operator=(MoveOnlyType&& x)
+ {
+ mVal = x.mVal;
+ x.mVal = 0;
+ return *this;
+ }
+ bool operator==(const MoveOnlyType& o) const { return mVal == o.mVal; }
+
+ int mVal;
+};
+
+// MoveOnlyTypeDefaultCtor - useful for verifying containers that may hold, e.g., unique_ptrs to make sure move ops are implemented
+struct MoveOnlyTypeDefaultCtor
+{
+ MoveOnlyTypeDefaultCtor() = default;
+ MoveOnlyTypeDefaultCtor(int val) : mVal(val) {}
+ MoveOnlyTypeDefaultCtor(const MoveOnlyTypeDefaultCtor&) = delete;
+ MoveOnlyTypeDefaultCtor(MoveOnlyTypeDefaultCtor&& x) : mVal(x.mVal) { x.mVal = 0; }
+ MoveOnlyTypeDefaultCtor& operator=(const MoveOnlyTypeDefaultCtor&) = delete;
+ MoveOnlyTypeDefaultCtor& operator=(MoveOnlyTypeDefaultCtor&& x)
+ {
+ mVal = x.mVal;
+ x.mVal = 0;
+ return *this;
+ }
+ bool operator==(const MoveOnlyTypeDefaultCtor& o) const { return mVal == o.mVal; }
+
+ int mVal;
+};
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// Utility RAII class that sets a new default allocator for the scope
+//
+struct AutoDefaultAllocator
+{
+ eastl::allocator* mPrevAllocator = nullptr;
+
+ AutoDefaultAllocator(eastl::allocator* nextAllocator) { mPrevAllocator = SetDefaultAllocator(nextAllocator); }
+ ~AutoDefaultAllocator() { SetDefaultAllocator(mPrevAllocator); }
+};
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/EASTLTestAllocator.cpp b/EASTL/test/source/EASTLTestAllocator.cpp
new file mode 100644
index 0000000..0f03d8a
--- /dev/null
+++ b/EASTL/test/source/EASTLTestAllocator.cpp
@@ -0,0 +1,492 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTLTEST_ALLOCATOR_H
+#define EASTLTEST_ALLOCATOR_H
+
+#include <EABase/eabase.h>
+#include <EASTL/internal/config.h>
+#include <new>
+#include <stdio.h>
+
+#if !EASTL_OPENSOURCE
+
+ #include <PPMalloc/EAGeneralAllocator.h>
+ #include <PPMalloc/EAGeneralAllocatorDebug.h>
+
+ #include <coreallocator/icoreallocator_interface.h>
+
+ #if defined(EA_COMPILER_MSVC)
+ #include <math.h> // VS2008 has an acknowledged bug that requires math.h (and possibly also string.h) to be #included before intrin.h.
+ #include <intrin.h>
+ #pragma intrinsic(_ReturnAddress)
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // EASTLTest_GetGeneralAllocator()
+ //
+ namespace EA
+ {
+ namespace Allocator
+ {
+ #ifdef EA_DEBUG
+ extern PPM_API GeneralAllocatorDebug* gpEAGeneralAllocatorDebug;
+ #else
+ extern PPM_API GeneralAllocator* gpEAGeneralAllocator;
+ #endif
+
+ static inline auto& EASTLTest_GetGeneralAllocator()
+ {
+ #ifdef EA_DEBUG
+ using GeneralAllocatorType = GeneralAllocatorDebug;
+ #else
+ using GeneralAllocatorType = GeneralAllocator;
+ #endif
+
+ static GeneralAllocatorType sGeneralAllocator;
+ return sGeneralAllocator;
+ }
+ }
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // allocator counts for debugging purposes
+ //
+ int gEASTLTest_AllocationCount = 0;
+ int gEASTLTest_TotalAllocationCount = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // EASTLTest_ValidateHeap
+ //
+ bool EASTLTest_ValidateHeap()
+ {
+ #ifdef EA_DEBUG
+ return EA::Allocator::EASTLTest_GetGeneralAllocator().ValidateHeap(EA::Allocator::GeneralAllocator::kHeapValidationLevelBasic);
+ #else
+ return true;
+ #endif
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // Microsoft function parameter annotations
+ // https://msdn.microsoft.com/en-CA/library/hh916382.aspx
+ //
+ #ifndef _Ret_maybenull_
+ #define _Ret_maybenull_
+ #endif
+
+ #ifndef _Post_writable_byte_size_
+ #define _Post_writable_byte_size_(x)
+ #endif
+
+ #ifndef _Ret_notnull_
+ #define _Ret_notnull_
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // operator new extensions
+ //
+ namespace
+ {
+ #ifdef EA_DEBUG
+ const char gUnattributedNewTag[] = "Anonymous new";
+ #endif
+
+ #if defined(EA_COMPILER_MSVC)
+ #define UNATTRIBUTED_NEW_FILE "raw_return_address"
+ #define UNATTRIBUTED_NEW_LINE ((int)(uintptr_t)_ReturnAddress())
+ #else
+ #define UNATTRIBUTED_NEW_FILE NULL
+ #define UNATTRIBUTED_NEW_LINE 0
+ #endif
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // system memory allocation helpers
+ //
+ namespace
+ {
+ void* PlatformMalloc(size_t size, size_t alignment = 16)
+ {
+ #ifdef EA_PLATFORM_MICROSOFT
+ return _aligned_malloc(size, alignment);
+ #else
+ void *p = nullptr;
+ alignment = alignment < sizeof( void *) ? sizeof( void *) : alignment;
+ posix_memalign(&p, alignment, size);
+ return p;
+ #endif
+ }
+
+ void PlatformFree(void* p)
+ {
+ #ifdef EA_PLATFORM_MICROSOFT
+ _aligned_free(p);
+ #else
+ free(p);
+ #endif
+ }
+
+ void* InternalMalloc(size_t size)
+ {
+ void* mem = nullptr;
+
+ auto& allocator = EA::Allocator::EASTLTest_GetGeneralAllocator();
+
+ #ifdef EA_DEBUG
+ mem = allocator.MallocDebug(size, 0, 0, gUnattributedNewTag, UNATTRIBUTED_NEW_FILE, UNATTRIBUTED_NEW_LINE);
+ #else
+ mem = allocator.Malloc(size);
+ #endif
+
+ if(mem == nullptr)
+ mem = PlatformMalloc(size);
+
+ return mem;
+ }
+
+ void* InternalMalloc(size_t size, const char* name, int flags, unsigned debugFlags, const char* file, int line)
+ {
+ void* mem = nullptr;
+
+ auto& allocator = EA::Allocator::EASTLTest_GetGeneralAllocator();
+
+ #ifdef EA_DEBUG
+ mem = allocator.MallocDebug(size, flags, debugFlags, name, file, line);
+ #else
+ mem = allocator.Malloc(size, flags);
+ EA_UNUSED(debugFlags);
+ EA_UNUSED(file);
+ EA_UNUSED(line);
+ EA_UNUSED(name);
+ #endif
+
+ if(mem == nullptr)
+ mem = PlatformMalloc(size);
+
+ return mem;
+ }
+
+ void* InternalMalloc(size_t size, size_t alignment, const char* name, int flags, unsigned debugFlags, const char* file, int line)
+ {
+ void* mem = nullptr;
+
+ auto& allocator = EA::Allocator::EASTLTest_GetGeneralAllocator();
+
+ #ifdef EA_DEBUG
+ mem = allocator.MallocAlignedDebug(size, alignment, 0, flags, debugFlags, name, file, line);
+ #else
+ mem = allocator.MallocAligned(size, alignment, flags);
+ EA_UNUSED(debugFlags);
+ EA_UNUSED(file);
+ EA_UNUSED(line);
+ EA_UNUSED(name);
+ #endif
+
+ if(mem == nullptr)
+ mem = PlatformMalloc(size, alignment);
+
+ return mem;
+ }
+
+ void* InternalMalloc(size_t size, size_t alignment)
+ {
+ void* mem = nullptr;
+
+ auto& allocator = EA::Allocator::EASTLTest_GetGeneralAllocator();
+
+ #ifdef EA_DEBUG
+ mem = allocator.MallocAlignedDebug(size, alignment, 0, 0, 0, gUnattributedNewTag, UNATTRIBUTED_NEW_FILE, UNATTRIBUTED_NEW_LINE);
+ #else
+ mem = allocator.MallocAligned(size, alignment);
+ #endif
+
+ if(mem == nullptr)
+ mem = PlatformMalloc(size, alignment);
+
+ return mem;
+ }
+
+ void InternalFree(void* p)
+ {
+ auto& allocator = EA::Allocator::EASTLTest_GetGeneralAllocator();
+
+ if(allocator.ValidateAddress(p, EA::Allocator::GeneralAllocator::kAddressTypeOwned) == p)
+ {
+ allocator.Free(p);
+ }
+ else
+ {
+ PlatformFree(p);
+ }
+ }
+ }
+
+ class EASTLTestICA : public EA::Allocator::ICoreAllocator
+ {
+ public:
+ EASTLTestICA()
+ {
+ }
+
+ virtual ~EASTLTestICA()
+ {
+ }
+
+ virtual void* Alloc(size_t size, const char* name, unsigned int flags)
+ {
+ return ::InternalMalloc(size, name, (int)flags, 0, NULL, 0);
+ }
+
+ virtual void* Alloc(size_t size, const char* name, unsigned int flags,
+ unsigned int align, unsigned int)
+ {
+ return ::InternalMalloc(size, (size_t)align, name, (int)flags, 0, NULL, 0);
+ }
+
+ virtual void Free(void* pData, size_t /*size*/)
+ {
+ return ::InternalFree(pData);
+ }
+ };
+
+ EA::Allocator::ICoreAllocator* EA::Allocator::ICoreAllocator::GetDefaultAllocator()
+ {
+ static EASTLTestICA sEASTLTestICA;
+
+ return &sEASTLTestICA;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // operator new/delete implementations
+ //
+ _Ret_maybenull_ _Post_writable_byte_size_(size) void* operator new(size_t size, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE()
+ {
+ return InternalMalloc(size);
+ }
+
+
+ void operator delete(void* p, const std::nothrow_t&) EA_THROW_SPEC_DELETE_NONE()
+ {
+ if(p) // The standard specifies that 'delete NULL' is a valid operation.
+ {
+ gEASTLTest_AllocationCount--;
+ InternalFree(p);
+ }
+ }
+
+
+ _Ret_maybenull_ _Post_writable_byte_size_(size) void* operator new[](size_t size, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE()
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ void* p = InternalMalloc(size);
+ return p;
+ }
+
+
+ void operator delete[](void* p, const std::nothrow_t&) EA_THROW_SPEC_DELETE_NONE()
+ {
+ if(p)
+ {
+ gEASTLTest_AllocationCount--;
+ InternalFree(p);
+ }
+ }
+
+
+ _Ret_notnull_ _Post_writable_byte_size_(size) void* operator new(size_t size)
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ void* mem = InternalMalloc(size);
+
+ #if !defined(EA_COMPILER_NO_EXCEPTIONS)
+ if (mem == NULL)
+ {
+ throw std::bad_alloc();
+ }
+ #endif
+
+ return mem;
+ }
+
+
+ _Ret_notnull_ _Post_writable_byte_size_(size) void* operator new[](size_t size)
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ void* mem = InternalMalloc(size);
+
+ #if !defined(EA_COMPILER_NO_EXCEPTIONS)
+ if (mem == NULL)
+ {
+ throw std::bad_alloc();
+ }
+ #endif
+
+ return mem;
+ }
+
+
+ void* operator new[](size_t size, const char* name, int flags, unsigned debugFlags, const char* file, int line)
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ return InternalMalloc(size, name, flags, debugFlags, file, line);
+ }
+
+
+ void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* name, int flags, unsigned debugFlags, const char* file, int line)
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ return InternalMalloc(size, alignment, name, flags, debugFlags, file, line);
+ }
+
+ // Used by GCC when you make new objects of classes with >= N bit alignment (with N depending on the compiler).
+ void* operator new(size_t size, size_t alignment)
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ return InternalMalloc(size, alignment);
+ }
+
+ // Used by GCC when you make new objects of classes with >= N bit alignment (with N depending on the compiler).
+ void* operator new(size_t size, size_t alignment, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE()
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ return InternalMalloc(size, alignment);
+ }
+
+ // Used by GCC when you make new objects of classes with >= N bit alignment (with N depending on the compiler).
+ void* operator new[](size_t size, size_t alignment)
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ return InternalMalloc(size, alignment);
+ }
+
+ // Used by GCC when you make new objects of classes with >= N bit alignment (with N depending on the compiler).
+ void* operator new[](size_t size, size_t alignment, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE()
+ {
+ gEASTLTest_AllocationCount++;
+ gEASTLTest_TotalAllocationCount++;
+
+ return InternalMalloc(size, alignment);
+ }
+
+ void operator delete(void* p) EA_THROW_SPEC_DELETE_NONE()
+ {
+ if(p) // The standard specifies that 'delete NULL' is a valid operation.
+ {
+ gEASTLTest_AllocationCount--;
+ InternalFree(p);
+ }
+ }
+
+
+ void operator delete[](void* p) EA_THROW_SPEC_DELETE_NONE()
+ {
+ if(p)
+ {
+ gEASTLTest_AllocationCount--;
+ InternalFree(p);
+ }
+ }
+
+ void EASTLTest_SetGeneralAllocator()
+ {
+ EA::Allocator::SetGeneralAllocator(&EA::Allocator::EASTLTest_GetGeneralAllocator());
+ #ifdef EA_DEBUG
+ EA::Allocator::gpEAGeneralAllocatorDebug->SetDefaultDebugDataFlag(EA::Allocator::GeneralAllocatorDebug::kDebugDataIdGuard);
+ #endif
+ }
+
+#else
+ #if !defined(EA_PLATFORM_MICROSOFT) || defined(EA_PLATFORM_MINGW)
+ #include <stdlib.h>
+ #endif
+
+ namespace Internal
+ {
+ void* EASTLAlignedAlloc(size_t size, size_t alignment)
+ {
+ #ifdef EA_PLATFORM_MICROSOFT
+ return _aligned_malloc(size, alignment);
+ #else
+ void *p = nullptr;
+ alignment = alignment < sizeof( void *) ? sizeof( void *) : alignment;
+ posix_memalign(&p, alignment, size);
+ return p;
+ #endif
+ }
+
+ void EASTLAlignedFree(void* p)
+ {
+ #ifdef EA_PLATFORM_MICROSOFT
+ _aligned_free(p);
+ #else
+ free(p);
+ #endif
+ }
+ }
+
+ void* operator new(size_t size)
+ { return Internal::EASTLAlignedAlloc(size, 16); }
+
+ void* operator new[](size_t size)
+ { return Internal::EASTLAlignedAlloc(size, 16); }
+
+ void* operator new[](size_t size, const char* /*name*/, int /*flags*/, unsigned /*debugFlags*/, const char* /*file*/, int /*line*/)
+ { return Internal::EASTLAlignedAlloc(size, 16); }
+
+ void* operator new[](size_t size, size_t alignment, size_t /*alignmentOffset*/, const char* /*name*/, int /*flags*/, unsigned /*debugFlags*/, const char* /*file*/, int /*line*/)
+ { return Internal::EASTLAlignedAlloc(size, alignment); }
+
+ void* operator new(size_t size, size_t alignment)
+ { return Internal::EASTLAlignedAlloc(size, alignment); }
+
+ void* operator new(size_t size, size_t alignment, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE()
+ { return Internal::EASTLAlignedAlloc(size, alignment); }
+
+ void* operator new[](size_t size, size_t alignment)
+ { return Internal::EASTLAlignedAlloc(size, alignment); }
+
+ void* operator new[](size_t size, size_t alignment, const std::nothrow_t&)EA_THROW_SPEC_NEW_NONE()
+ { return Internal::EASTLAlignedAlloc(size, alignment); }
+
+ // C++14 deleter
+ void operator delete(void* p, std::size_t sz ) EA_THROW_SPEC_DELETE_NONE()
+ { Internal::EASTLAlignedFree(p); EA_UNUSED(sz); }
+
+ void operator delete[](void* p, std::size_t sz ) EA_THROW_SPEC_DELETE_NONE()
+ { Internal::EASTLAlignedFree(p); EA_UNUSED(sz); }
+
+ void operator delete(void* p) EA_THROW_SPEC_DELETE_NONE()
+ { Internal::EASTLAlignedFree(p); }
+
+ void operator delete[](void* p) EA_THROW_SPEC_DELETE_NONE()
+ { Internal::EASTLAlignedFree(p); }
+
+ void EASTLTest_SetGeneralAllocator() { /* intentionally blank */ }
+ bool EASTLTest_ValidateHeap() { return true; }
+
+#endif // !EASTL_OPENSOURCE
+
+#endif // Header include guard
diff --git a/EASTL/test/source/EASTLTestAllocator.h b/EASTL/test/source/EASTLTestAllocator.h
new file mode 100644
index 0000000..775aff6
--- /dev/null
+++ b/EASTL/test/source/EASTLTestAllocator.h
@@ -0,0 +1,26 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTLTEST_ALLOCATOR_H
+#define EASTLTEST_ALLOCATOR_H
+
+#include <EABase/eabase.h>
+#include <new>
+
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void* operator new[](size_t size, const char* /*name*/, int /*flags*/, unsigned /*debugFlags*/, const char* /*file*/, int /*line*/);
+ void* operator new[](size_t size, size_t alignment, size_t /*alignmentOffset*/, const char* /*name*/, int /*flags*/, unsigned /*debugFlags*/, const char* /*file*/, int /*line*/);
+ void* operator new(size_t size, size_t alignment);
+ void* operator new(size_t size, size_t alignment, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE();
+ void* operator new[](size_t size, size_t alignment);
+ void* operator new[](size_t size, size_t alignment, const std::nothrow_t&)EA_THROW_SPEC_NEW_NONE();
+ void operator delete(void* p) EA_THROW_SPEC_DELETE_NONE();
+ void operator delete[](void* p) EA_THROW_SPEC_DELETE_NONE();
+ void EASTLTest_SetGeneralAllocator();
+ bool EASTLTest_ValidateHeap();
+
+
+#endif // Header include guard
diff --git a/EASTL/test/source/GetTypeName.h b/EASTL/test/source/GetTypeName.h
new file mode 100644
index 0000000..f844167
--- /dev/null
+++ b/EASTL/test/source/GetTypeName.h
@@ -0,0 +1,119 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef GETTYPENAME_H
+#define GETTYPENAME_H
+
+
+#include <EABase/eabase.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/string.h>
+#include <stdlib.h>
+#include <typeinfo>
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIBSTDCPP_DEMANGLE_AVAILABLE
+//
+// Defined as 0 or 1. The value depends on the compilation environment.
+// Indicates if we can use system-provided abi::__cxa_demangle() at runtime.
+//
+#if !defined(EASTL_LIBSTDCPP_DEMANGLE_AVAILABLE)
+ #if (defined(EA_PLATFORM_LINUX) || defined(EA_PLATFORM_APPLE)) && defined(EA_PLATFORM_DESKTOP)
+ #define EASTL_LIBSTDCPP_DEMANGLE_AVAILABLE 1
+ #else
+ #define EASTL_LIBSTDCPP_DEMANGLE_AVAILABLE 0
+ #endif
+#endif
+
+
+#if EASTL_LIBSTDCPP_DEMANGLE_AVAILABLE
+ #include <cxxabi.h>
+#elif EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_DESKTOP)
+ EA_DISABLE_ALL_VC_WARNINGS();
+ #include <Windows.h>
+ #include <DbgHelp.h>
+ #pragma comment(lib, "dbghelp.lib")
+ EA_RESTORE_ALL_VC_WARNINGS();
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTLTEST_GETTYPENAME_AVAILABLE
+//
+// Defined as 0 or 1. The value depends on the compilation environment.
+// Indicates if we can use system-provided abi::__cxa_demangle() at runtime.
+//
+#if !defined(EASTLTEST_GETTYPENAME_AVAILABLE)
+ #if (EASTL_LIBSTDCPP_DEMANGLE_AVAILABLE || EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_DESKTOP)) && (!defined(EA_COMPILER_NO_RTTI) || defined(_MSC_VER)) // VC++ works without RTTI enabled.
+ #define EASTLTEST_GETTYPENAME_AVAILABLE 1
+ #else
+ #define EASTLTEST_GETTYPENAME_AVAILABLE 0
+ #endif
+#endif
+
+
+/// GetTypeName
+///
+/// Returns the type name of a templated type.
+///
+template <typename T>
+eastl::string GetTypeName()
+{
+ eastl::string result;
+
+ #if !defined(EA_COMPILER_NO_RTTI) || defined(_MSC_VER) // VC++ works without RTTI enabled.
+ typedef typename eastl::remove_reference<T>::type TR;
+
+ const char* pName = typeid(TR).name();
+
+ #if EASTL_LIBSTDCPP_DEMANGLE_AVAILABLE
+ const char* pDemangledName = abi::__cxa_demangle(pName, NULL, NULL, NULL);
+
+ #elif EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_DESKTOP)
+ char pDemangledName[1024];
+ DWORD count = UnDecorateSymbolName(pName, pDemangledName, (DWORD)EAArrayCount(pDemangledName), UNDNAME_NO_THISTYPE | UNDNAME_NO_ACCESS_SPECIFIERS | UNDNAME_NO_MEMBER_TYPE);
+ if(count == 0)
+ pDemangledName[0] = 0;
+ #else
+ const char* pDemangledName = NULL;
+ #endif
+
+ if(pDemangledName && pDemangledName[0])
+ result = pDemangledName;
+ else
+ result = pName;
+
+ if(eastl::is_const<TR>::value)
+ result += " const";
+
+ if(eastl::is_volatile<TR>::value)
+ result += " volatile";
+
+ if(eastl::is_lvalue_reference<T>::value)
+ result += "&";
+ else if(eastl::is_rvalue_reference<T>::value)
+ result += "&&";
+
+ if(pDemangledName)
+ {
+ #if EASTL_LIBSTDCPP_DEMANGLE_AVAILABLE
+ free((void*)(pDemangledName));
+ #endif
+ }
+ #endif
+
+ return result;
+}
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestAlgorithm.cpp b/EASTL/test/source/TestAlgorithm.cpp
new file mode 100644
index 0000000..a0f64da
--- /dev/null
+++ b/EASTL/test/source/TestAlgorithm.cpp
@@ -0,0 +1,2761 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(_MSC_VER)
+ // We have little choice but to disable this warning. See the FAQ for why.
+ #pragma warning(disable: 4244) // conversion from '___' to '___', possible loss of data
+#endif
+
+
+#include <EASTL/algorithm.h>
+#include <EASTL/functional.h>
+#include <EASTL/unique_ptr.h>
+#include <EASTL/vector.h>
+#include <EASTL/array.h>
+#include <EASTL/deque.h>
+#include <EASTL/list.h>
+#include <EASTL/slist.h>
+#include <EASTL/string.h>
+#include <EASTL/set.h>
+#include <EASTL/sort.h>
+#include "ConceptImpls.h"
+#include <EAStdC/EAMemory.h>
+#include "EASTLTest.h" // Put this after the above so that it doesn't block any warnings from the includes above.
+
+namespace eastl
+{
+ #if 0
+ // These are some tests of altermative implementations of branch-free min/max functions.
+ /*
+ union FloatInt32Union
+ {
+ float f;
+ int32_t i;
+ };
+
+ inline float min_alt2(float a, float b)
+ {
+ FloatInt32Union uc;
+ uc.f = a - b;
+
+ const float choices[2] = { a, b };
+ return (choices + 1)[uc.i >> 31];
+ }
+
+ inline float min_alt3(float a, float b)
+ {
+ FloatInt32Union uc, ua, ub, ur;
+
+ uc.f = a - b;
+ uc.i >>= 31;
+ ua.f = a;
+ ub.f = b;
+ ur.i = (ua.i & uc.i) | (ub.i & ~uc.i);
+
+ return ur.f;
+ }
+ */
+ #endif
+}
+
+
+namespace
+{
+ struct A{
+ A(int n) : a(n){}
+ int a;
+ };
+ struct LessStruct{ bool operator()(const A& a1, const A& a2){ return a1.a < a2.a; } };
+
+
+ struct B{
+ B(int n) : b(n){}
+ int b;
+ };
+ inline bool LessFunction(const B& b1, const B& b2){ return b1.b < b2.b; }
+}
+
+enum TestMinMaxEnum
+{
+ teX = 0,
+ teY = 3
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Greater
+//
+// A version of greater that uses operator < instead of operator >.
+//
+template <typename T>
+struct Greater : public eastl::binary_function<T, T, bool>
+{
+ bool operator()(const T& a, const T& b) const
+ { return (b < a); }
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// DivisibleBy
+//
+struct DivisibleBy
+{
+ int d;
+ DivisibleBy(int n = 1) : d(n) {}
+ bool operator()(int n) const { return ((n % d) == 0); }
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestObjectNegate
+//
+struct TestObjectNegate : public eastl::unary_function<TestObject, TestObject>
+{
+ TestObject operator()(const TestObject& a) const
+ { return TestObject(-a.mX); }
+};
+
+static int TestMinMax()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+
+ {
+ // NOTE(rparolin): This compiles but it should not. We provide explicit eastl::max overloads for float, double,
+ // and long double which enable this behaviour. It is not standards compliant and it will be removed in a
+ // future release.
+ {
+ struct Foo
+ {
+ operator float() const { return 0; }
+ };
+
+ Foo f1;
+ float f2{};
+ eastl::max(f1, f2);
+ }
+
+ // NOTE(rparolin): This will not compile because we lack explicit eastl::max overloads for 'int'.
+ // {
+ // struct Foo
+ // {
+ // operator int() const { return 0; }
+ // };
+
+ // Foo f1;
+ // int f2{};
+ // eastl::max(f1, f2);
+ // }
+ }
+
+ {
+ // const T& min(const T& a, const T& b);
+ // const T& min(const T& a, const T& b, Compare compare)
+ // const T& max(const T& a, const T& b);
+ // const T& max(const T& a, const T& b, Compare compare)
+
+ A a1(1), a2(2), a3(3);
+ a3 = min(a1, a2, LessStruct());
+ EATEST_VERIFY(a3.a == 1);
+ a3 = max(a1, a2, LessStruct());
+ EATEST_VERIFY(a3.a == 2);
+
+ B b1(1), b2(2), b3(3);
+ b3 = min(b2, b1, LessFunction);
+ EATEST_VERIFY(b3.b == 1);
+ b3 = max(b2, b1, LessFunction);
+ EATEST_VERIFY(b3.b == 2);
+
+
+ TestObject t1(1), t2(2), t3(3);
+ t3 = min(t2, t1);
+ EATEST_VERIFY(t3.mX == 1);
+ t3 = max(t2, t1);
+ EATEST_VERIFY(t3.mX == 2);
+
+
+ int i1, i2(-1), i3(1);
+ i1 = min(i2, i3);
+ EATEST_VERIFY(i1 == -1);
+ i1 = min(i3, i2);
+ EATEST_VERIFY(i1 == -1);
+ i1 = max(i2, i3);
+ EATEST_VERIFY(i1 == 1);
+ i1 = max(i3, i2);
+ EATEST_VERIFY(i1 == 1);
+
+ const volatile int i2cv(-1), i3cv(1);
+ i1 = min(i2cv, i3cv);
+ EATEST_VERIFY(i1 == -1);
+ i1 = min(i3cv, i2cv);
+ EATEST_VERIFY(i1 == -1);
+ i1 = max(i2cv, i3cv);
+ EATEST_VERIFY(i1 == 1);
+ i1 = max(i3cv, i2cv);
+ EATEST_VERIFY(i1 == 1);
+
+ float f1, f2(-1), f3(1);
+ f1 = min(f2, f3);
+ EATEST_VERIFY(f1 == -1);
+ f1 = min(f3, f2);
+ EATEST_VERIFY(f1 == -1);
+ f1 = max(f2, f3);
+ EATEST_VERIFY(f1 == 1);
+ f1 = max(f3, f2);
+ EATEST_VERIFY(f1 == 1);
+
+ double d1, d2(-1), d3(1);
+ d1 = min(d2, d3);
+ EATEST_VERIFY(d1 == -1);
+ d1 = min(d3, d2);
+ EATEST_VERIFY(d1 == -1);
+ d1 = max(d2, d3);
+ EATEST_VERIFY(d1 == 1);
+ d1 = max(d3, d2);
+ EATEST_VERIFY(d1 == 1);
+
+ void* p1, *p2 = &d2, *p3 = &d3;
+ p1 = min(p2, p3);
+ EATEST_VERIFY((uintptr_t)p1 == min((uintptr_t)p2, (uintptr_t)p3));
+
+ double* pd1, *pd2 = &d2, *pd3 = &d3;
+ pd1 = min(pd2, pd3);
+ EATEST_VERIFY((uintptr_t)pd1 == min((uintptr_t)pd2, (uintptr_t)pd3));
+
+
+ // initializer_list tests
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ EATEST_VERIFY(min({ 3, 1, 2}) == 1);
+ EATEST_VERIFY(max({ 3, 1, 2}) == 3);
+ #endif
+
+
+ // Test scalar specializations
+ EATEST_VERIFY(min((char)1, (char)1) == 1);
+ EATEST_VERIFY(min((char)1, (char)2) == 1);
+ EATEST_VERIFY(min((char)2, (char)1) == 1);
+
+ EATEST_VERIFY(min((signed char)1, (signed char)1) == 1);
+ EATEST_VERIFY(min((signed char)1, (signed char)2) == 1);
+ EATEST_VERIFY(min((signed char)2, (signed char)1) == 1);
+
+ EATEST_VERIFY(min((unsigned char)1, (unsigned char)1) == 1);
+ EATEST_VERIFY(min((unsigned char)1, (unsigned char)2) == 1);
+ EATEST_VERIFY(min((unsigned char)2, (unsigned char)1) == 1);
+
+ EATEST_VERIFY(min((signed short)1, (signed short)1) == 1);
+ EATEST_VERIFY(min((signed short)1, (signed short)2) == 1);
+ EATEST_VERIFY(min((signed short)2, (signed short)1) == 1);
+
+ EATEST_VERIFY(min((unsigned short)1, (unsigned short)1) == 1);
+ EATEST_VERIFY(min((unsigned short)1, (unsigned short)2) == 1);
+ EATEST_VERIFY(min((unsigned short)2, (unsigned short)1) == 1);
+
+ EATEST_VERIFY(min((signed int)1, (signed int)1) == 1);
+ EATEST_VERIFY(min((signed int)1, (signed int)2) == 1);
+ EATEST_VERIFY(min((signed int)2, (signed int)1) == 1);
+
+ EATEST_VERIFY(min((unsigned int)1, (unsigned int)1) == 1);
+ EATEST_VERIFY(min((unsigned int)1, (unsigned int)2) == 1);
+ EATEST_VERIFY(min((unsigned int)2, (unsigned int)1) == 1);
+
+ EATEST_VERIFY(min((signed long)1, (signed long)1) == 1);
+ EATEST_VERIFY(min((signed long)1, (signed long)2) == 1);
+ EATEST_VERIFY(min((signed long)2, (signed long)1) == 1);
+
+ EATEST_VERIFY(min((unsigned long)1, (unsigned long)1) == 1);
+ EATEST_VERIFY(min((unsigned long)1, (unsigned long)2) == 1);
+ EATEST_VERIFY(min((unsigned long)2, (unsigned long)1) == 1);
+
+ EATEST_VERIFY(min((signed long long)1, (signed long long)1) == 1);
+ EATEST_VERIFY(min((signed long long)1, (signed long long)2) == 1);
+ EATEST_VERIFY(min((signed long long)2, (signed long long)1) == 1);
+
+ EATEST_VERIFY(min((unsigned long long)1, (unsigned long long)1) == 1);
+ EATEST_VERIFY(min((unsigned long long)1, (unsigned long long)2) == 1);
+ EATEST_VERIFY(min((unsigned long long)2, (unsigned long long)1) == 1);
+
+ EATEST_VERIFY(min((float)1, (float)1) == 1);
+ EATEST_VERIFY(min((float)1, (float)2) == 1);
+ EATEST_VERIFY(min((float)2, (float)1) == 1);
+
+ EATEST_VERIFY(min((double)1, (double)1) == 1);
+ EATEST_VERIFY(min((double)1, (double)2) == 1);
+ EATEST_VERIFY(min((double)2, (double)1) == 1);
+
+ EATEST_VERIFY(min((long double)1, (long double)1) == 1);
+ EATEST_VERIFY(min((long double)1, (long double)2) == 1);
+ EATEST_VERIFY(min((long double)2, (long double)1) == 1);
+
+
+ // Test max specializations
+ EATEST_VERIFY(max((char)1, (char)1) == 1);
+ EATEST_VERIFY(max((char)1, (char)2) == 2);
+ EATEST_VERIFY(max((char)2, (char)1) == 2);
+
+ EATEST_VERIFY(max((signed char)1, (signed char)1) == 1);
+ EATEST_VERIFY(max((signed char)1, (signed char)2) == 2);
+ EATEST_VERIFY(max((signed char)2, (signed char)1) == 2);
+
+ EATEST_VERIFY(max((unsigned char)1, (unsigned char)1) == 1);
+ EATEST_VERIFY(max((unsigned char)1, (unsigned char)2) == 2);
+ EATEST_VERIFY(max((unsigned char)2, (unsigned char)1) == 2);
+
+ EATEST_VERIFY(max((signed short)1, (signed short)1) == 1);
+ EATEST_VERIFY(max((signed short)1, (signed short)2) == 2);
+ EATEST_VERIFY(max((signed short)2, (signed short)1) == 2);
+
+ EATEST_VERIFY(max((unsigned short)1, (unsigned short)1) == 1);
+ EATEST_VERIFY(max((unsigned short)1, (unsigned short)2) == 2);
+ EATEST_VERIFY(max((unsigned short)2, (unsigned short)1) == 2);
+
+ EATEST_VERIFY(max((signed int)1, (signed int)1) == 1);
+ EATEST_VERIFY(max((signed int)1, (signed int)2) == 2);
+ EATEST_VERIFY(max((signed int)2, (signed int)1) == 2);
+
+ EATEST_VERIFY(max((unsigned int)1, (unsigned int)1) == 1);
+ EATEST_VERIFY(max((unsigned int)1, (unsigned int)2) == 2);
+ EATEST_VERIFY(max((unsigned int)2, (unsigned int)1) == 2);
+
+ EATEST_VERIFY(max((signed long)1, (signed long)1) == 1);
+ EATEST_VERIFY(max((signed long)1, (signed long)2) == 2);
+ EATEST_VERIFY(max((signed long)2, (signed long)1) == 2);
+
+ EATEST_VERIFY(max((unsigned long)1, (unsigned long)1) == 1);
+ EATEST_VERIFY(max((unsigned long)1, (unsigned long)2) == 2);
+ EATEST_VERIFY(max((unsigned long)2, (unsigned long)1) == 2);
+
+ EATEST_VERIFY(max((signed long long)1, (signed long long)1) == 1);
+ EATEST_VERIFY(max((signed long long)1, (signed long long)2) == 2);
+ EATEST_VERIFY(max((signed long long)2, (signed long long)1) == 2);
+
+ EATEST_VERIFY(max((unsigned long long)1, (unsigned long long)1) == 1);
+ EATEST_VERIFY(max((unsigned long long)1, (unsigned long long)2) == 2);
+ EATEST_VERIFY(max((unsigned long long)2, (unsigned long long)1) == 2);
+
+ EATEST_VERIFY(max((float)1, (float)1) == 1);
+ EATEST_VERIFY(max((float)1, (float)2) == 2);
+ EATEST_VERIFY(max((float)2, (float)1) == 2);
+
+ EATEST_VERIFY(max((double)1, (double)1) == 1);
+ EATEST_VERIFY(max((double)1, (double)2) == 2);
+ EATEST_VERIFY(max((double)2, (double)1) == 2);
+
+ EATEST_VERIFY(max((long double)1, (long double)1) == 1);
+ EATEST_VERIFY(max((long double)1, (long double)2) == 2);
+ EATEST_VERIFY(max((long double)2, (long double)1) == 2);
+
+
+ // Test min_alt specializations
+ EATEST_VERIFY(min_alt((char)1, (char)1) == 1);
+ EATEST_VERIFY(min_alt((char)1, (char)2) == 1);
+ EATEST_VERIFY(min_alt((char)2, (char)1) == 1);
+
+ EATEST_VERIFY(min_alt((signed char)1, (signed char)1) == 1);
+ EATEST_VERIFY(min_alt((signed char)1, (signed char)2) == 1);
+ EATEST_VERIFY(min_alt((signed char)2, (signed char)1) == 1);
+
+ EATEST_VERIFY(min_alt((unsigned char)1, (unsigned char)1) == 1);
+ EATEST_VERIFY(min_alt((unsigned char)1, (unsigned char)2) == 1);
+ EATEST_VERIFY(min_alt((unsigned char)2, (unsigned char)1) == 1);
+
+ EATEST_VERIFY(min_alt((signed short)1, (signed short)1) == 1);
+ EATEST_VERIFY(min_alt((signed short)1, (signed short)2) == 1);
+ EATEST_VERIFY(min_alt((signed short)2, (signed short)1) == 1);
+
+ EATEST_VERIFY(min_alt((unsigned short)1, (unsigned short)1) == 1);
+ EATEST_VERIFY(min_alt((unsigned short)1, (unsigned short)2) == 1);
+ EATEST_VERIFY(min_alt((unsigned short)2, (unsigned short)1) == 1);
+
+ EATEST_VERIFY(min_alt((signed int)1, (signed int)1) == 1);
+ EATEST_VERIFY(min_alt((signed int)1, (signed int)2) == 1);
+ EATEST_VERIFY(min_alt((signed int)2, (signed int)1) == 1);
+
+ EATEST_VERIFY(min_alt((unsigned int)1, (unsigned int)1) == 1);
+ EATEST_VERIFY(min_alt((unsigned int)1, (unsigned int)2) == 1);
+ EATEST_VERIFY(min_alt((unsigned int)2, (unsigned int)1) == 1);
+
+ EATEST_VERIFY(min_alt((signed long)1, (signed long)1) == 1);
+ EATEST_VERIFY(min_alt((signed long)1, (signed long)2) == 1);
+ EATEST_VERIFY(min_alt((signed long)2, (signed long)1) == 1);
+
+ EATEST_VERIFY(min_alt((unsigned long)1, (unsigned long)1) == 1);
+ EATEST_VERIFY(min_alt((unsigned long)1, (unsigned long)2) == 1);
+ EATEST_VERIFY(min_alt((unsigned long)2, (unsigned long)1) == 1);
+
+ EATEST_VERIFY(min_alt((signed long long)1, (signed long long)1) == 1);
+ EATEST_VERIFY(min_alt((signed long long)1, (signed long long)2) == 1);
+ EATEST_VERIFY(min_alt((signed long long)2, (signed long long)1) == 1);
+
+ EATEST_VERIFY(min_alt((unsigned long long)1, (unsigned long long)1) == 1);
+ EATEST_VERIFY(min_alt((unsigned long long)1, (unsigned long long)2) == 1);
+ EATEST_VERIFY(min_alt((unsigned long long)2, (unsigned long long)1) == 1);
+
+ EATEST_VERIFY(min_alt((float)1, (float)1) == 1);
+ EATEST_VERIFY(min_alt((float)1, (float)2) == 1);
+ EATEST_VERIFY(min_alt((float)2, (float)1) == 1);
+
+ EATEST_VERIFY(min_alt((double)1, (double)1) == 1);
+ EATEST_VERIFY(min_alt((double)1, (double)2) == 1);
+ EATEST_VERIFY(min_alt((double)2, (double)1) == 1);
+
+ EATEST_VERIFY(min_alt((long double)1, (long double)1) == 1);
+ EATEST_VERIFY(min_alt((long double)1, (long double)2) == 1);
+ EATEST_VERIFY(min_alt((long double)2, (long double)1) == 1);
+
+
+ // Test max_alt specializations
+ EATEST_VERIFY(max_alt((char)1, (char)1) == 1);
+ EATEST_VERIFY(max_alt((char)1, (char)2) == 2);
+ EATEST_VERIFY(max_alt((char)2, (char)1) == 2);
+
+ EATEST_VERIFY(max_alt((signed char)1, (signed char)1) == 1);
+ EATEST_VERIFY(max_alt((signed char)1, (signed char)2) == 2);
+ EATEST_VERIFY(max_alt((signed char)2, (signed char)1) == 2);
+
+ EATEST_VERIFY(max_alt((unsigned char)1, (unsigned char)1) == 1);
+ EATEST_VERIFY(max_alt((unsigned char)1, (unsigned char)2) == 2);
+ EATEST_VERIFY(max_alt((unsigned char)2, (unsigned char)1) == 2);
+
+ EATEST_VERIFY(max_alt((signed short)1, (signed short)1) == 1);
+ EATEST_VERIFY(max_alt((signed short)1, (signed short)2) == 2);
+ EATEST_VERIFY(max_alt((signed short)2, (signed short)1) == 2);
+
+ EATEST_VERIFY(max_alt((unsigned short)1, (unsigned short)1) == 1);
+ EATEST_VERIFY(max_alt((unsigned short)1, (unsigned short)2) == 2);
+ EATEST_VERIFY(max_alt((unsigned short)2, (unsigned short)1) == 2);
+
+ EATEST_VERIFY(max_alt((signed int)1, (signed int)1) == 1);
+ EATEST_VERIFY(max_alt((signed int)1, (signed int)2) == 2);
+ EATEST_VERIFY(max_alt((signed int)2, (signed int)1) == 2);
+
+ EATEST_VERIFY(max_alt((unsigned int)1, (unsigned int)1) == 1);
+ EATEST_VERIFY(max_alt((unsigned int)1, (unsigned int)2) == 2);
+ EATEST_VERIFY(max_alt((unsigned int)2, (unsigned int)1) == 2);
+
+ EATEST_VERIFY(max_alt((signed long)1, (signed long)1) == 1);
+ EATEST_VERIFY(max_alt((signed long)1, (signed long)2) == 2);
+ EATEST_VERIFY(max_alt((signed long)2, (signed long)1) == 2);
+
+ EATEST_VERIFY(max_alt((unsigned long)1, (unsigned long)1) == 1);
+ EATEST_VERIFY(max_alt((unsigned long)1, (unsigned long)2) == 2);
+ EATEST_VERIFY(max_alt((unsigned long)2, (unsigned long)1) == 2);
+
+ EATEST_VERIFY(max_alt((signed long long)1, (signed long long)1) == 1);
+ EATEST_VERIFY(max_alt((signed long long)1, (signed long long)2) == 2);
+ EATEST_VERIFY(max_alt((signed long long)2, (signed long long)1) == 2);
+
+ EATEST_VERIFY(max_alt((unsigned long long)1, (unsigned long long)1) == 1);
+ EATEST_VERIFY(max_alt((unsigned long long)1, (unsigned long long)2) == 2);
+ EATEST_VERIFY(max_alt((unsigned long long)2, (unsigned long long)1) == 2);
+
+ EATEST_VERIFY(max_alt((float)1, (float)1) == 1);
+ EATEST_VERIFY(max_alt((float)1, (float)2) == 2);
+ EATEST_VERIFY(max_alt((float)2, (float)1) == 2);
+
+ EATEST_VERIFY(max_alt((double)1, (double)1) == 1);
+ EATEST_VERIFY(max_alt((double)1, (double)2) == 2);
+ EATEST_VERIFY(max_alt((double)2, (double)1) == 2);
+
+ EATEST_VERIFY(max_alt((long double)1, (long double)1) == 1);
+ EATEST_VERIFY(max_alt((long double)1, (long double)2) == 2);
+ EATEST_VERIFY(max_alt((long double)2, (long double)1) == 2);
+ }
+
+ {
+ // const T& min_alt(const T& a, const T& b);
+ // const T& min_alt(const T& a, const T& b, Compare compare)
+ // const T& max_alt(const T& a, const T& b);
+ // const T& max_alt(const T& a, const T& b, Compare compare)
+
+ A a1(1), a2(2), a3(3);
+ a3 = min_alt(a1, a2, LessStruct());
+ EATEST_VERIFY(a3.a == 1);
+ a3 = max_alt(a1, a2, LessStruct());
+ EATEST_VERIFY(a3.a == 2);
+
+ B b1(1), b2(2), b3(3);
+ b3 = min_alt(b2, b1, LessFunction);
+ EATEST_VERIFY(b3.b == 1);
+ b3 = max_alt(b2, b1, LessFunction);
+ EATEST_VERIFY(b3.b == 2);
+
+
+ TestObject t1(1), t2(2), t3(3);
+ t3 = min_alt(t2, t1);
+ EATEST_VERIFY(t3.mX == 1);
+ t3 = max_alt(t2, t1);
+ EATEST_VERIFY(t3.mX == 2);
+
+
+ int i1, i2(-1), i3(1);
+ i1 = min_alt(i2, i3);
+ EATEST_VERIFY(i1 == -1);
+ i1 = min_alt(i3, i2);
+ EATEST_VERIFY(i1 == -1);
+ i1 = max_alt(i2, i3);
+ EATEST_VERIFY(i1 == 1);
+ i1 = max_alt(i3, i2);
+ EATEST_VERIFY(i1 == 1);
+
+ float f1, f2(-1), f3(1);
+ f1 = min_alt(f2, f3);
+ EATEST_VERIFY(f1 == -1);
+ f1 = min_alt(f3, f2);
+ EATEST_VERIFY(f1 == -1);
+ f1 = max_alt(f2, f3);
+ EATEST_VERIFY(f1 == 1);
+ f1 = max_alt(f3, f2);
+ EATEST_VERIFY(f1 == 1);
+
+ double d1, d2(-1), d3(1);
+ d1 = min_alt(d2, d3);
+ EATEST_VERIFY(d1 == -1);
+ d1 = min_alt(d3, d2);
+ EATEST_VERIFY(d1 == -1);
+ d1 = max_alt(d2, d3);
+ EATEST_VERIFY(d1 == 1);
+ d1 = max_alt(d3, d2);
+ EATEST_VERIFY(d1 == 1);
+
+ // Make sure enums work
+ static_assert(eastl::is_enum<TestMinMaxEnum>::value, "is_enum failure");
+ EATEST_VERIFY(eastl::min(teX, teY) == teX);
+
+ // Make sure pointers work
+ TestObject testObjectArray[2];
+ EATEST_VERIFY(eastl::min(&testObjectArray[0], &testObjectArray[1]) == &testObjectArray[0]);
+
+ // Regression for Microsoft warning C4347 (http://msdn.microsoft.com/en-us/library/x7wb5te0.aspx)
+ int32_t value = rng.RandRange(17, 18);
+ int32_t result = eastl::max_alt<int32_t>(0, value); // warning C4347: behavior change: 'const T &eastl::max_alt<int32_t>(const T &,const T &)' is called instead of 'int eastl::max_alt(int,int)'
+ EATEST_VERIFY(result == 17);
+
+ // Regression for Microsoft error C2666 (http://msdn.microsoft.com/en-us/library/dyafzty4%28v=vs.110%29.aspx)
+ uint32_t value2a = 17;
+ uint32_t value2b = 2;
+ uint32_t result2 = eastl::min_alt<uint32_t>(value2a - value2b, 4); // error C2666: 'eastl::min_alt' : 12 overloads have similar conversions
+ EATEST_VERIFY(result2 == 4);
+
+ // Regression for volatile arguments + literals
+ // This test is disabled until we come up with a solution for this. std::min gives the same result as below, so we aren't necessarily obligated to resolve this.
+ // volatile uint32_t value3 = 17;
+ // uint32_t result3 = eastl::min_alt<uint32_t>(value3, 4); // error C2664: 'const T &eastl::min_alt<unsigned int>(const T &,const T &)' : cannot convert parameter 1 from 'volatile uint32_t' to 'const unsigned int &'
+ // EATEST_VERIFY(result3 == 4);
+ }
+
+
+ {
+ // ForwardIterator min_element(ForwardIterator first, ForwardIterator last)
+ // ForwardIterator min_element(ForwardIterator first, ForwardIterator last, Compare compare)
+
+ int intArray[] = { -5, 2, 1, 5, 4, 5 };
+ int* pInt = min_element(intArray, intArray + 6);
+ EATEST_VERIFY(pInt && (*pInt == -5));
+
+ pInt = min_element(intArray, intArray + 6, Greater<int>());
+ EATEST_VERIFY(pInt && (*pInt == 5));
+
+
+ TestObject toArray[] = { TestObject(7), TestObject(2), TestObject(8), TestObject(5), TestObject(4), TestObject(-12) };
+ TestObject* pTO = min_element(toArray, toArray + 6);
+ EATEST_VERIFY(pTO && (*pTO == TestObject(-12)));
+
+ pTO = min_element(toArray, toArray + 6, Greater<TestObject>());
+ EATEST_VERIFY(pTO && (*pTO == TestObject(8)));
+ }
+
+
+ {
+ // ForwardIterator max_element(ForwardIterator first, ForwardIterator last)
+ // ForwardIterator max_element(ForwardIterator first, ForwardIterator last, Compare compare)
+
+ int intArray[] = { -5, 2, 1, 5, 4, 5 };
+ int* pInt = max_element(intArray, intArray + 6);
+ EATEST_VERIFY(pInt && (*pInt == 5));
+
+ pInt = max_element(intArray, intArray + 6, less<int>());
+ EATEST_VERIFY(pInt && (*pInt == 5));
+
+
+ TestObject toArray[] = { TestObject(7), TestObject(2), TestObject(8), TestObject(5), TestObject(4), TestObject(-12) };
+ TestObject* pTO = max_element(toArray, toArray + 6);
+ EATEST_VERIFY(pTO && (*pTO == TestObject(8)));
+
+ pTO = max_element(toArray, toArray + 6, less<TestObject>());
+ EATEST_VERIFY(pTO && (*pTO == TestObject(8)));
+ }
+
+ {
+ // template <class ForwardIterator, class Compare>
+ // eastl::pair<ForwardIterator, ForwardIterator>
+ // minmax_element(ForwardIterator first, ForwardIterator last)
+ //
+ // template <class ForwardIterator, class Compare>
+ // eastl::pair<ForwardIterator, ForwardIterator>
+ // minmax_element(ForwardIterator first, ForwardIterator last, Compare compare)
+
+ int intArray[] = { 5, -2, 1, 5, 6, 5 };
+
+ eastl::pair<int*, int*> result = eastl::minmax_element(intArray, intArray + 6);
+ EATEST_VERIFY((*result.first == -2) && (*result.second == 6));
+
+
+ // template <typename T>
+ // eastl::pair<const T&, const T&>
+ // minmax(const T& a, const T& b)
+ //
+ // template <typename T, typename Compare>
+ // eastl::pair<const T&, const T&>
+ // minmax(const T& a, const T& b, Compare comp)
+
+ // The VC++ compiler is broken in such a way that it can't compile the following without generating a warning:
+ // warning C4413: 'eastl::pair<T1,T2>::first' : reference member is initialized to a temporary that doesn't persist after the constructor exits.
+ // The Microsoft standard library definition of minmax doesn't generate this warning... because that minmax is broken and non-conforming. I think they
+ // made it the way they did because of the aforementioned compiler bug.
+ // Recent versions of clang seem to generate a warning of its own. To do: we need to address this.
+ // GCC 4.8 for x86 has a compiler bug in optimized builds for this code, so we currently enable this for non-optimized builds only.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && ((defined(EA_COMPILER_CLANG) && EA_COMPILER_VERSION < 302) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)) && !defined(__OPTIMIZE__))
+
+ int i3(3), i2(2);
+ eastl::pair<const int&, const int&> resulti = eastl::minmax(i3, i2);
+ EATEST_VERIFY_F((resulti.first == 2) && (resulti.second == 3), "minmax failure. %d %d", resulti.first, resulti.second);
+
+ char c3(3), c2(2);
+ eastl::pair<const char&, const char&> resultc = eastl::minmax(c3, c2);
+ EATEST_VERIFY_F((resultc.first == 2) && (resultc.second == 3), "minmax failure. %d %d", (int)resultc.first, (int)resultc.second);
+
+ float f3(3), f2(2);
+ eastl::pair<const float&, const float&> resultf = eastl::minmax(f3, f2);
+ EATEST_VERIFY_F((resultf.first == 2) && (resultf.second == 3), "minmax failure. %f %f", resultf.first, resultf.second);
+ #endif
+
+
+ // template <typename T>
+ // eastl::pair<T, T>
+ // minmax(std::initializer_list<T> ilist)
+ //
+ // template <typename T, class Compare>
+ // eastl::pair<T, T>
+ // minmax(std::initializer_list<T> ilist, Compare compare)
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ eastl::pair<int, int> result3 = eastl::minmax({3, 2});
+ EATEST_VERIFY((result3.first == 2) && (result3.second == 3));
+ #endif
+ }
+
+
+ return nErrorCount;
+}
+
+
+static int TestClamp()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::clamp(42, 1, 100) == 42);
+ EATEST_VERIFY(eastl::clamp(-42, 1, 100) == 1);
+ EATEST_VERIFY(eastl::clamp(420, 1, 100) == 100);
+ EATEST_VERIFY(eastl::clamp(1, 1, 100) == 1);
+ EATEST_VERIFY(eastl::clamp(100, 1, 100) == 100);
+
+ EATEST_VERIFY(eastl::clamp(42.f, 1.f, 100.f, less<float>()) == 42.f);
+ EATEST_VERIFY(eastl::clamp(-42.f, 1.f, 100.f, less<float>()) == 1.f);
+ EATEST_VERIFY(eastl::clamp(420.f, 1.f, 100.f, less<float>()) == 100.f);
+ EATEST_VERIFY(eastl::clamp(1.f, 1.f, 100.f, less<float>()) == 1.f);
+ EATEST_VERIFY(eastl::clamp(100.f, 1.f, 100.f, less<float>()) == 100.f);
+
+ EATEST_VERIFY(eastl::clamp(42., 1., 100., less<double>()) == 42.);
+ EATEST_VERIFY(eastl::clamp(-42., 1., 100., less<double>()) == 1.);
+ EATEST_VERIFY(eastl::clamp(420., 1., 100., less<double>()) == 100.);
+ EATEST_VERIFY(eastl::clamp(1., 1., 100., less<double>()) == 1.);
+ EATEST_VERIFY(eastl::clamp(100., 1., 100., less<double>()) == 100.);
+
+ EATEST_VERIFY(eastl::clamp(A(42), A(1), A(100), LessStruct()).a == A(42).a);
+ EATEST_VERIFY(eastl::clamp(A(-42), A(1), A(100), LessStruct()).a == A(1).a);
+ EATEST_VERIFY(eastl::clamp(A(420), A(1), A(100), LessStruct()).a == A(100).a);
+ EATEST_VERIFY(eastl::clamp(A(1), A(1), A(100), LessStruct()).a == A(1).a);
+ EATEST_VERIFY(eastl::clamp(A(100), A(1), A(100), LessStruct()).a == A(100).a);
+
+ return nErrorCount;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestAlgorithm
+//
+int TestAlgorithm()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+
+ TestObject::Reset();
+
+ nErrorCount += TestMinMax();
+ nErrorCount += TestClamp();
+
+
+ // bool all_of (InputIterator first, InputIterator last, Predicate p);
+ // bool any_of (InputIterator first, InputIterator last, Predicate p);
+ // bool none_of(InputIterator first, InputIterator last, Predicate p);
+ {
+
+ eastl::vector<int> v;
+ v.push_back(2);
+ v.push_back(4);
+ v.push_back(6);
+ v.push_back(8);
+
+ EATEST_VERIFY(eastl::all_of( v.begin(), v.end(), DivisibleBy(2)));
+ EATEST_VERIFY(eastl::any_of( v.begin(), v.end(), DivisibleBy(3)));
+ EATEST_VERIFY(eastl::none_of(v.begin(), v.end(), DivisibleBy(5)));
+ }
+
+
+ {
+ // pair mismatch(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2)
+ // pair mismatch(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, Predicate predicate)
+
+ int intArray1[] = { -5, 2, 1, 5, 4, 8888 };
+ int intArray2[] = { -5, 2, 1, 5, 4, 9999 };
+ int intArray3[] = { -5, 2, 1, 5, 4, 9999 };
+
+ eastl::pair<int*, int*> pairInt = mismatch(intArray1, intArray1, intArray2);
+ EATEST_VERIFY(pairInt.first == intArray1 + 0);
+ EATEST_VERIFY(pairInt.second == intArray2 + 0);
+
+ pairInt = mismatch(intArray1, intArray1 + 6, intArray2);
+ EATEST_VERIFY(pairInt.first == intArray1 + 5);
+ EATEST_VERIFY(pairInt.second == intArray2 + 5);
+ pairInt = mismatch(intArray2, intArray2 + 6, intArray3);
+
+ EATEST_VERIFY(pairInt.first == intArray2 + 6);
+ EATEST_VERIFY(pairInt.second == intArray3 + 6);
+
+
+ pairInt = mismatch(intArray1, intArray1, intArray2, equal_to<int>());
+ EATEST_VERIFY(pairInt.first == intArray1 + 0);
+ EATEST_VERIFY(pairInt.second == intArray2 + 0);
+
+ pairInt = mismatch(intArray1, intArray1 + 6, intArray2, equal_to<int>());
+ EATEST_VERIFY(pairInt.first == intArray1 + 5);
+ EATEST_VERIFY(pairInt.second == intArray2 + 5);
+
+ pairInt = mismatch(intArray2, intArray2 + 6, intArray3, equal_to<int>());
+ EATEST_VERIFY(pairInt.first == intArray2 + 6);
+ EATEST_VERIFY(pairInt.second == intArray3 + 6);
+ }
+
+
+ {
+ // void swap(T& a, T& b)
+ // void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+
+ int intArray[] = { -5, 2, 1, 5, 4, 5 };
+
+ swap(intArray[0], intArray[4]);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 6, int(), "swap", 4, 2, 1, 5, -5, 5, -1));
+
+ iter_swap(intArray + 2, intArray + 3);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 6, int(), "iter_swap", 4, 2, 5, 1, -5, 5, -1));
+
+
+ TestObject toArray[] = { TestObject(-5), TestObject(2), TestObject(1), TestObject(5), TestObject(4), TestObject(5) };
+
+ swap(toArray[0], toArray[4]);
+ EATEST_VERIFY(toArray[0] == TestObject(4));
+ EATEST_VERIFY(toArray[4] == TestObject(-5));
+
+ iter_swap(toArray + 2, toArray + 3);
+ EATEST_VERIFY(toArray[2] == TestObject(5));
+ EATEST_VERIFY(toArray[3] == TestObject(1));
+ }
+
+
+ {
+ // ForwardIterator2 swap_ranges(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2)
+
+ int intArray1[] = { 3, 2, 6, 5, 4, 1 };
+ int intArray2[] = { 0, 0, 0, 0, 0, 0 };
+
+ swap_ranges(intArray1, intArray1 + 6, intArray2);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 6, int(), "swap_ranges", 0, 0, 0, 0, 0, 0, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 6, int(), "swap_ranges", 3, 2, 6, 5, 4, 1, -1));
+
+
+ TestObject toArray1[] = { TestObject(3), TestObject(2), TestObject(6), TestObject(5), TestObject(4), TestObject(1) };
+ TestObject toArray2[] = { TestObject(0), TestObject(0), TestObject(0), TestObject(0), TestObject(0), TestObject(0) };
+
+ swap_ranges(toArray1, toArray1 + 6, toArray2);
+ EATEST_VERIFY(toArray1[0] == TestObject(0));
+ EATEST_VERIFY(toArray1[5] == TestObject(0));
+ EATEST_VERIFY(toArray2[0] == TestObject(3));
+ EATEST_VERIFY(toArray2[5] == TestObject(1));
+ }
+
+
+ {
+ // ForwardIterator adjacent_find(ForwardIterator first, ForwardIterator last)
+ // ForwardIterator adjacent_find(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate)
+
+ int intArray[] = { 3, 2, 5, 5, 4, 1 };
+
+ int* pInt = adjacent_find(intArray + 0, intArray + 6);
+ EATEST_VERIFY(pInt == (intArray + 2));
+
+ pInt = adjacent_find(intArray + 3, intArray + 6);
+ EATEST_VERIFY(pInt == (intArray + 6)); // Verify not found
+
+
+ TestObject toArray[] = { TestObject(3), TestObject(2), TestObject(5), TestObject(5), TestObject(4), TestObject(1) };
+
+ TestObject* pTO = adjacent_find(toArray + 0, toArray + 6);
+ EATEST_VERIFY(pTO == (toArray + 2));
+
+ pTO = adjacent_find(toArray + 3, toArray + 6);
+ EATEST_VERIFY(pTO == (toArray + 6)); // Verify not found
+ }
+
+
+ {
+ // OutputIterator move(InputIterator first, InputIterator last, OutputIterator result)
+
+ int intArray1[] = { 3, 2, 6, 5, 4, 1 };
+ int intArray2[] = { 0, 0, 0, 0, 0, 0 };
+
+ move(intArray1, intArray1 + 0, intArray2);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 6, int(), "move", 0, 0, 0, 0, 0, 0, -1));
+
+ move(intArray1, intArray1 + 6, intArray2);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 6, int(), "move", 3, 2, 6, 5, 4, 1, -1));
+
+ move(intArray1 + 1, intArray1 + 6, intArray1 + 0); // Copy over self.
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 6, int(), "move", 2, 6, 5, 4, 1, 1, -1));
+ }
+
+
+ {
+ // OutputIterator copy(InputIterator first, InputIterator last, OutputIterator result)
+
+ int intArray1[] = { 3, 2, 6, 5, 4, 1 };
+ int intArray2[] = { 0, 0, 0, 0, 0, 0 };
+
+ copy(intArray1, intArray1 + 0, intArray2);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 6, int(), "copy", 0, 0, 0, 0, 0, 0, -1));
+
+ copy(intArray1, intArray1 + 6, intArray2);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 6, int(), "copy", 3, 2, 6, 5, 4, 1, -1));
+
+ copy(intArray1 + 1, intArray1 + 6, intArray1 + 0); // Copy over self.
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 6, int(), "copy", 2, 6, 5, 4, 1, 1, -1));
+ }
+
+
+ {
+ // OutputIterator copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate)
+
+ int intArray1[] = { 9, 1, 9, 9, 9, 9, 1, 1, 9, 9 };
+ int intArray2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ copy_if(intArray1, intArray1 + 0, intArray2, bind2nd(equal_to<int>(), (int)1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 10, int(), "copy_if", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1));
+
+ copy_if(intArray1, intArray1 + 9, intArray2, bind2nd(equal_to<int>(), (int)1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 10, int(), "copy_if", 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, -1));
+
+ copy_if(intArray1 + 1, intArray1 + 9, intArray1 + 0, bind2nd(equal_to<int>(), (int)1)); // Copy over self.
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 10, int(), "copy_if", 1, 1, 1, 9, 9, 9, 1, 1, 9, 9, -1));
+ }
+
+
+ {
+ // OutputIterator copy_n(InputIterator first, Size count, OutputIterator result)
+
+ eastl::string in = "123456";
+ eastl::string out;
+
+ eastl::copy_n(in.begin(), 4, eastl::back_inserter(out));
+ EATEST_VERIFY(out == "1234");
+ }
+
+
+ {
+ // BidirectionalIterator2 copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 result)
+
+ int intArray1[] = { 3, 2, 6, 5, 4, 1 };
+ int intArray2[] = { 0, 0, 0, 0, 0, 0 };
+
+ copy_backward(intArray1, intArray1 + 0, intArray2 + 0);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 6, int(), "copy_backward", 0, 0, 0, 0, 0, 0, -1));
+
+ copy_backward(intArray1, intArray1 + 6, intArray2 + 6);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 6, int(), "copy_backward", 3, 2, 6, 5, 4, 1, -1));
+
+ copy_backward(intArray1, intArray1 + 5, intArray1 + 6); // Copy over self.
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 6, int(), "copy_backward", 3, 3, 2, 6, 5, 4, -1));
+ }
+
+
+ {
+ // OutputIterator move(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ eastl::vector<eastl::string> src;
+ for(eastl_size_t i = 0; i < 4; i++)
+ src.push_back(eastl::string(1, (char8_t)('0' + i)));
+ eastl::vector<eastl::string> dest(src.size());
+
+ eastl::move(src.begin(), src.end(), dest.begin());
+ EATEST_VERIFY((dest[0] == "0") && (dest[3] == "3"));
+ EATEST_VERIFY(src[0].empty() && src[3].empty());
+ }
+
+ {
+ // BidirectionalIterator2 move_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 result)
+ eastl::vector<eastl::string> src;
+ for(eastl_size_t i = 0; i < 4; i++)
+ src.push_back(eastl::string(1, (char8_t)('0' + i)));
+ eastl::vector<eastl::string> dest(src.size());
+
+ eastl::move_backward(src.begin(), src.end(), dest.end());
+ EATEST_VERIFY((dest[0] == "0") && (dest[3] == "3"));
+ EATEST_VERIFY(src[0].empty() && src[3].empty());
+ }
+ }
+
+
+ {
+ // difference_type count(InputIterator first, InputIterator last, const T& value)
+
+ int intArray[] = { 1, 2, 1, 5, 4, 1 };
+ ptrdiff_t n = count(intArray, intArray + 6, 1);
+ EATEST_VERIFY(n == 3);
+
+ TestObject toArray[] = { TestObject(1), TestObject(2), TestObject(1), TestObject(5), TestObject(4), TestObject(1) };
+ n = count(toArray, toArray + 6, TestObject(1));
+ EATEST_VERIFY(n == 3);
+ }
+
+
+ {
+ // difference_type count_if(InputIterator first, InputIterator last, Predicate predicate)
+
+ int intArray[] = { 3, 2, 6, 5, 4, 1, 2, 4, 5, 4, 1, 2 };
+
+ // Count all items whose value is less than three.
+ ptrdiff_t n = count_if(intArray, intArray, bind2nd(less<int>(), (int)3)); // No-op
+ EATEST_VERIFY(n == 0);
+ n = count_if(intArray, intArray + 12, bind2nd(less<int>(), (int)3));
+ EATEST_VERIFY(n == 5);
+
+
+ // Count all items whose value is less than three.
+ TestObject toArray[] = { TestObject(1), TestObject(3), TestObject(1), TestObject(4), TestObject(2), TestObject(5) };
+
+ n = count_if(toArray, toArray, bind2nd(less<TestObject>(), TestObject(3))); // No-op
+ EATEST_VERIFY(n == 0);
+ n = count_if(toArray, toArray + 6, bind2nd(less<TestObject>(), TestObject(3)));
+ EATEST_VERIFY(n == 3);
+
+
+ // Count all items whose value is less than three.
+ slist<int> intList;
+ intList.push_front(1);
+ intList.push_front(3);
+ intList.push_front(1);
+ intList.push_front(4);
+ intList.push_front(2);
+ intList.push_front(5);
+
+ n = count_if(intList.begin(), intList.begin(), bind2nd(less<int>(), (int)3)); // No-op
+ EATEST_VERIFY(n == 0);
+ n = count_if(intList.begin(), intList.end(), bind2nd(less<int>(), (int)3));
+ EATEST_VERIFY(n == 3);
+ }
+
+
+ {
+ // void fill(ForwardIterator first, ForwardIterator last, const T& value)
+
+ vector<int> intArray(10);
+
+ EATEST_VERIFY(VerifySequence(intArray.begin(), intArray.end(), int(), "fill", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1));
+ fill(intArray.begin() + 3, intArray.begin() + 7, 4);
+ EATEST_VERIFY(VerifySequence(intArray.begin(), intArray.end(), int(), "fill", 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, -1));
+
+
+ slist<int> intList(10);
+ slist<int>::iterator first = intList.begin();
+ slist<int>::iterator last = intList.begin();
+
+ advance(first, 3);
+ advance(last, 7);
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "fill", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1));
+ fill(first, last, 4);
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "fill", 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, -1));
+
+
+ // Exercise specializations we have for some platform/compiler combinations
+ // void fill(uint64_t* first, uint64_t* last, uint64_t c);
+ // void fill( int64_t* first, int64_t* last, int64_t c);
+ // void fill(uint32_t* first, uint32_t* last, uint32_t c);
+ // void fill( int32_t* first, int32_t* last, int32_t c);
+ // void fill(uint16_t* first, uint16_t* last, uint16_t c);
+ // void fill( int16_t* first, int16_t* last, int16_t c);
+ const eastl_size_t kMaxSize = 300;
+ eastl::vector<uint64_t> vU64(kMaxSize, 0);
+ eastl::vector< int64_t> vI64(kMaxSize, 0);
+ eastl::vector<uint32_t> vU32(kMaxSize, 0);
+ eastl::vector< int32_t> vI32(kMaxSize, 0);
+ eastl::vector<uint16_t> vU16(kMaxSize, 0);
+ eastl::vector< int16_t> vI16(kMaxSize, 0);
+
+ for(eastl_size_t i = 0; i < kMaxSize; ++i)
+ {
+ eastl::fill(vU64.begin(), vU64.begin() + i, UINT64_C(0x0123456789abcdef));
+ EATEST_VERIFY(EA::StdC::Memcheck64(&vU64[0], UINT64_C(0x0123456789abcdef), i) == NULL);
+ EA::StdC::Memset64(&vU64[0], 0, i);
+
+ eastl::fill(vI64.begin(), vI64.begin() + i, UINT64_C(0x0123456789abcdef));
+ EATEST_VERIFY(EA::StdC::Memcheck64(&vI64[0], UINT64_C(0x0123456789abcdef), i) == NULL);
+ EA::StdC::Memset64(&vI64[0], 0, i);
+
+ eastl::fill(vU32.begin(), vU32.begin() + i, UINT32_C(0x01234567));
+ EATEST_VERIFY(EA::StdC::Memcheck32(&vU32[0], UINT32_C(0x01234567), i) == NULL);
+ EA::StdC::Memset32(&vU32[0], 0, i);
+
+ eastl::fill(vI32.begin(), vI32.begin() + i, UINT32_C(0x01234567));
+ EATEST_VERIFY(EA::StdC::Memcheck32(&vI32[0], UINT32_C(0x01234567), i) == NULL);
+ EA::StdC::Memset32(&vI32[0], 0, i);
+
+ eastl::fill(vU16.begin(), vU16.begin() + i, UINT16_C(0x0123));
+ EATEST_VERIFY(EA::StdC::Memcheck16(&vU16[0], UINT16_C(0x0123), i) == NULL);
+ EA::StdC::Memset16(&vU16[0], 0, i);
+
+ eastl::fill(vI16.begin(), vI16.begin() + i, UINT16_C(0x0123));
+ EATEST_VERIFY(EA::StdC::Memcheck16(&vI16[0], UINT16_C(0x0123), i) == NULL);
+ EA::StdC::Memset16(&vI16[0], 0, i);
+ }
+
+ { // Regression for user-reported compile failure.
+ enum TestEnum { eTestValue = -1 };
+ eastl::vector<int32_t> intArrayEnum;
+
+ eastl::fill<eastl::vector<int32_t>::iterator, int32_t>(intArrayEnum.begin(), intArrayEnum.end(), eTestValue);
+ EATEST_VERIFY(intArrayEnum.size() == 0);
+ }
+ }
+
+
+ {
+ // OutputIterator fill_n(OutputIterator first, Size n, const T& value)
+
+ vector<int> intArray(10);
+
+ EATEST_VERIFY(VerifySequence(intArray.begin(), intArray.end(), int(), "fill_n", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1));
+ fill_n(intArray.begin() + 3, 4, 4);
+ EATEST_VERIFY(VerifySequence(intArray.begin(), intArray.end(), int(), "fill_n", 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, -1));
+
+
+ list<int> intList(10);
+ list<int>::iterator first = intList.begin();
+
+ advance(first, 3);
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "fill_n", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1));
+ fill_n(first, 4, 4);
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "fill_n", 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, -1));
+
+ // Exercise specializations we have for some platform/compiler combinations
+ // template<typename Size>
+ // uint64_t* fill_n(uint64_t* first, Size n, uint64_t c);
+ // int64_t* fill_n( int64_t* first, Size n, int64_t c);
+ // uint32_t* fill_n(uint32_t* first, Size n, uint32_t c);
+ // int32_t* fill_n( int32_t* first, Size n, int32_t c);
+ // uint16_t* fill_n(uint16_t* first, Size n, uint16_t c);
+ // int16_t* fill_n( int16_t* first, Size n, int16_t c);
+ const eastl_size_t kMaxSize = 17;
+ eastl::vector<uint64_t> vU64(kMaxSize, 0);
+ eastl::vector< int64_t> vI64(kMaxSize, 0);
+ eastl::vector<uint32_t> vU32(kMaxSize, 0);
+ eastl::vector< int32_t> vI32(kMaxSize, 0);
+ eastl::vector<uint16_t> vU16(kMaxSize, 0);
+ eastl::vector< int16_t> vI16(kMaxSize, 0);
+
+ eastl::vector<uint64_t>::iterator itU64 = eastl::fill_n(vU64.begin(), kMaxSize, UINT64_C(0x0123456789abcdef));
+ EATEST_VERIFY(EA::StdC::Memcheck64(&vU64[0], UINT64_C(0x0123456789abcdef), kMaxSize) == NULL);
+ EATEST_VERIFY(itU64 == (vU64.begin() + kMaxSize));
+ EA::StdC::Memset64(&vU64[0], 0, kMaxSize);
+
+ eastl::vector<int64_t>::iterator itI64 = eastl::fill_n(vI64.begin(), kMaxSize, UINT64_C(0x0123456789abcdef));
+ EATEST_VERIFY(EA::StdC::Memcheck64(&vI64[0], UINT64_C(0x0123456789abcdef), kMaxSize) == NULL);
+ EATEST_VERIFY(itI64 == (vI64.begin() + kMaxSize));
+ EA::StdC::Memset64(&vI64[0], 0, kMaxSize);
+
+ eastl::vector<uint32_t>::iterator itU32 = eastl::fill_n(vU32.begin(), kMaxSize, UINT32_C(0x01234567));
+ EATEST_VERIFY(EA::StdC::Memcheck32(&vU32[0], UINT32_C(0x01234567), kMaxSize) == NULL);
+ EATEST_VERIFY(itU32 == (vU32.begin() + kMaxSize));
+ EA::StdC::Memset32(&vU32[0], 0, kMaxSize);
+
+ eastl::vector<int32_t>::iterator itI32 = eastl::fill_n(vI32.begin(), kMaxSize, UINT32_C(0x01234567));
+ EATEST_VERIFY(EA::StdC::Memcheck32(&vI32[0], UINT32_C(0x01234567), kMaxSize) == NULL);
+ EATEST_VERIFY(itI32 == (vI32.begin() + kMaxSize));
+ EA::StdC::Memset32(&vI32[0], 0, kMaxSize);
+
+ eastl::vector<uint16_t>::iterator itU16 = eastl::fill_n(vU16.begin(), kMaxSize, UINT16_C(0x0123));
+ EATEST_VERIFY(EA::StdC::Memcheck16(&vU16[0], UINT16_C(0x0123), kMaxSize) == NULL);
+ EATEST_VERIFY(itU16 == (vU16.begin() + kMaxSize));
+ EA::StdC::Memset16(&vU16[0], 0, kMaxSize);
+
+ eastl::vector<int16_t>::iterator itI16 = eastl::fill_n(vI16.begin(), kMaxSize, UINT16_C(0x0123));
+ EATEST_VERIFY(EA::StdC::Memcheck16(&vI16[0], UINT16_C(0x0123), kMaxSize) == NULL);
+ EATEST_VERIFY(itI16 == (vI16.begin() + kMaxSize));
+ EA::StdC::Memset16(&vI16[0], 0, kMaxSize);
+ }
+
+
+ {
+ // InputIterator find(InputIterator first, InputIterator last, const T& value)
+ vector<int> intArray;
+ intArray.push_back(0);
+ intArray.push_back(1);
+ intArray.push_back(2);
+ intArray.push_back(3);
+
+ vector<int>::iterator it = find(intArray.begin(), intArray.end(), 2);
+ EATEST_VERIFY(it == (intArray.begin() + 2));
+ EATEST_VERIFY(*it == 2);
+
+ it = find(intArray.begin(), intArray.end(), 7);
+ EATEST_VERIFY(it == intArray.end());
+ }
+
+
+ {
+ // InputIterator find_if(InputIterator first, InputIterator last, Predicate predicate)
+ // InputIterator find_if_not(InputIterator first, InputIterator last, Predicate predicate)
+
+ int intArray[] = { 3, 2, 6, 5, 4, 1, 2, 4, 5, 4, 1, 2 };
+
+ // Find an item which is equal to 1.
+ int* pInt = find_if(intArray, intArray, bind2nd(equal_to<int>(), (int)1)); // No-op
+ EATEST_VERIFY(pInt == (intArray));
+ pInt = find_if(intArray, intArray + 12, bind2nd(equal_to<int>(), (int)1));
+ EATEST_VERIFY(pInt == (intArray + 5));
+ pInt = find_if(intArray, intArray + 12, bind2nd(equal_to<int>(), (int)99));
+ EATEST_VERIFY(pInt == (intArray + 12));
+
+ pInt = find_if_not(intArray, intArray + 12, bind2nd(equal_to<int>(), (int)3));
+ EATEST_VERIFY(pInt == (intArray + 1));
+
+ // Find an item which is equal to 1.
+ TestObject toArray[] = { TestObject(4), TestObject(3), TestObject(2), TestObject(1), TestObject(2), TestObject(5) };
+
+ TestObject* pTO = find_if(toArray, toArray, bind2nd(equal_to<TestObject>(), TestObject(1))); // No-op
+ EATEST_VERIFY(pTO == (toArray));
+ pTO = find_if(toArray, toArray + 6, bind2nd(equal_to<TestObject>(), TestObject(1)));
+ EATEST_VERIFY(pTO == (toArray + 3));
+ pTO = find_if(toArray, toArray + 6, bind2nd(equal_to<TestObject>(), TestObject(99)));
+ EATEST_VERIFY(pTO == (toArray + 6));
+
+ pTO = find_if_not(toArray, toArray + 6, bind2nd(equal_to<TestObject>(), TestObject(4)));
+ EATEST_VERIFY(pTO == (toArray + 1));
+
+ // Find an item which is equal to 1.
+ slist<int> intList;
+ intList.push_front(4);
+ intList.push_front(3);
+ intList.push_front(2);
+ intList.push_front(1);
+ intList.push_front(2);
+ intList.push_front(5);
+
+ // The list is now: { 5, 2, 1, 2, 3, 4 }
+ slist<int>::iterator it = find_if(intList.begin(), intList.begin(), bind2nd(equal_to<int>(), (int)1)); // No-op
+ EATEST_VERIFY(it == intList.begin());
+ it = find_if(intList.begin(), intList.end(), bind2nd(equal_to<int>(), (int)1));
+ EATEST_VERIFY(*it == 1);
+ it = find_if(intList.begin(), intList.end(), bind2nd(equal_to<int>(), (int)99));
+ EATEST_VERIFY(it == intList.end());
+
+ it = find_if_not(intList.begin(), intList.end(), bind2nd(equal_to<int>(), (int)5));
+ EATEST_VERIFY(*it == 2);
+ }
+
+
+ {
+ // ForwardIterator1 find_first_of(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2)
+ // ForwardIterator1 find_first_of(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2, BinaryPredicate predicate)
+
+ int intArray1[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ int intArray2[3] = { 7, 6, 5 };
+
+ int* pInt = find_first_of(intArray1, intArray1, intArray2, intArray2 + 3);
+ EATEST_VERIFY(pInt == intArray1);
+ pInt = find_first_of(intArray1, intArray1 + 10, intArray2, intArray2);
+ EATEST_VERIFY(pInt == intArray1 + 10);
+ pInt = find_first_of(intArray1, intArray1 + 10, intArray2, intArray2 + 3);
+ EATEST_VERIFY(pInt == intArray1 + 5);
+
+ pInt = find_first_of(intArray1, intArray1, intArray2, intArray2 + 3, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1);
+ pInt = find_first_of(intArray1, intArray1 + 10, intArray2, intArray2, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1 + 10);
+ pInt = find_first_of(intArray1, intArray1 + 10, intArray2, intArray2 + 3, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1 + 5);
+ }
+
+
+ {
+ // ForwardIterator1 find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2)
+ // ForwardIterator1 find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2), BinaryPredicate predicate)
+
+ int intArray1[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ int intArray2[3] = { 0, 1, 2 };
+
+ int* pInt = find_first_not_of(intArray1, intArray1, intArray2, intArray2 + 3);
+ EATEST_VERIFY(pInt == intArray1);
+ pInt = find_first_not_of(intArray1, intArray1 + 10, intArray2, intArray2);
+ EATEST_VERIFY(pInt == intArray1 + 0);
+ pInt = find_first_not_of(intArray1, intArray1 + 10, intArray2, intArray2 + 3);
+ EATEST_VERIFY(pInt == intArray1 + 3);
+
+ pInt = find_first_not_of(intArray1, intArray1, intArray2, intArray2 + 3, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1);
+ pInt = find_first_not_of(intArray1, intArray1 + 10, intArray2, intArray2, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1 + 0);
+ pInt = find_first_not_of(intArray1, intArray1 + 10, intArray2, intArray2 + 3, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1 + 3);
+ }
+
+
+ {
+ // ForwardIterator1 find_last_of(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2)
+ // ForwardIterator1 find_last_of(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2, BinaryPredicate predicate)
+
+ int intArray1[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ int intArray2[3] = { 3, 4, 5 };
+
+ int* pInt = find_last_of(intArray1, intArray1, intArray2, intArray2 + 3);
+ EATEST_VERIFY(pInt == intArray1);
+ pInt = find_last_of(intArray1, intArray1 + 10, intArray2, intArray2);
+ EATEST_VERIFY(pInt == intArray1 + 10);
+ pInt = find_last_of(intArray1, intArray1 + 10, intArray2, intArray2 + 3);
+ EATEST_VERIFY(pInt == intArray1 + 5);
+
+ pInt = find_last_of(intArray1, intArray1, intArray2, intArray2 + 3, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1);
+ pInt = find_last_of(intArray1, intArray1 + 10, intArray2, intArray2, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1 + 10);
+ pInt = find_last_of(intArray1, intArray1 + 10, intArray2, intArray2 + 3, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1 + 5);
+ }
+
+
+ {
+ // ForwardIterator1 find_last_not_of(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2)
+ // ForwardIterator1 find_last_not_of(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2), BinaryPredicate predicate)
+
+ int intArray1[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ int intArray2[3] = { 7, 8, 9 };
+
+ int* pInt = find_last_not_of(intArray1, intArray1, intArray2, intArray2 + 3);
+ EATEST_VERIFY(pInt == intArray1);
+ pInt = find_last_not_of(intArray1, intArray1 + 10, intArray2, intArray2);
+ EATEST_VERIFY(pInt == intArray1 + 10);
+ pInt = find_last_not_of(intArray1, intArray1 + 10, intArray2, intArray2 + 3);
+ EATEST_VERIFY(pInt == intArray1 + 6);
+
+ pInt = find_last_not_of(intArray1, intArray1, intArray2, intArray2 + 3, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1);
+ pInt = find_last_not_of(intArray1, intArray1 + 10, intArray2, intArray2, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1 + 10);
+ pInt = find_last_not_of(intArray1, intArray1 + 10, intArray2, intArray2 + 3, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray1 + 6);
+ }
+
+
+ {
+ // Function for_each(InputIterator first, InputIterator last, Function function)
+
+ deque<int> intDeque(1000);
+ SetIncrementalIntegers<int> sii; // We define this class at the top of this file.
+ eastl_size_t i;
+
+ sii = for_each(intDeque.begin(), intDeque.end(), sii);
+ EATEST_VERIFY(sii.mX == 1000);
+ for(i = 0; i < 1000; i++)
+ {
+ if(intDeque[i] != (int)i)
+ break;
+ }
+ EATEST_VERIFY(i == 1000);
+
+
+ array<int, 1000> intArray;
+ sii.reset();
+
+ sii = for_each(intArray.begin(), intArray.end(), sii);
+ EATEST_VERIFY(sii.mX == 1000);
+ for(i = 0; i < 1000; i++)
+ {
+ if(intArray[i] != (int)i)
+ break;
+ }
+ EATEST_VERIFY(i == 1000);
+ }
+
+ // for_each_n
+ {
+ {
+ vector<int> v = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ for_each_n(v.begin(), 5, [](auto& e) { e += 10; });
+
+ vector<int> expected = {10, 11, 12, 13, 14, 5, 6, 7, 8, 9};
+ EATEST_VERIFY(v == expected);
+ }
+
+ // verify lambda can return a result that is ignored.
+ {
+ vector<int> v = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ for_each_n(v.begin(), 5, [](auto& e) { e += 10; return 42; });
+
+ vector<int> expected = {10, 11, 12, 13, 14, 5, 6, 7, 8, 9};
+ EATEST_VERIFY(v == expected);
+ }
+ }
+
+ {
+ // void generate(ForwardIterator first, ForwardIterator last, Generator generator)
+ // OutputIterator generate_n(OutputIterator first, Size n, Generator generator)
+
+ deque<int> intDeque((eastl_size_t)rng.RandRange(100, 1000));
+ GenerateIncrementalIntegers<int> gii(0); // We define this class at the top of this file.
+ int i, iEnd;
+
+ generate(intDeque.begin(), intDeque.end(), gii);
+ for(i = 0, iEnd = (int)intDeque.size(); i < iEnd; i++)
+ {
+ if(intDeque[(eastl_size_t)i] != i)
+ break;
+ }
+ EATEST_VERIFY(i == iEnd);
+
+
+ array<int, 1000> intArray;
+ gii.reset(0);
+
+ generate(intArray.begin(), intArray.end(), gii);
+ for(i = 0; i < 1000; i++)
+ {
+ if(intArray[(eastl_size_t)i] != i)
+ break;
+ }
+ EATEST_VERIFY(i == 1000);
+ }
+
+
+ {
+ // OutputIterator transform(InputIterator first, InputIterator last, OutputIterator result, UnaryOperation unaryOperation)
+
+ deque<int> intDeque((eastl_size_t)rng.RandRange(1, 1000));
+ int i, iEnd;
+
+ for(i = 0, iEnd = (int)intDeque.size(); i < iEnd; i++)
+ intDeque[(eastl_size_t)i] = 1;
+ transform(intDeque.begin(), intDeque.begin(), intDeque.begin(), negate<int>()); // No-op
+ EATEST_VERIFY(intDeque[0] == 1); // Verify nothing happened
+ transform(intDeque.begin(), intDeque.end(), intDeque.begin(), negate<int>());
+ for(i = 0, iEnd = (int)intDeque.size(); i < iEnd; i++)
+ {
+ if(intDeque[(eastl_size_t)i] != -1)
+ break;
+ }
+ EATEST_VERIFY(i == iEnd);
+
+
+ slist<TestObject> sList;
+ for(i = 0, iEnd = rng.RandRange(1, 100); i < iEnd; i++)
+ sList.push_front(TestObject(1));
+ transform(sList.begin(), sList.begin(), sList.begin(), TestObjectNegate()); // No-op
+ EATEST_VERIFY(sList.front() == TestObject(1));
+ transform(sList.begin(), sList.end(), sList.begin(), TestObjectNegate()); // TestObjectNegate is a custom function we define for this test.
+ slist<TestObject>::iterator it = sList.begin();
+ for(; it != sList.end(); it++)
+ {
+ if(!(*it == TestObject(-1)))
+ break;
+ }
+ EATEST_VERIFY(it == sList.end());
+ }
+
+
+ {
+ // OutputIterator transform(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, OutputIterator result, BinaryOperation binaryOperation)
+
+ int intArray1[12] = { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 };
+ int intArray2[12] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };
+
+ int* pInt = transform(intArray1, intArray1, intArray2, intArray2, plus<int>());
+ EATEST_VERIFY(pInt == intArray2);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "transform", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "transform", 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -1));
+
+ pInt = transform(intArray1, intArray1 + 12, intArray2, intArray2, plus<int>());
+ EATEST_VERIFY(pInt == intArray2 + 12);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "transform", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "transform", 3, 3, 4, 4, 3, 3, 4, 4, 3, 3, 4, 4, -1));
+ }
+
+
+ {
+ // bool equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2)
+ // bool equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate predicate)
+
+ vector<eastl_size_t> intArray(100);
+ list<eastl_size_t> intList(100);
+ generate(intArray.begin(), intArray.end(), rng);
+ copy(intArray.begin(), intArray.end(), intList.begin());
+
+ bool b = equal(intArray.begin(), intArray.begin(), (eastl_size_t*)NULL);
+ EATEST_VERIFY(b);
+ b = equal(intArray.begin(), intArray.end(), intList.begin());
+ EATEST_VERIFY(b);
+ intArray[50] += 1;
+ b = equal(intArray.begin(), intArray.end(), intList.begin());
+ EATEST_VERIFY(!b);
+
+ intArray[50] -= 1; // resulttore its original value so the containers are equal again.
+ b = equal(intArray.begin(), intArray.begin(), (eastl_size_t*)NULL, equal_to<eastl_size_t>());
+ EATEST_VERIFY(b);
+ b = equal(intArray.begin(), intArray.end(), intList.begin(), equal_to<eastl_size_t>());
+ EATEST_VERIFY(b);
+ intArray[50] += 1;
+ b = equal(intArray.begin(), intArray.end(), intList.begin(), equal_to<eastl_size_t>());
+ EATEST_VERIFY(!b);
+ }
+
+
+ {
+ // bool identical(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2)
+ // bool identical(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, BinaryPredicate predicate)
+
+ vector<eastl_size_t> intArray(100);
+ list<eastl_size_t> intList(100);
+ generate(intArray.begin(), intArray.end(), rng);
+ copy(intArray.begin(), intArray.end(), intList.begin());
+
+
+ bool b = identical(intArray.begin(), intArray.begin(), (eastl_size_t*)NULL, (eastl_size_t*)NULL);
+ EATEST_VERIFY(b);
+ b = identical(intArray.begin(), intArray.end(), intList.begin(), intList.end());
+ EATEST_VERIFY(b);
+ b = identical(intArray.begin(), intArray.end() - 10, intList.begin(), intList.end());
+ EATEST_VERIFY(!b);
+ b = identical(intList.begin(), intList.end(), intArray.begin() + 10, intArray.end());
+ EATEST_VERIFY(!b);
+ intArray[50] += 1;
+ b = identical(intArray.begin(), intArray.end(), intList.begin(), intList.end());
+ EATEST_VERIFY(!b);
+
+
+ intArray[50] -= 1; // resulttore its original value so the containers are equal again.
+ b = identical(intArray.begin(), intArray.begin(), (eastl_size_t*)NULL, (eastl_size_t*)NULL, equal_to<eastl_size_t>());
+ EATEST_VERIFY(b);
+ b = identical(intArray.begin(), intArray.end(), intList.begin(), intList.end(), equal_to<eastl_size_t>());
+ EATEST_VERIFY(b);
+ b = identical(intArray.begin(), intArray.end() - 10, intList.begin(), intList.end(), equal_to<eastl_size_t>());
+ EATEST_VERIFY(!b);
+ b = identical(intList.begin(), intList.end(), intArray.begin() + 10, intArray.end(), equal_to<eastl_size_t>());
+ EATEST_VERIFY(!b);
+ intArray[50] += 1;
+ b = identical(intArray.begin(), intArray.end(), intList.begin(), intList.end(), equal_to<eastl_size_t>());
+ EATEST_VERIFY(!b);
+ }
+
+
+ {
+ // bool lexicographical_compare(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2)
+ // bool lexicographical_compare(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, Compare compare)
+
+ int intArray1[6] = { 0, 1, 2, 3, 4, 5 };
+ int intArray2[6] = { 0, 1, 2, 3, 4, 6 };
+ int intArray3[5] = { 0, 1, 2, 3, 4 };
+
+ bool b = lexicographical_compare(intArray1, intArray1, intArray2, intArray2); // Test empty range.
+ EATEST_VERIFY(!b);
+ b = lexicographical_compare(intArray1, intArray1 + 6, intArray2, intArray2 + 6);
+ EATEST_VERIFY( b);
+ b = lexicographical_compare(intArray2, intArray2 + 6, intArray1, intArray1 + 6);
+ EATEST_VERIFY(!b);
+ b = lexicographical_compare(intArray1, intArray1 + 6, intArray3, intArray3 + 5);
+ EATEST_VERIFY(!b);
+
+ b = lexicographical_compare(intArray1, intArray1, intArray2, intArray2, greater<int>()); // Test empty range.
+ EATEST_VERIFY(!b);
+ b = lexicographical_compare(intArray1, intArray1 + 6, intArray2, intArray2 + 6, greater<int>());
+ EATEST_VERIFY(!b);
+ b = lexicographical_compare(intArray2, intArray2 + 6, intArray1, intArray1 + 6, greater<int>());
+ EATEST_VERIFY( b);
+ b = lexicographical_compare(intArray3, intArray3 + 5, intArray1, intArray1 + 6, less<int>());
+ EATEST_VERIFY( b);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ // <compairison_category> lexicographical_compare_three_way(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, Compare compare)
+
+ int intArray1[6] = {0, 1, 2, 3, 4, 5};
+ int intArray2[6] = {0, 1, 2, 3, 4, 6};
+ int intArray3[5] = {0, 1, 2, 3, 4};
+ int intArray4[5] = {4, 3, 2, 1, 0};
+
+ // strong ordering
+ auto compare_strong = [](int first, int second)
+ {
+ return (first < second) ? std::strong_ordering::less :
+ (first > second) ? std::strong_ordering::greater :
+ std::strong_ordering::equal;
+ };
+
+ auto b = lexicographical_compare_three_way(intArray1, intArray1 + 6, intArray2, intArray2 + 6, compare_strong);
+ EATEST_VERIFY(b == std::strong_ordering::less);
+ b = lexicographical_compare_three_way(intArray3, intArray3 + 5, intArray2, intArray2 + 6, compare_strong);
+ EATEST_VERIFY(b == std::strong_ordering::less);
+ b = lexicographical_compare_three_way(intArray3, intArray3 + 5, intArray2, intArray2 + 6, synth_three_way{});
+ EATEST_VERIFY(b == std::strong_ordering::less);
+
+ b = lexicographical_compare_three_way(intArray2, intArray2 + 6, intArray1, intArray1 + 6, compare_strong);
+ EATEST_VERIFY(b == std::strong_ordering::greater);
+ b = lexicographical_compare_three_way(intArray2, intArray2 + 6, intArray1, intArray1 + 6, synth_three_way{});
+ EATEST_VERIFY(b == std::strong_ordering::greater);
+
+ b = lexicographical_compare_three_way(intArray1, intArray1 + 6, intArray3, intArray3 + 5, compare_strong);
+ EATEST_VERIFY(b == std::strong_ordering::greater);
+ b = lexicographical_compare_three_way(intArray1, intArray1 + 6, intArray3, intArray3 + 5, synth_three_way{});
+ EATEST_VERIFY(b == std::strong_ordering::greater);
+
+ b = lexicographical_compare_three_way(intArray1, intArray1, intArray2, intArray2, compare_strong); // Test empty range.
+ EATEST_VERIFY(b == std::strong_ordering::equal);
+ b = lexicographical_compare_three_way(intArray1, intArray1, intArray2, intArray2, synth_three_way{}); // Test empty range.
+ EATEST_VERIFY(b == std::strong_ordering::equal);
+
+ // weak ordering
+ auto compare_weak = [](int first, int second)
+ {
+ return (first < second) ? std::weak_ordering::less :
+ (first > second) ? std::weak_ordering::greater :
+ std::weak_ordering::equivalent;
+ };
+
+ auto c = lexicographical_compare_three_way(intArray3, intArray3 + 5, intArray4, intArray4 + 5, compare_weak);
+ EATEST_VERIFY(c == std::weak_ordering::less);
+ c = lexicographical_compare_three_way(intArray4, intArray4 + 5, intArray3, intArray3 + 5, compare_weak);
+ EATEST_VERIFY(c == std::weak_ordering::greater);
+ c = lexicographical_compare_three_way(intArray3, intArray3 + 5, intArray4, intArray4 + 5, synth_three_way{});
+ EATEST_VERIFY(c == std::weak_ordering::less);
+ c = lexicographical_compare_three_way(intArray4, intArray4 + 5, intArray3, intArray3 + 5, synth_three_way{});
+ EATEST_VERIFY(c == std::weak_ordering::greater);
+ }
+
+ {
+ EATEST_VERIFY(synth_three_way{}(1, 1) == std::strong_ordering::equal);
+ EATEST_VERIFY(synth_three_way{}(2, 1) == std::strong_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(1, 2) == std::strong_ordering::less);
+
+ struct weak_struct
+ {
+ int val;
+ inline std::weak_ordering operator<=>(const weak_struct& b) const
+ {
+ return val <=> b.val;
+ }
+ };
+
+ EATEST_VERIFY(synth_three_way{}(weak_struct{1}, weak_struct{2}) == std::weak_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(weak_struct{2}, weak_struct{1}) == std::weak_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(weak_struct{1}, weak_struct{1}) == std::weak_ordering::equivalent);
+ }
+#endif
+
+ {
+ // ForwardIterator lower_bound(ForwardIterator first, ForwardIterator last, const T& value)
+ // ForwardIterator lower_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+
+ int i;
+
+ int* pInt = lower_bound((int*)NULL, (int*)NULL, 100);
+ EATEST_VERIFY(pInt == NULL);
+
+
+ for(i = 0; i < 20 + (gEASTL_TestLevel * 20); i++)
+ {
+ deque<int> intDeque((eastl_size_t)rng.RandRange(1, 500));
+
+ for(int j = 0, jEnd = (int)intDeque.size(); j < jEnd; j++)
+ intDeque[(eastl_size_t)j] = (int)rng.RandLimit(jEnd / 2); // This will result in both gaps and duplications.
+
+ for(int k = 0, kEnd = (int)intDeque.size(); k < kEnd; k++)
+ {
+ deque<int>::iterator it = lower_bound(intDeque.begin(), intDeque.end(), k);
+
+ if(it != intDeque.begin())
+ EATEST_VERIFY(*(it - 1) < k);
+
+ if(it != intDeque.end())
+ EATEST_VERIFY((k < *it) || !(*it < k)); // Verify tha k <= *it by using only operator<
+ }
+ }
+
+
+ for(i = 0; i < 20 + (gEASTL_TestLevel * 20); i++)
+ {
+ list<TestObject> toList;
+ int nSize = (int)rng.RandRange(1, 500);
+
+ for(int j = 0, jEnd = nSize; j < jEnd; j++)
+ toList.push_back(TestObject((int)rng.RandLimit(jEnd / 2))); // This will result in both gaps and duplications.
+
+ for(int k = 0; k < nSize; k++)
+ {
+ TestObject toK(k);
+ list<TestObject>::iterator it = lower_bound(toList.begin(), toList.end(), toK);
+
+ if(it != toList.begin())
+ {
+ --it;
+ EATEST_VERIFY(*it < toK);
+ ++it;
+ }
+
+ if(it != toList.end())
+ EATEST_VERIFY((toK < *it) || !(*it < toK)); // Verify tha k <= *it by using only operator<
+ }
+ }
+ }
+
+
+ {
+ // ForwardIterator upper_bound(ForwardIterator first, ForwardIterator last, const T& value)
+ // ForwardIterator upper_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+
+ int i;
+
+ int* pInt = upper_bound((int*)NULL, (int*)NULL, 100);
+ EATEST_VERIFY(pInt == NULL);
+
+
+ for(i = 0; i < 20 + (gEASTL_TestLevel * 20); i++)
+ {
+ deque<int> intDeque((eastl_size_t)rng.RandRange(1, 500));
+
+ for(eastl_size_t j = 0, jEnd = intDeque.size(); j < jEnd; j++)
+ intDeque[j] = (int)rng.RandLimit((uint32_t)jEnd / 2); // This will result in both gaps and duplications.
+
+ for(int k = 0, kEnd = (int)intDeque.size(); k < kEnd; k++)
+ {
+ deque<int>::iterator it = upper_bound(intDeque.begin(), intDeque.end(), k);
+
+ if(it != intDeque.begin())
+ EATEST_VERIFY((*(it - 1) < k) || !(k < *(it - 1))); // Verify tha *it <= k by using only operator<
+
+ if(it != intDeque.end())
+ EATEST_VERIFY(k < *it);
+ }
+ }
+
+
+ for(i = 0; i < 20 + (gEASTL_TestLevel * 20); i++)
+ {
+ list<TestObject> toList;
+ int nSize = (int)rng.RandRange(1, 500);
+
+ for(int j = 0, jEnd = nSize; j < jEnd; j++)
+ toList.push_back(TestObject((int)rng.RandLimit(jEnd / 2))); // This will result in both gaps and duplications.
+
+ for(int k = 0; k < nSize; k++)
+ {
+ TestObject toK(k);
+ list<TestObject>::iterator it = upper_bound(toList.begin(), toList.end(), toK);
+
+ if(it != toList.begin())
+ {
+ --it;
+ EATEST_VERIFY((*it < toK) || !(toK < *it)); // Verify tha *it <= k by using only operator<
+ ++it;
+ }
+
+ if(it != toList.end())
+ EATEST_VERIFY(toK < *it);
+ }
+ }
+ }
+
+
+ {
+ // pair<ForwardIterator, ForwardIterator> equal_range(ForwardIterator first, ForwardIterator last, const T& value)
+ // pair<ForwardIterator, ForwardIterator> equal_range(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+
+ int i;
+
+ pair<int*, int*> pInt = equal_range((int*)NULL, (int*)NULL, 100);
+ EATEST_VERIFY(pInt.first == NULL);
+ EATEST_VERIFY(pInt.second == NULL);
+
+
+ for(i = 0; i < 20 + (gEASTL_TestLevel * 20); i++)
+ {
+ deque<int> intDeque((eastl_size_t)rng.RandRange(1, 500));
+
+ for(int j = 0, jEnd = (int)intDeque.size(); j < jEnd; j++)
+ intDeque[(eastl_size_t)j] = (int)rng.RandLimit(jEnd / 2); // This will result in both gaps and duplications.
+
+ for(int k = 0, kEnd = (int)intDeque.size(); k < kEnd; k++)
+ {
+ pair<deque<int>::iterator, deque<int>::iterator> it = equal_range(intDeque.begin(), intDeque.end(), k);
+
+ // Test it.first as lower_bound.
+ if(it.first != intDeque.begin())
+ EATEST_VERIFY(*(it.first - 1) < k);
+
+ if(it.first != intDeque.end())
+ EATEST_VERIFY((k < *it.first) || !(*it.first < k)); // Verify tha k <= *it by using only operator<
+
+ // Test it.second as upper_bound.
+ if(it.second != intDeque.begin())
+ EATEST_VERIFY((*(it.second - 1) < k) || !(k < *(it.second - 1))); // Verify tha *it <= k by using only operator<
+
+ if(it.second != intDeque.end())
+ EATEST_VERIFY(k < *it.second);
+ }
+ }
+
+
+ for(i = 0; i < 20 + (gEASTL_TestLevel * 20); i++)
+ {
+ list<TestObject> toList;
+ int nSize = (int)rng.RandRange(1, 500);
+
+ for(int j = 0, jEnd = nSize; j < jEnd; j++)
+ toList.push_back(TestObject((int)rng.RandLimit(jEnd / 2))); // This will result in both gaps and duplications.
+
+ for(int k = 0; k < nSize; k++)
+ {
+ TestObject toK(k);
+ pair<list<TestObject>::iterator, list<TestObject>::iterator> it = equal_range(toList.begin(), toList.end(), toK);
+
+ // Test it.first as lower_bound
+ if(it.first != toList.begin())
+ {
+ --it.first;
+ EATEST_VERIFY(*it.first < toK);
+ ++it.first;
+ }
+
+ if(it.first != toList.end())
+ EATEST_VERIFY((toK < *it.first) || !(*it.first < toK)); // Verify tha k <= *it by using only operator<
+
+ // Test it.second as upper_bound
+ if(it.second != toList.begin())
+ {
+ --it.second;
+ EATEST_VERIFY((*it.second < toK) || !(toK < *it.second)); // Verify tha *it <= k by using only operator<
+ ++it.second;
+ }
+
+ if(it.second != toList.end())
+ EATEST_VERIFY(toK < *it.second);
+ }
+ }
+ }
+
+
+ {
+ // void replace(ForwardIterator first, ForwardIterator last, const T& old_value, const T& new_value)
+ // void replace_if(ForwardIterator first, ForwardIterator last, Predicate predicate, const T& new_value)
+
+ int intArray[8] = { 0, 3, 2, 7, 5, 4, 5, 3, };
+
+ // Convert 3s to 99s.
+ replace(intArray, intArray, 3, 99); // No-op
+ EATEST_VERIFY((intArray[1] == 3) && (intArray[7] == 3));
+ replace(intArray, intArray + 8, 3, 99); // No-op
+ EATEST_VERIFY((intArray[1] == 99) && (intArray[7] == 99));
+
+ // Convert 99s to 88s.
+ replace_if(intArray, intArray, bind2nd(equal_to<int>(), (int)99), 88); // No-op
+ EATEST_VERIFY((intArray[1] == 99) && (intArray[7] == 99));
+ replace_if(intArray, intArray + 8, bind2nd(equal_to<int>(), (int)99), 88);
+ EATEST_VERIFY((intArray[1] == 88) && (intArray[7] == 88));
+
+
+ slist<TestObject> toList;
+ slist<TestObject>::iterator it;
+ toList.push_front(TestObject(3));
+ toList.push_front(TestObject(5));
+ toList.push_front(TestObject(4));
+ toList.push_front(TestObject(5));
+ toList.push_front(TestObject(7));
+ toList.push_front(TestObject(2));
+ toList.push_front(TestObject(3));
+ toList.push_front(TestObject(0));
+
+ // Convert 3s to 99s.
+ replace(toList.begin(), toList.begin(), TestObject(3), TestObject(99)); // No-op
+ it = toList.begin();
+ advance(it, 1);
+ EATEST_VERIFY(*it == TestObject(3));
+ advance(it, 6);
+ EATEST_VERIFY(*it == TestObject(3));
+ replace(toList.begin(), toList.end(), TestObject(3), TestObject(99));
+ it = toList.begin();
+ advance(it, 1);
+ EATEST_VERIFY(*it == TestObject(99));
+ advance(it, 6);
+ EATEST_VERIFY(*it == TestObject(99));
+
+ // Convert 99s to 88s.
+ replace_if(toList.begin(), toList.begin(), bind2nd(equal_to<TestObject>(), TestObject(99)), TestObject(88)); // No-op
+ it = toList.begin();
+ advance(it, 1);
+ EATEST_VERIFY(*it == TestObject(99));
+ advance(it, 6);
+ EATEST_VERIFY(*it == TestObject(99));
+ replace_if(toList.begin(), toList.end(), bind2nd(equal_to<TestObject>(), TestObject(99)), TestObject(88));
+ it = toList.begin();
+ advance(it, 1);
+ EATEST_VERIFY(*it == TestObject(88));
+ advance(it, 6);
+ EATEST_VERIFY(*it == TestObject(88));
+ }
+
+
+ {
+ // OutputIterator remove_copy(InputIterator first, InputIterator last, OutputIterator result, const T& value)
+ // OutputIterator remove_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate)
+
+ int intArray1[12] = { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 };
+ int intArray2[12] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };
+
+ int* pInt = remove_copy(intArray1, intArray1, intArray2, 1); // No-op
+ EATEST_VERIFY(pInt == intArray2);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "remove_copy", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "remove_copy", 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -1));
+
+ pInt = remove_copy(intArray1, intArray1 + 12, intArray2, 1);
+ EATEST_VERIFY(pInt == intArray2 + 6);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "remove_copy", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "remove_copy", 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, -1));
+
+
+ pInt = remove_copy_if(intArray1, intArray1, intArray2, bind2nd(equal_to<int>(), (int)0)); // No-op
+ EATEST_VERIFY(pInt == intArray2);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "remove_copy_if", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "remove_copy_if", 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, -1));
+
+ pInt = remove_copy_if(intArray1, intArray1 + 12, intArray2, bind2nd(equal_to<int>(), (int)0));
+ EATEST_VERIFY(pInt == intArray2 + 6);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "remove_copy_if", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "remove_copy_if", 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, -1));
+ }
+
+
+ {
+ // ForwardIterator remove(ForwardIterator first, ForwardIterator last, const T& value)
+ // ForwardIterator remove_if(ForwardIterator first, ForwardIterator last, Predicate predicate)
+
+ int intArray1[12] = { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 };
+ int intArray2[12] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };
+
+ int* pInt = remove(intArray1, intArray1, 1);
+ EATEST_VERIFY(pInt == intArray1);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "remove", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ pInt = remove(intArray1, intArray1 + 12, 1);
+ EATEST_VERIFY(pInt == intArray1 + 6);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "remove", 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+
+ pInt = remove(intArray2, intArray2, 1);
+ EATEST_VERIFY(pInt == intArray2);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "remove", 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -1));
+ pInt = remove(intArray2, intArray2 + 12, 1);
+ EATEST_VERIFY(pInt == intArray2 + 12);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "remove", 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -1));
+ }
+
+
+ {
+ // ForwardIterator apply_and_remove(ForwardIterator first, ForwardIterator last, Function function, const T&
+ // value) ForwardIterator apply_and_remove_if(ForwardIterator first, ForwardIterator last, Function function,
+ // Predicate predicate)
+
+ // Test for empty range and full container range
+ {
+ int intArray[12] = {0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ int* pInt = apply_and_remove(intArray, intArray, func, 1);
+ EATEST_VERIFY(pInt == intArray);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 12, int(), "apply_and_remove", 0, 0, 1, 1, 0, 0, 1, 1, 0,
+ 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove", -1));
+ pInt = apply_and_remove(intArray, intArray + 12, func, 1);
+ EATEST_VERIFY(pInt == intArray + 6);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 6, int(), "apply_and_remove", 0, 0, 0, 0, 0, 0, -1));
+ EATEST_VERIFY(
+ VerifySequence(output.begin(), output.end(), int(), "apply_and_remove", 1, 1, 1, 1, 1, 1, -1));
+ }
+
+ // Test for no match on empty range and full container range
+ {
+ int intArray[12] = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ int* pInt = apply_and_remove(intArray, intArray, func, 1);
+ EATEST_VERIFY(pInt == intArray);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 12, int(), "apply_and_remove", 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove", -1));
+ pInt = apply_and_remove(intArray, intArray + 12, func, 1);
+ EATEST_VERIFY(pInt == intArray + 12);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 12, int(), "apply_and_remove", 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove", -1));
+ }
+
+ // Test for empty range and full container range
+ {
+ int intArray[12] = {0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ int* pInt = apply_and_remove_if(intArray, intArray, func, bind2nd(equal_to<int>(), (int)1));
+ EATEST_VERIFY(pInt == intArray);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 12, int(), "apply_and_remove_if", 0, 0, 1, 1, 0, 0, 1, 1,
+ 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove_if", -1));
+ pInt = apply_and_remove_if(intArray, intArray + 12, func, bind2nd(equal_to<int>(), (int)1));
+ EATEST_VERIFY(pInt == intArray + 6);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 6, int(), "apply_and_remove_if", 0, 0, 0, 0, 0, 0, -1));
+ EATEST_VERIFY(
+ VerifySequence(output.begin(), output.end(), int(), "apply_and_remove_if", 1, 1, 1, 1, 1, 1, -1));
+ }
+
+ // Test for no match on empty range and full container range
+ {
+ int intArray[12] = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ int* pInt = apply_and_remove_if(intArray, intArray, func, bind2nd(equal_to<int>(), (int)1));
+ EATEST_VERIFY(pInt == intArray);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 12, int(), "apply_and_remove_if", 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove_if", -1));
+ pInt = apply_and_remove_if(intArray, intArray + 12, func, bind2nd(equal_to<int>(), (int)1));
+ EATEST_VERIFY(pInt == intArray + 12);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 12, int(), "apply_and_remove_if", 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove_if", -1));
+ }
+
+ auto even = [](int a) { return (a % 2) == 0; };
+ // Test to verify that the remaining element have stable ordering
+ {
+ int intArray[12] = {7, 8, 2, 3, 4, 5, 6, 0, 1, 9, 10, 11};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ int* pInt = apply_and_remove_if(intArray, intArray + 12, func, even);
+ EATEST_VERIFY(pInt == intArray + 6);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 6, int(), "apply_and_remove_if", 7, 3, 5, 1, 9, 11, -1));
+ EATEST_VERIFY(
+ VerifySequence(output.begin(), output.end(), int(), "apply_and_remove_if", 8, 2, 4, 6, 0, 10, -1));
+ }
+ {
+ int intArray[12] = {7, 8, 0, 0, 4, 5, 6, 0, 1, 9, 0, 11};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ int* pInt = apply_and_remove(intArray, intArray + 12, func, 0);
+ EATEST_VERIFY(pInt == intArray + 8);
+ EATEST_VERIFY(
+ VerifySequence(intArray, intArray + 8, int(), "apply_and_remove", 7, 8, 4, 5, 6, 1, 9, 11, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove", 0, 0, 0, 0, -1));
+ }
+
+ // Tests on a list (i.e. non-contiguous memory container)
+ {
+ list<int> intList = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ auto listIter = apply_and_remove_if(intList.begin(), intList.begin(), func, even);
+ EATEST_VERIFY(listIter == intList.begin());
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "apply_and_remove_if", 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove_if", -1));
+ listIter = apply_and_remove_if(intList.begin(), intList.end(), func, even);
+ EATEST_VERIFY(listIter == next(intList.begin(), 6));
+ EATEST_VERIFY(
+ VerifySequence(intList.begin(), listIter, int(), "apply_and_remove_if", 1, 3, 5, 7, 9, 11, -1));
+ EATEST_VERIFY(
+ VerifySequence(output.begin(), output.end(), int(), "apply_and_remove_if", 0, 2, 4, 6, 8, 10, -1));
+ }
+ {
+ list<int> intList = {0, 4, 2, 3, 4, 5, 6, 4, 4, 4, 10, 11};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ auto listIter = apply_and_remove(intList.begin(), intList.begin(), func, 4);
+ EATEST_VERIFY(listIter == intList.begin());
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "apply_and_remove", 0, 4, 2, 3, 4, 5, 6,
+ 4, 4, 4, 10, 11, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove", -1));
+ listIter = apply_and_remove(intList.begin(), intList.end(), func, 4);
+ EATEST_VERIFY(listIter == next(intList.begin(), 7));
+ EATEST_VERIFY(
+ VerifySequence(intList.begin(), listIter, int(), "apply_and_remove", 0, 2, 3, 5, 6, 10, 11, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove", 4, 4, 4, 4, 4, -1));
+ }
+
+ // Tests on a part of a container
+ {
+ vector<int> intVector = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ auto vectorIter = apply_and_remove_if(next(intVector.begin(), 3), prev(intVector.end(), 2), func, even);
+ EATEST_VERIFY(vectorIter == next(intVector.begin(), 7));
+ EATEST_VERIFY(
+ VerifySequence(intVector.begin(), vectorIter, int(), "apply_and_remove_if", 0, 1, 2, 3, 5, 7, 9, -1));
+ EATEST_VERIFY(
+ VerifySequence(prev(intVector.end(), 2), intVector.end(), int(), "apply_and_remove_if", 10, 11, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove_if", 4, 6, 8, -1));
+ }
+ {
+ vector<int> intVector = {5, 1, 5, 3, 4, 5, 5, 7, 8, 5, 10, 5};
+ vector<int> output;
+ auto func = [&output](int a) { output.push_back(a); };
+ auto vectorIter = apply_and_remove(next(intVector.begin(), 2), prev(intVector.end(), 3), func, 5);
+ EATEST_VERIFY(vectorIter == next(intVector.begin(), 6));
+ EATEST_VERIFY(
+ VerifySequence(intVector.begin(), vectorIter, int(), "apply_and_remove", 5, 1, 3, 4, 7, 8, -1));
+ EATEST_VERIFY(
+ VerifySequence(prev(intVector.end(), 3), intVector.end(), int(), "apply_and_remove", 5, 10, 5, -1));
+ EATEST_VERIFY(VerifySequence(output.begin(), output.end(), int(), "apply_and_remove", 5, 5, 5, -1));
+ }
+ }
+
+
+ {
+ // OutputIterator replace_copy(InputIterator first, InputIterator last, OutputIterator result, const T& old_value, const T& new_value)
+ // OutputIterator replace_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate, const T& new_value)
+
+ int intArray1[12] = { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 };
+ int intArray2[12] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };
+
+ int* pInt = replace_copy(intArray1, intArray1, intArray2, 1, 4);
+ EATEST_VERIFY(pInt == intArray2);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "replace_copy", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "replace_copy", 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -1));
+
+ pInt = replace_copy(intArray1, intArray1 + 12, intArray2, 1, 4);
+ EATEST_VERIFY(pInt == intArray2 + 12);
+ EATEST_VERIFY(VerifySequence(intArray1, intArray1 + 12, int(), "replace_copy", 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, -1));
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 12, int(), "replace_copy", 0, 0, 4, 4, 0, 0, 4, 4, 0, 0, 4, 4, -1));
+ }
+
+
+ {
+ // void reverse(BidirectionalIterator first, BidirectionalIterator last)
+
+ vector<int> intArray;
+ for(int i = 0; i < 10; i++)
+ intArray.push_back(i);
+
+ reverse(intArray.begin(), intArray.begin()); // No-op
+ EATEST_VERIFY(VerifySequence(intArray.begin(), intArray.end(), int(), "reverse", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1));
+
+ reverse(intArray.begin(), intArray.end());
+ EATEST_VERIFY(VerifySequence(intArray.begin(), intArray.end(), int(), "reverse", 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1));
+
+
+ list<TestObject> toList;
+ for(int j = 0; j < 10; j++)
+ toList.push_back(TestObject(j));
+
+ reverse(toList.begin(), toList.begin()); // No-op
+ EATEST_VERIFY(toList.front() == TestObject(0));
+ EATEST_VERIFY(toList.back() == TestObject(9));
+
+ reverse(toList.begin(), toList.end());
+ EATEST_VERIFY(toList.front() == TestObject(9));
+ EATEST_VERIFY(toList.back() == TestObject(0));
+
+ // Verify that reversing an empty range executes without exception.
+ reverse(toList.begin(), toList.begin());
+ }
+
+
+ {
+ // reverse_copy(BidirectionalIterator first, BidirectionalIterator last, OutputIterator result)
+
+ vector<int> intArray1;
+ int intArray2[10] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
+
+ for(int i = 0; i < 10; i++)
+ intArray1.push_back(i);
+
+ int* pInt = reverse_copy(intArray1.begin(), intArray1.begin(), intArray2); // No-op
+ EATEST_VERIFY(pInt == intArray2);
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 10, int(), "reverse_copy", 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, -1));
+
+ pInt = reverse_copy(intArray1.begin(), intArray1.end(), intArray2);
+ EATEST_VERIFY(pInt == intArray2 + intArray1.size());
+ EATEST_VERIFY(VerifySequence(intArray2, intArray2 + 10, int(), "reverse_copy", 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1));
+
+
+ list<TestObject> toList;
+ TestObject toArray2[10];
+
+ for(int j = 0; j < 10; j++)
+ {
+ toList.push_back(TestObject(j));
+ toArray2[j] = TestObject(5);
+ }
+
+ TestObject* pTO = reverse_copy(toList.begin(), toList.begin(), toArray2); // No-op
+ EATEST_VERIFY(pTO == toArray2);
+ EATEST_VERIFY(toArray2[0] == TestObject(5));
+ EATEST_VERIFY(toArray2[9] == TestObject(5));
+
+ pTO = reverse_copy(toList.begin(), toList.end(), toArray2);
+ EATEST_VERIFY(pTO == toArray2 + 10);
+ }
+
+
+ {
+ // ForwardIterator1 search(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2)
+ // ForwardIterator1 search(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2, BinaryPredicate predicate)
+
+ // Test via bidirectional/random_access iterator.
+ basic_string<char> sTest("abcdefg abcdefg abcdefg");
+ const char* pSubstring1 = " abcd";
+ const char* pSubstring2 = "1234";
+
+ basic_string<char>::iterator iString = search(sTest.begin(), sTest.end(), pSubstring1, pSubstring1 + strlen(pSubstring1));
+ EATEST_VERIFY(&*iString == &sTest[7]);
+
+ iString = search(sTest.begin(), sTest.end(), pSubstring1, pSubstring1 + 1); // Search for sequence of 1.
+ EATEST_VERIFY(&*iString == &sTest[7]);
+
+ iString = search(sTest.begin(), sTest.end(), pSubstring2, pSubstring2 + strlen(pSubstring2));
+ EATEST_VERIFY(&*iString == sTest.end());
+
+ iString = search(sTest.begin(), sTest.end(), pSubstring2, pSubstring2); // Search with empty search pattern.
+ EATEST_VERIFY(&*iString == sTest.begin());
+
+ // Test via forward iterator.
+ slist<char> sListTest;
+ for(slist<char>::size_type i = sTest.size(); i > 0; --i)
+ sListTest.push_front(sTest[i - 1]);
+
+ slist<char>::iterator iSlist = search(sListTest.begin(), sListTest.end(), pSubstring1, pSubstring1 + 5);
+ slist<char>::iterator i7 = sListTest.begin();
+ advance(i7, 7);
+ EATEST_VERIFY(iSlist == i7);
+
+ iSlist = search(sListTest.begin(), sListTest.end(), pSubstring2, pSubstring2 + strlen(pSubstring2));
+ EATEST_VERIFY(iSlist == sListTest.end());
+
+ iSlist = search(sListTest.begin(), sListTest.end(), pSubstring2, pSubstring2); // Search with empty search pattern.
+ EATEST_VERIFY(iSlist == sListTest.begin());
+ }
+
+
+ {
+ // ForwardIterator search_n(ForwardIterator first, ForwardIterator last, Size count, const T& value)
+
+ const char* pString1 = "Hello wwworld";
+ const char* presultult = search_n(pString1, pString1 + strlen(pString1), 1, 'w');
+ EATEST_VERIFY(presultult == pString1 + 6);
+ }
+
+
+ {
+ // bool binary_search(ForwardIterator first, ForwardIterator last, const T& value)
+ // bool binary_search(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+
+ // ForwardIterator binary_search_i(ForwardIterator first, ForwardIterator last, const T& value)
+ // ForwardIterator binary_search_i(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+
+ vector<int> intArray;
+ for(int i = 0; i < 1000; i++)
+ intArray.push_back(i);
+
+ bool b = binary_search(intArray.begin(), intArray.begin(), 0);
+ EATEST_VERIFY(b == false);
+
+ b = binary_search(intArray.begin(), intArray.begin() + 1, 0);
+ EATEST_VERIFY(b == true);
+
+ b = binary_search(intArray.begin(), intArray.end(), 733, less<int>());
+ EATEST_VERIFY(b == true);
+
+
+ vector<int>::iterator it = binary_search_i(intArray.begin(), intArray.begin(), 0);
+ EATEST_VERIFY(it == intArray.begin());
+
+ it = binary_search_i(intArray.begin(), intArray.begin() + 1, 0, less<int>());
+ EATEST_VERIFY(it == intArray.begin());
+
+ it = binary_search_i(intArray.begin(), intArray.end(), 733);
+ EATEST_VERIFY(it == intArray.begin() + 733);
+
+
+ list<TestObject> toList;
+ list<TestObject>::iterator toI;
+ for(int j = 0; j < 1000; j++)
+ toList.push_back(TestObject(j));
+
+ b = binary_search(toList.begin(), toList.begin(), TestObject(0), less<TestObject>());
+ EATEST_VERIFY(b == false);
+
+ toI = toList.begin();
+ toI++;
+ b = binary_search(toList.begin(), toI, TestObject(0));
+ EATEST_VERIFY(b == true);
+
+ b = binary_search(toList.begin(), toList.end(), TestObject(733));
+ EATEST_VERIFY(b == true);
+
+
+ toI = binary_search_i(toList.begin(), toList.begin(), TestObject(0), less<TestObject>()); // No-op
+ EATEST_VERIFY(toI == toList.begin());
+
+ toI = toList.begin();
+ toI++;
+ toI = binary_search_i(toList.begin(), toI, TestObject(0));
+ EATEST_VERIFY(*toI == TestObject(0));
+
+ toI = binary_search_i(toList.begin(), toList.end(), TestObject(733));
+ EATEST_VERIFY(*toI == TestObject(733));
+ }
+
+
+ {
+ // ForwardIterator unique(ForwardIterator first, ForwardIterator last)
+ // ForwardIterator unique(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate)
+
+ int intArray[] = { 1, 2, 3, 3, 4, 4 };
+
+ int* pInt = unique(intArray, intArray + 0);
+ EATEST_VERIFY(pInt == intArray);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 6, int(), "unique", 1, 2, 3, 3, 4, 4, -1));
+
+ pInt = unique(intArray, intArray + 6, equal_to<int>());
+ EATEST_VERIFY(pInt == intArray + 4);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 6, int(), "unique", 1, 2, 3, 4, 4, 4, -1));
+
+
+ TestObject toArray[] = { TestObject(1), TestObject(2), TestObject(3), TestObject(3), TestObject(4), TestObject(4) };
+
+ TestObject* pTO = unique(toArray, toArray + 6);
+ EATEST_VERIFY(pTO == toArray + 4);
+ EATEST_VERIFY(toArray[3] == TestObject(4));
+ }
+
+
+ {
+ // ForwardIterator1 find_end(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2)
+ // ForwardIterator1 find_end(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2, BinaryPredicate predicate)
+
+ // Test via bidirectional/random_access iterator.
+ basic_string<char> sTest("abcdefg abcdefg abcdefg");
+ const char* pSubstring1 = "abcd";
+ const char* pSubstring2 = "1234";
+
+ basic_string<char>::iterator iString = find_end(sTest.begin(), sTest.end(), pSubstring1, pSubstring1 + 4);
+ EATEST_VERIFY(&*iString == &sTest[16]);
+
+ iString = find_end(sTest.begin(), sTest.end(), pSubstring1, pSubstring1 + 4, equal_to<char>());
+ EATEST_VERIFY(&*iString == &sTest[16]);
+
+ iString = find_end(sTest.begin(), sTest.end(), pSubstring2, pSubstring2 + strlen(pSubstring2));
+ EATEST_VERIFY(iString == sTest.end());
+
+ iString = find_end(sTest.begin(), sTest.end(), pSubstring2, pSubstring2 + strlen(pSubstring2), equal_to<char>());
+ EATEST_VERIFY(iString == sTest.end());
+
+ // Test via forward iterator.
+ slist<char> sListTest;
+ for(slist<char>::size_type i = sTest.size(); i > 0; --i)
+ sListTest.push_front(sTest[i - 1]);
+
+ slist<char>::iterator iSlist = find_end(sListTest.begin(), sListTest.end(), pSubstring1, pSubstring1 + strlen(pSubstring1));
+ slist<char>::iterator i16 = sListTest.begin();
+ advance(i16, 16);
+ EATEST_VERIFY(iSlist == i16);
+
+ iSlist = find_end(sListTest.begin(), sListTest.end(), pSubstring1, pSubstring1 + strlen(pSubstring1), equal_to<char>());
+ i16 = sListTest.begin();
+ advance(i16, 16);
+ EATEST_VERIFY(iSlist == i16);
+
+ iSlist = find_end(sListTest.begin(), sListTest.end(), pSubstring2, pSubstring2 + strlen(pSubstring2));
+ EATEST_VERIFY(iSlist == sListTest.end());
+
+ iSlist = find_end(sListTest.begin(), sListTest.end(), pSubstring2, pSubstring2 + strlen(pSubstring2), equal_to<char>());
+ EATEST_VERIFY(iSlist == sListTest.end());
+ }
+
+
+ {
+ // OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result)
+ // OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare compare)
+
+ int intArray1[] = { 0, 0, 2, 5, 8, 8, 12, 24, 26, 43 };
+ int intArray2[] = { 0, 0, 0, 5, 7, 8, 11, 24, 25, 43 };
+ int intArray3[] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
+
+ set_difference(intArray1, intArray1 + 0, intArray2, intArray2 + 0, intArray3);
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_difference", 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, -1));
+
+ set_difference(intArray1, intArray1 + 10, intArray2, intArray2 + 10, intArray3);
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_difference", 2, 8, 12, 26, 9, 9, 9, 9, 9, 9, -1));
+
+ intArray3[0] = intArray3[1] = intArray3[2] = 9;
+
+ set_difference(intArray1, intArray1 + 10, intArray2, intArray2 + 10, intArray3, less<int>());
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_difference", 2, 8, 12, 26, 9, 9, 9, 9, 9, 9, -1));
+ }
+
+
+ {
+ // OutputIterator set_symmetric_difference(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result)
+ // OutputIterator set_symmetric_difference(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare compare)
+
+ int intArray1[] = { 0, 0, 2, 5, 8, 8, 12, 24, 26, 43 };
+ int intArray2[] = { 0, 0, 0, 5, 7, 8, 11, 24, 25, 43 };
+ int intArray3[] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
+
+ set_symmetric_difference(intArray1, intArray1 + 0, intArray2, intArray2 + 0, intArray3);
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_symmetric_difference", 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, -1));
+
+ set_symmetric_difference(intArray1, intArray1 + 10, intArray2, intArray2 + 10, intArray3);
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_symmetric_difference", 0, 2, 7, 8, 11, 12, 25, 26, 9, 9, -1));
+
+ intArray3[0] = intArray3[1] = intArray3[2] = intArray3[4] = intArray3[5] = intArray3[6] = 9;
+
+ set_symmetric_difference(intArray1, intArray1 + 10, intArray2, intArray2 + 10, intArray3, less<int>());
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_symmetric_difference", 0, 2, 7, 8, 11, 12, 25, 26, 9, 9, -1));
+ }
+
+
+ {
+ // OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result)
+ // OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare compare)
+
+ int intArray1[] = { 0, 0, 2, 5, 8, 8, 12, 24, 26, 43 };
+ int intArray2[] = { 0, 0, 0, 5, 7, 8, 11, 24, 25, 43 };
+ int intArray3[] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
+
+ set_intersection(intArray1, intArray1 + 0, intArray2, intArray2 + 0, intArray3);
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_intersection", 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, -1));
+
+ set_intersection(intArray1, intArray1 + 10, intArray2, intArray2 + 10, intArray3);
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_intersection", 0, 0, 5, 8, 24, 43, 9, 9, 9, 9, -1));
+
+ intArray3[0] = intArray3[1] = intArray3[2] = intArray3[4] = intArray3[5] = intArray3[6] = 9;
+
+ set_intersection(intArray1, intArray1 + 10, intArray2, intArray2 + 10, intArray3, less<int>());
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 10, int(), "set_intersection", 0, 0, 5, 8, 24, 43, 9, 9, 9, 9, -1));
+ }
+
+
+ {
+ // OutputIterator set_union(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result)
+ // OutputIterator set_union(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare compare)
+
+ int intArray1[] = { 0, 0, 2, 5, 8, 8, 12, 24, 26, 43 };
+ int intArray2[] = { 0, 0, 0, 5, 7, 8, 11, 24, 25, 43 };
+ int intArray3[] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
+
+ set_union(intArray1, intArray1 + 0, intArray2, intArray2 + 0, intArray3);
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 20, int(), "set_union", 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, -1));
+
+ set_union(intArray1, intArray1 + 10, intArray2, intArray2 + 10, intArray3);
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 20, int(), "set_union", 0, 0, 0, 2, 5, 7, 8, 8, 11, 12, 24, 25, 26, 43, 9, 9, 9, 9, 9, 9, -1));
+
+ intArray3[0] = intArray3[1] = intArray3[2] = intArray3[3] = intArray3[4] = intArray3[5] = intArray3[6] = intArray3[7] = intArray3[8] = intArray3[9] = intArray3[10] = intArray3[11] = 9;
+
+ set_union(intArray1, intArray1 + 10, intArray2, intArray2 + 10, intArray3, less<int>());
+ EATEST_VERIFY(VerifySequence(intArray3, intArray3 + 20, int(), "set_union", 0, 0, 0, 2, 5, 7, 8, 8, 11, 12, 24, 25, 26, 43, 9, 9, 9, 9, 9, 9, -1));
+ }
+
+
+ // set_difference_2
+ {
+ // template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+ // void set_difference_2(InputIterator1 first1, InputIterator1 last1,
+ // InputIterator2 first2, InputIterator2 last2,
+ // OutputIterator result1, OutputIterator result2)
+ {
+ const eastl::vector<int> v1 = {1, 2, 4, 5, 7, 7, 9};
+ const eastl::vector<int> v2 = { 2, 6, 9};
+ eastl::vector<int> only_v1, only_v2;
+
+ eastl::set_difference_2(v1.begin(), v1.end(), v2.begin(), v2.end(),
+ eastl::inserter(only_v1, only_v1.begin()),
+ eastl::inserter(only_v2, only_v2.begin()));
+
+ EATEST_VERIFY((only_v1 == eastl::vector<int>{1, 4, 5, 7, 7}));
+ EATEST_VERIFY((only_v2 == eastl::vector<int>{6}));
+ }
+
+ // template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+ // void set_difference_2(InputIterator1 first1, InputIterator1 last1,
+ // InputIterator2 first2, InputIterator2 last2,
+ // OutputIterator result1, OutputIterator result2, Compare compare)
+ {
+ struct local
+ {
+ int data = -1;
+ bool operator==(const local& other) const
+ { return data == other.data; }
+ };
+
+ const eastl::vector<local> v1 = {{1}, {2}, {4}, {5}, {7}, {7}, {9}};
+ const eastl::vector<local> v2 = { {2}, {6}, {9}};
+ eastl::vector<local> only_v1, only_v2;
+
+ eastl::set_difference_2(v1.begin(), v1.end(), v2.begin(), v2.end(),
+ eastl::inserter(only_v1, only_v1.begin()),
+ eastl::inserter(only_v2, only_v2.begin()),
+ [](const local& lhs, const local& rhs) { return lhs.data < rhs.data; });
+
+ EATEST_VERIFY((only_v1 == eastl::vector<local>{{1}, {4}, {5}, {7}, {7}}));
+ EATEST_VERIFY((only_v2 == eastl::vector<local>{{6}}));
+ }
+ }
+
+
+ // set_decomposition
+ {
+ // OutputIterator3 set_decomposition(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2,
+ // OutputIterator1 result1, OutputIterator2 result2, OutputIterator3 result3)
+ {
+ const eastl::vector<int> v1 = {1, 2, 4, 5, 7, 7, 9};
+ const eastl::vector<int> v2 = { 2, 6, 9};
+ eastl::vector<int> only_v1, only_v2, intersection;
+
+ eastl::set_decomposition(v1.begin(), v1.end(), v2.begin(), v2.end(),
+ eastl::inserter(only_v1, only_v1.begin()),
+ eastl::inserter(only_v2, only_v2.begin()),
+ eastl::inserter(intersection, intersection.begin()));
+
+ EATEST_VERIFY((only_v1 == eastl::vector<int>{1, 4, 5, 7, 7}));
+ EATEST_VERIFY((only_v2 == eastl::vector<int>{6}));
+ EATEST_VERIFY((intersection == eastl::vector<int>{2, 9}));
+ }
+
+ // OutputIterator3 set_decomposition(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2,
+ // OutputIterator1 result1, OutputIterator2 result2, OutputIterator3 result3, Compare compare)
+ {
+ struct local
+ {
+ int data = -1;
+ bool operator==(const local& other) const
+ { return data == other.data; }
+ };
+
+ const eastl::vector<local> v1 = {{1}, {2}, {4}, {5}, {7}, {7}, {9}};
+ const eastl::vector<local> v2 = { {2}, {6}, {9}};
+ eastl::vector<local> only_v1, only_v2, intersection;
+
+ eastl::set_decomposition(v1.begin(), v1.end(), v2.begin(), v2.end(),
+ eastl::inserter(only_v1, only_v1.begin()),
+ eastl::inserter(only_v2, only_v2.begin()),
+ eastl::inserter(intersection, intersection.begin()),
+ [](const local& lhs, const local& rhs) { return lhs.data < rhs.data; });
+
+ EATEST_VERIFY((only_v1 == eastl::vector<local>{{1}, {4}, {5}, {7}, {7}}));
+ EATEST_VERIFY((only_v2 == eastl::vector<local>{{6}}));
+ EATEST_VERIFY((intersection == eastl::vector<local>{{2}, {9}}));
+ }
+ }
+
+ {
+ // template<typename ForwardIterator1, typename ForwardIterator2>
+ // bool is_permutation(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2)
+
+ // template<typename ForwardIterator1, typename ForwardIterator2, class BinaryPredicate>
+ // bool is_permutation(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, BinaryPredicate predicate)
+ EASTLTest_Rand eastlRNG(EA::UnitTest::GetRandSeed());
+
+ {
+ int intArray1[] = { 0, 1, 2, 3, 4 };
+ int intArray2[] = { 0, 1, 2, 3, 4 };
+
+ // Test an empty set.
+ EATEST_VERIFY(eastl::is_permutation(intArray1, intArray1 + 0, intArray2));
+
+ // Test two identical sets.
+ EATEST_VERIFY(eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2));
+ eastl::random_shuffle(intArray1, intArray1 + EAArrayCount(intArray1), eastlRNG);
+
+ // Test order randomization.
+ EATEST_VERIFY(eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2));
+ eastl::random_shuffle(intArray2, intArray2 + EAArrayCount(intArray2), eastlRNG);
+ EATEST_VERIFY(eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2));
+
+ // Test the case where there's a difference.
+ intArray2[4] = intArray2[3]; // This change guarantees is_permutation will return false.
+ EATEST_VERIFY(!eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2));
+ }
+
+ {
+ int intArray1[] = { 0, 0, 0, 1, 1 };
+ int intArray2[] = { 0, 0, 0, 1, 1 };
+
+ // Test two identical sets.
+ EATEST_VERIFY(eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2));
+ eastl::random_shuffle(intArray1, intArray1 + EAArrayCount(intArray1), eastlRNG);
+
+ // Test order randomization.
+ EATEST_VERIFY(eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2));
+ eastl::random_shuffle(intArray2, intArray2 + EAArrayCount(intArray2), eastlRNG);
+ EATEST_VERIFY(eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2));
+
+ // Test the case where there's a difference.
+ intArray2[4] = (intArray2[4] == 0) ? 1 : 0;
+ EATEST_VERIFY(!eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2));
+ }
+
+ for(int n = 0; n < 100000; n++)
+ {
+ eastl_size_t intArray1[6];
+ eastl_size_t intArray2[6];
+
+ for(size_t i = 0; i < EAArrayCount(intArray1); i++)
+ {
+ intArray1[i] = eastlRNG.RandLimit(6);
+ intArray2[i] = eastlRNG.RandLimit(6);
+ }
+
+ bool isPermutation = eastl::is_permutation(intArray1, intArray1 + EAArrayCount(intArray1), intArray2);
+
+ // If is_permutation returned true, then sorted versions of the two arrays should be identical.
+ eastl::sort(intArray1, intArray1 + EAArrayCount(intArray1));
+ eastl::sort(intArray2, intArray2 + EAArrayCount(intArray2));
+
+ eastl::pair<eastl_size_t*, eastl_size_t*> mismatchResult = eastl::mismatch(intArray1, intArray1 + EAArrayCount(intArray1), intArray2);
+ bool isIdentical = (mismatchResult.first == (intArray1 + EAArrayCount(intArray1)));
+
+ EATEST_VERIFY(isPermutation == isIdentical); // With an array size of 6, isPermutation ends up being true about 1 in 400 times here.
+ }
+ }
+
+ {
+ //template<typename BidirectionalIterator>
+ //bool next_permutation(BidirectionalIterator first, BidirectionalIterator last);
+
+ //template<typename BidirectionalIterator, typename Compare>
+ //bool next_permutation(BidirectionalIterator first, BidirectionalIterator last, Compare compare);
+
+ uint64_t count;
+ vector<int> intArray;
+ for(int i = 0; i < 8; i++)
+ intArray.push_back(i);
+
+ count = 0;
+ do {
+ ++count;
+ } while(next_permutation(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(count == 40320); // count = n!
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+
+ count = 0;
+ do {
+ ++count;
+ } while(next_permutation(intArray.begin(), intArray.end(), eastl::less<int>()));
+ EATEST_VERIFY(count == 40320); // count = n!
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ }
+
+
+ {
+ // template <typename ForwardIterator>
+ // ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator last);
+
+ // eastl::array (ContiguousIterator/Pointer)
+ const eastl_size_t kRotateArraySize = 10;
+ typedef eastl::array<int, kRotateArraySize> IntArray;
+
+ { // This type is templated, so we can't run a loop over various sizes.
+ IntArray intArray;
+
+ for(eastl_size_t i = 0; i < kRotateArraySize; i++)
+ {
+ eastl::generate_n(intArray.begin(), kRotateArraySize, GenerateIncrementalIntegers<int>());
+ IntArray::iterator intArrayItMiddle = eastl::next(intArray.begin(), i);
+ IntArray::iterator intArrayIt = eastl::rotate(intArray.begin(), intArrayItMiddle, intArray.end());
+
+ for(eastl_size_t j = 0; j < kRotateArraySize; j++)
+ {
+ if(intArrayIt == intArray.end())
+ intArrayIt = intArray.begin();
+ EATEST_VERIFY(*intArrayIt++ == (int)j);
+ }
+ }
+ }
+
+ // eastl::vector (ContiguousIterator)
+ typedef eastl::vector<int> IntVector;
+
+ for(eastl_size_t s = 10; s < 500; s += (eastl_size_t)rng.RandRange(50, 100))
+ {
+ IntVector intVector(s, 0);
+
+ for(eastl_size_t i = 0; i < s; i++)
+ {
+ eastl::generate_n(intVector.begin(), s, GenerateIncrementalIntegers<int>());
+ IntVector::iterator intVectorItMiddle = eastl::next(intVector.begin(), i);
+ IntVector::iterator intVectorIt = eastl::rotate(intVector.begin(), intVectorItMiddle, intVector.end());
+
+ for(eastl_size_t j = 0; j < s; j++)
+ {
+ if(intVectorIt == intVector.end())
+ intVectorIt = intVector.begin();
+ EATEST_VERIFY(*intVectorIt++ == (int)j);
+ }
+ }
+ }
+
+ // eastl::deque (RandomAccessIterator)
+ typedef eastl::deque<int> IntDeque;
+
+ for(eastl_size_t s = 10; s < 500; s += (eastl_size_t)rng.RandRange(50, 100))
+ {
+ IntDeque intDeque(s, 0);
+
+ for(eastl_size_t i = 0; i < s; i++)
+ {
+ eastl::generate_n(intDeque.begin(), s, GenerateIncrementalIntegers<int>());
+ IntDeque::iterator intDequeItMiddle = eastl::next(intDeque.begin(), i);
+ IntDeque::iterator intDequeIt = eastl::rotate(intDeque.begin(), intDequeItMiddle, intDeque.end());
+
+ for(eastl_size_t j = 0; j < s; j++)
+ {
+ if(intDequeIt == intDeque.end())
+ intDequeIt = intDeque.begin();
+ EATEST_VERIFY(*intDequeIt++ == (int)j);
+ }
+ }
+ }
+
+ // eastl::list (BidirectionalIterator)
+ typedef eastl::list<int> IntList;
+
+ for(eastl_size_t s = 10; s < 500; s += (eastl_size_t)rng.RandRange(50, 100))
+ {
+ IntList intList(s, 0);
+
+ for(eastl_size_t i = 0; i < s; i++)
+ {
+ eastl::generate_n(intList.begin(), s, GenerateIncrementalIntegers<int>());
+ IntList::iterator intListItMiddle = eastl::next(intList.begin(), i);
+ IntList::iterator intListIt = eastl::rotate(intList.begin(), intListItMiddle, intList.end());
+
+ for(eastl_size_t j = 0; j < s; j++)
+ {
+ if(intListIt == intList.end())
+ intListIt = intList.begin();
+ EATEST_VERIFY(*intListIt++ == (int)j);
+ }
+ }
+ }
+
+ // eastl::slist (ForwardIterator)
+ typedef eastl::slist<int> IntSlist;
+
+ for(eastl_size_t s = 10; s < 500; s += (eastl_size_t)rng.RandRange(50, 100))
+ {
+ IntSlist intSlist(s, 0);
+
+ for(eastl_size_t i = 0; i < s; i++)
+ {
+ eastl::generate_n(intSlist.begin(), s, GenerateIncrementalIntegers<int>());
+ IntSlist::iterator intSlistItMiddle = eastl::next(intSlist.begin(), i);
+ IntSlist::iterator intSlistIt = eastl::rotate(intSlist.begin(), intSlistItMiddle, intSlist.end());
+
+ for(eastl_size_t j = 0; j < s; j++)
+ {
+ if(intSlistIt == intSlist.end())
+ intSlistIt = intSlist.begin();
+ EATEST_VERIFY(*intSlistIt++ == (int)j);
+ }
+ }
+ }
+ }
+
+ // test eastl::sort with move-only type
+ {
+ {
+ eastl::vector<eastl::unique_ptr<int>> vec;
+ eastl::sort(vec.begin(), vec.end(), [](const eastl::unique_ptr<int>& lhs, const eastl::unique_ptr<int>& rhs) { return *lhs < *rhs; });
+ }
+ {
+ eastl::vector<eastl::unique_ptr<int>> vec;
+ eastl::sort(vec.begin(), vec.end());
+ }
+ {
+ eastl::vector<MissingMoveConstructor> vec;
+ eastl::sort(vec.begin(), vec.end(), [](const MissingMoveConstructor& lhs, const MissingMoveConstructor& rhs) { return lhs < rhs; });
+ }
+ {
+ eastl::vector<MissingMoveConstructor> vec;
+ eastl::sort(vec.begin(), vec.end());
+ }
+ {
+ eastl::vector<MissingMoveAssignable> vec;
+ eastl::sort(vec.begin(), vec.end(), [](const MissingMoveAssignable& lhs, const MissingMoveAssignable& rhs) { return lhs < rhs; });
+ }
+ {
+ eastl::vector<MissingMoveAssignable> vec;
+ eastl::sort(vec.begin(), vec.end());
+ }
+ {
+ eastl::vector<eastl::unique_ptr<int>> vec;
+ vec.emplace_back(new int(7));
+ vec.emplace_back(new int(-42));
+ vec.emplace_back(new int(5));
+ eastl::sort(vec.begin(), vec.end(), [](const eastl::unique_ptr<int>& lhs, const eastl::unique_ptr<int>& rhs) { return *lhs < *rhs; });
+ EATEST_VERIFY(*vec[0] == -42);
+ EATEST_VERIFY(*vec[1] == 5);
+ EATEST_VERIFY(*vec[2] == 7);
+ }
+ {
+ for (unsigned tests = 0; tests < 50; ++tests)
+ {
+ eastl::vector<eastl::unique_ptr<int>> vec1;
+
+ for (int i = 0; i < 100; ++i)
+ {
+ int randomNumber = rng();
+ vec1.emplace_back(new int(randomNumber));
+ }
+
+ auto vec1Cmp = [](const eastl::unique_ptr<int>& lhs, const eastl::unique_ptr<int>& rhs) { return *lhs < *rhs; };
+ eastl::sort(vec1.begin(), vec1.end(), vec1Cmp);
+ EATEST_VERIFY(eastl::is_sorted(vec1.begin(), vec1.end(), vec1Cmp));
+ }
+ }
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestAllocator.cpp b/EASTL/test/source/TestAllocator.cpp
new file mode 100644
index 0000000..2a28c07
--- /dev/null
+++ b/EASTL/test/source/TestAllocator.cpp
@@ -0,0 +1,405 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/allocator.h>
+#include <EASTL/allocator_malloc.h>
+#include <EASTL/fixed_allocator.h>
+#include <EASTL/core_allocator_adapter.h>
+#include <EASTL/list.h>
+#include <EAStdC/EAString.h>
+#include <EAStdC/EAAlignment.h>
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// fixed_pool_reference
+//
+struct fixed_pool_reference
+{
+public:
+ fixed_pool_reference(const char* = NULL)
+ {
+ mpFixedPool = NULL;
+ }
+
+ fixed_pool_reference(eastl::fixed_pool& fixedPool)
+ {
+ mpFixedPool = &fixedPool;
+ }
+
+ fixed_pool_reference(const fixed_pool_reference& x)
+ {
+ mpFixedPool = x.mpFixedPool;
+ }
+
+ fixed_pool_reference& operator=(const fixed_pool_reference& x)
+ {
+ mpFixedPool = x.mpFixedPool;
+ return *this;
+ }
+
+ void* allocate(size_t /*n*/, int /*flags*/ = 0)
+ {
+ return mpFixedPool->allocate();
+ }
+
+ void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+ {
+ return mpFixedPool->allocate();
+ }
+
+ void deallocate(void* p, size_t /*n*/)
+ {
+ return mpFixedPool->deallocate(p);
+ }
+
+ const char* get_name() const
+ {
+ return "fixed_pool_reference";
+ }
+
+ void set_name(const char* /*pName*/)
+ {
+ }
+
+protected:
+ friend bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b);
+ friend bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b);
+
+ eastl::fixed_pool* mpFixedPool;
+};
+
+
+inline bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b)
+{
+ return (a.mpFixedPool == b.mpFixedPool);
+}
+
+inline bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b)
+{
+ return (a.mpFixedPool != b.mpFixedPool);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedAllocator
+//
+EA_DISABLE_VC_WARNING(6262)
+static int TestFixedAllocator()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ { // fixed_allocator
+ typedef eastl::list<int, fixed_allocator> IntList;
+ typedef IntList::node_type IntListNode;
+
+ const size_t kBufferCount = 200;
+ IntListNode buffer1[kBufferCount];
+ IntList intList1;
+ const size_t kAlignOfIntListNode = EA_ALIGN_OF(IntListNode);
+
+ intList1.get_allocator().init(buffer1, sizeof(buffer1), sizeof(IntListNode), kAlignOfIntListNode);
+
+ for(size_t i = 0; i < kBufferCount; i++)
+ intList1.push_back(0);
+
+ EATEST_VERIFY(intList1.size() == kBufferCount);
+
+ // Try making a copy.
+ IntListNode buffer2[kBufferCount];
+ IntList intList2;
+ intList2.get_allocator().init(buffer2, sizeof(buffer2), sizeof(IntListNode), kAlignOfIntListNode);
+ intList2 = intList1;
+ EATEST_VERIFY(intList2.size() == kBufferCount);
+ }
+
+ // fixed_allocator_with_overflow, ensure allocations are coming from fixed buffer. This is to
+ // prevent a reported user regression where all allocations were being routed to the overflow
+ // allocator.
+ {
+ const int DEFAULT_VALUE = 0xbaadf00d;
+ const int TEST_VALUE = 0x12345689;
+ const size_t kBufferCount = 10;
+
+ typedef eastl::list<int, fixed_allocator_with_overflow> IntList;
+ typedef IntList::node_type IntListNode;
+
+ const size_t kAlignOfIntListNode = EA_ALIGN_OF(IntListNode);
+
+ // ensure the fixed buffer contains the default value that will be replaced
+ IntListNode buffer1[kBufferCount];
+ for (int i = 0; i < kBufferCount; i++)
+ {
+ buffer1[i].mValue = DEFAULT_VALUE;
+ EATEST_VERIFY(buffer1[i].mValue == DEFAULT_VALUE);
+ }
+
+ IntList intList1;
+
+ // replace all the values in the local buffer with the test value
+ intList1.get_allocator().init(buffer1, sizeof(buffer1), sizeof(IntListNode), kAlignOfIntListNode);
+ for (size_t i = 0; i < kBufferCount; i++)
+ intList1.push_back(TEST_VALUE);
+
+ // ensure the local buffer has been altered with the contents of the list::push_back
+ for (int i = 0; i < kBufferCount; i++)
+ {
+ EATEST_VERIFY(buffer1[i].mValue == TEST_VALUE);
+ }
+
+ intList1.clear();
+ }
+
+ { // fixed_allocator_with_overflow
+ typedef eastl::list<int, fixed_allocator_with_overflow> IntList;
+ typedef IntList::node_type IntListNode;
+
+ const size_t kBufferCount = 200;
+ IntListNode buffer1[kBufferCount];
+ IntList intList1;
+ const size_t kAlignOfIntListNode = EA_ALIGN_OF(IntListNode);
+
+ intList1.get_allocator().init(buffer1, sizeof(buffer1), sizeof(IntListNode), kAlignOfIntListNode);
+
+ for(size_t i = 0; i < kBufferCount * 2; i++)
+ intList1.push_back(0);
+
+ EATEST_VERIFY(intList1.size() == (kBufferCount * 2));
+
+ // Try making a copy.
+ IntListNode buffer2[kBufferCount];
+ IntList intList2;
+ intList2.get_allocator().init(buffer2, sizeof(buffer2), sizeof(IntListNode), kAlignOfIntListNode);
+ intList2 = intList1;
+ EATEST_VERIFY(intList2.size() == (kBufferCount * 2));
+ }
+
+ {
+ // fixed_pool_reference
+ typedef eastl::list<int, fixed_pool_reference> WidgetList;
+
+ WidgetList::node_type buffer[16];
+ eastl::fixed_pool myPool(buffer, sizeof(buffer), sizeof(WidgetList::node_type), 16);
+
+ WidgetList myList1(myPool);
+ WidgetList myList2(myPool);
+
+ myList1.push_back(1);
+ myList2.push_back(1);
+ EATEST_VERIFY(myList1 == myList2);
+
+ myList1.push_back(2);
+ myList1.sort();
+ myList2.push_front(2);
+ myList2.sort();
+ EATEST_VERIFY(myList1 == myList2);
+ }
+
+ return nErrorCount;
+}
+EA_RESTORE_VC_WARNING()
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestAllocatorMalloc
+//
+static int TestAllocatorMalloc()
+{
+ int nErrorCount = 0;
+
+ {
+ typedef eastl::list<int, eastl::allocator_malloc> WidgetList;
+
+ WidgetList myList1;
+ WidgetList myList2;
+
+ myList1.push_back(1);
+ myList2.push_back(1);
+ EATEST_VERIFY(myList1 == myList2);
+
+ myList1.push_back(2);
+ myList1.sort();
+ myList2.push_front(2);
+ myList2.sort();
+ EATEST_VERIFY(myList1 == myList2);
+
+ #if EASTL_ALIGNED_MALLOC_AVAILABLE
+
+ #endif
+ }
+
+ return nErrorCount;
+}
+
+
+
+#if EASTL_DLL
+ void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+ void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+#endif
+
+
+struct EASTLTestCoreAllocator
+{
+public:
+ void* Alloc(size_t size, const char* name, unsigned int flags)
+ {
+ return ::operator new[](size, name, flags, 0, __FILE__, __LINE__);
+ }
+
+ void* Alloc(size_t size, const char* name, unsigned int flags,
+ unsigned int alignment, unsigned int alignOffset = 0)
+ {
+ return ::operator new[](size, alignment, alignOffset, name, flags, 0, __FILE__, __LINE__);
+ }
+
+ void Free(void* p, size_t /*size*/ = 0)
+ {
+ ::operator delete((char*)p);
+ }
+
+ static EASTLTestCoreAllocator* GetDefaultAllocator();
+};
+
+EASTLTestCoreAllocator gEASTLTestCoreAllocator;
+
+EASTLTestCoreAllocator* EASTLTestCoreAllocator::GetDefaultAllocator()
+{
+ return &gEASTLTestCoreAllocator;
+}
+
+
+
+struct TestClass
+{
+ mutable int mX;
+
+ TestClass() : mX(37) { }
+
+ void Increment()
+ { mX++; }
+
+ void IncrementConst() const
+ { mX++; }
+
+ int MultiplyBy(int x)
+ { return mX * x; }
+
+ int MultiplyByConst(int x) const
+ { return mX * x; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// TestCoreAllocatorAdapter
+//
+static int TestCoreAllocatorAdapter()
+{
+ int nErrorCount = 0;
+
+#if EASTL_CORE_ALLOCATOR_ENABLED
+ typedef EA::Allocator::CoreAllocatorAdapter<EASTLTestCoreAllocator> Adapter;
+
+ eastl::list<TestClass, Adapter> widgetList(Adapter("UI/WidgetList", &gEASTLTestCoreAllocator));
+ widgetList.push_back(TestClass());
+ EATEST_VERIFY(widgetList.size() == 1);
+
+ eastl::vector<TestClass, Adapter> widgetVector(100, Adapter("UI/WidgetVector", &gEASTLTestCoreAllocator));
+ widgetVector.push_back(TestClass());
+ EATEST_VERIFY(widgetVector.size() == 101);
+
+ eastl::vector<TestClass, Adapter> widgetVector2(widgetVector);
+ widgetVector2.resize(400);
+ EATEST_VERIFY(widgetVector2.size() == 400);
+#endif
+
+ return nErrorCount;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestSwapAllocator
+//
+static int TestSwapAllocator()
+{
+ int nErrorCount = 0;
+
+ {
+ InstanceAllocator a(nullptr, (uint8_t)111), b(nullptr, (uint8_t)222);
+ eastl::swap(a, b);
+
+ EATEST_VERIFY(a.mInstanceId == 222);
+ EATEST_VERIFY(b.mInstanceId == 111);
+
+ EATEST_VERIFY(EA::StdC::Strcmp(a.get_name(), "InstanceAllocator 222") == 0);
+ EATEST_VERIFY(EA::StdC::Strcmp(b.get_name(), "InstanceAllocator 111") == 0);
+ }
+
+ return nErrorCount;
+}
+
+static int TestAllocationOffsetAndAlignment()
+{
+ int nErrorCount = 0;
+
+ auto testAllocatorAlignment = [&nErrorCount](int requestedSize, int requestedAlignment, int requestedOffset)
+ {
+ CountingAllocator::resetCount();
+ CountingAllocator a;
+
+ void* p = allocate_memory(a, requestedSize, requestedAlignment, requestedOffset);
+
+ EATEST_VERIFY(p != nullptr);
+ EATEST_VERIFY(EA::StdC::IsAligned(p, requestedAlignment));
+
+ a.deallocate(p, requestedSize);
+ EATEST_VERIFY(CountingAllocator::getActiveAllocationSize() == 0);
+ };
+
+ testAllocatorAlignment(100, 1, 0);
+ testAllocatorAlignment(100, 2, 0);
+ testAllocatorAlignment(100, 4, 0);
+ testAllocatorAlignment(100, 8, 0);
+ testAllocatorAlignment(100, 16, 0);
+
+ testAllocatorAlignment(100, 1, 16);
+ testAllocatorAlignment(100, 2, 16);
+ testAllocatorAlignment(100, 4, 16);
+ testAllocatorAlignment(100, 8, 16);
+ testAllocatorAlignment(100, 16, 16);
+
+ return nErrorCount;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestAllocator
+//
+int TestAllocator()
+{
+ int nErrorCount = 0;
+
+ nErrorCount += TestAllocationOffsetAndAlignment();
+ nErrorCount += TestFixedAllocator();
+ nErrorCount += TestAllocatorMalloc();
+ nErrorCount += TestCoreAllocatorAdapter();
+ nErrorCount += TestSwapAllocator();
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestAny.cpp b/EASTL/test/source/TestAny.cpp
new file mode 100644
index 0000000..fedc85f
--- /dev/null
+++ b/EASTL/test/source/TestAny.cpp
@@ -0,0 +1,472 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/any.h>
+#include <EASTL/vector.h>
+#include <EASTL/string.h>
+#include <EASTL/numeric.h>
+#include <EAStdC/EAString.h>
+
+
+// SmallTestObject
+//
+struct SmallTestObject
+{
+ static int mCtorCount;
+
+ SmallTestObject() EA_NOEXCEPT { mCtorCount++; }
+ SmallTestObject(const SmallTestObject&) EA_NOEXCEPT { mCtorCount++; }
+ SmallTestObject(SmallTestObject&&) EA_NOEXCEPT { mCtorCount++; }
+ SmallTestObject& operator=(const SmallTestObject&) EA_NOEXCEPT { mCtorCount++; return *this; }
+ ~SmallTestObject() EA_NOEXCEPT { mCtorCount--; }
+
+ static void Reset() { mCtorCount = 0; }
+ static bool IsClear() { return mCtorCount == 0; }
+};
+
+int SmallTestObject::mCtorCount = 0;
+
+
+// RequiresInitList
+//
+struct RequiresInitList
+{
+ RequiresInitList(std::initializer_list<int> ilist)
+ : sum(eastl::accumulate(begin(ilist), end(ilist), 0)) {}
+
+ int sum;
+};
+
+
+int TestAny()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ // NOTE(rparolin): Ensure 'any' is at least the size of an eastl::string and an eastl::vector to prevent heap
+ // allocation of handle objects (objects that point to a heap allocation). This will reduce memory pressure since
+ // eastl::string will be a commonly used type. We could also test with a vector.
+ {
+ static_assert(sizeof(eastl::string) <= sizeof(eastl::any), "ensure that 'any' has enough local memory to store a string");
+ static_assert(sizeof(eastl::vector<int>) <= sizeof(eastl::any), "ensure that 'any' has enough local memory to store a vector");
+ }
+
+ {
+ // default construct
+ any a;
+ VERIFY(a.has_value() == false);
+ }
+
+ {
+ // test object ctors & dtors are called for a large object
+ TestObject::Reset();
+ { any a{TestObject()}; }
+ VERIFY(TestObject::IsClear());
+ }
+
+ {
+ // test object ctors & dtors are called for a small object
+ SmallTestObject::Reset();
+ { any a{SmallTestObject()}; }
+ VERIFY(SmallTestObject::IsClear());
+ }
+
+ {
+ any a(42);
+ VERIFY(a.has_value() == true);
+
+ VERIFY(any_cast<int>(a) == 42);
+ VERIFY(any_cast<int>(a) != 1337);
+ any_cast<int&>(a) = 10;
+ VERIFY(any_cast<int>(a) == 10);
+
+ a = 1.f;
+ any_cast<float&>(a) = 1337.f;
+ VERIFY(any_cast<float>(a) == 1337.f);
+
+ a = 4343;
+ VERIFY(any_cast<int>(a) == 4343);
+
+ a = string("hello world");
+ VERIFY(any_cast<string>(a) == "hello world");
+ VERIFY(any_cast<string&>(a) == "hello world");
+ }
+
+ {
+ struct custom_type { int data; };
+
+ any a = custom_type{};
+ any_cast<custom_type&>(a).data = 42;
+ VERIFY(any_cast<custom_type>(a).data == 42);
+ }
+
+ {
+ any a = 42;
+ VERIFY(any_cast<int>(a) == 42);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ int throwCount = 0;
+ try { VERIFY(any_cast<short>(a) == 42); }
+ catch (bad_any_cast) { throwCount++; }
+ VERIFY(throwCount != 0);
+ #endif
+ }
+
+ {
+ vector<any> va = {42, 'a', 42.f, 3333u, 4444ul, 5555ull, 6666.0};
+
+ VERIFY(any_cast<int>(va[0]) == 42);
+ VERIFY(any_cast<char>(va[1]) == 'a');
+ VERIFY(any_cast<float>(va[2]) == 42.f);
+ VERIFY(any_cast<unsigned>(va[3]) == 3333u);
+ VERIFY(any_cast<unsigned long>(va[4]) == 4444ul);
+ VERIFY(any_cast<unsigned long long>(va[5]) == 5555ull);
+ VERIFY(any_cast<double>(va[6]) == 6666.0);
+ }
+
+ {
+ any a(string("test string"));
+ VERIFY(a.has_value());
+ VERIFY(any_cast<string>(a) == "test string");
+ }
+
+ {
+ vector<any> va = {42, string("rob"), 'a', 42.f};
+ VERIFY(any_cast<int>(va[0]) == 42);
+ VERIFY(any_cast<string>(va[1]) == "rob");
+ VERIFY(any_cast<char>(va[2]) == 'a');
+ VERIFY(any_cast<float>(va[3]) == 42.f);
+ }
+
+ {
+ vector<any> va;
+ va.push_back(42);
+ va.push_back(string("rob"));
+ va.push_back('a');
+ va.push_back(42.f);
+
+ VERIFY(any_cast<int>(va[0]) == 42);
+ VERIFY(any_cast<string>(va[1]) == "rob");
+ VERIFY(any_cast<char>(va[2]) == 'a');
+ VERIFY(any_cast<float>(va[3]) == 42.f);
+ }
+
+ // NOTE(rparolin): Replaces a small 'any' object with a large one and make sure it doesn't corrupt
+ // the surrounding memory in the vector.
+ {
+ TestObject::Reset();
+ {
+ vector<any> va = {42, 'a', 42.f, 3333u, 4444ul, 5555ull, 6666.0};
+
+ VERIFY(any_cast<int>(va[0]) == 42);
+ VERIFY(any_cast<char>(va[1]) == 'a');
+ VERIFY(any_cast<float>(va[2]) == 42.f);
+ VERIFY(any_cast<unsigned>(va[3]) == 3333u);
+ VERIFY(any_cast<unsigned long>(va[4]) == 4444ul);
+ VERIFY(any_cast<unsigned long long>(va[5]) == 5555ull);
+ VERIFY(any_cast<double>(va[6]) == 6666.0);
+
+ va[3] = TestObject(3333); // replace a small integral with a large heap allocated object.
+
+ VERIFY(any_cast<int>(va[0]) == 42);
+ VERIFY(any_cast<char>(va[1]) == 'a');
+ VERIFY(any_cast<float>(va[2]) == 42.f);
+ VERIFY(any_cast<TestObject>(va[3]).mX == 3333); // not 3333u because TestObject ctor takes a signed type.
+ VERIFY(any_cast<unsigned long>(va[4]) == 4444ul);
+ VERIFY(any_cast<unsigned long long>(va[5]) == 5555ull);
+ VERIFY(any_cast<double>(va[6]) == 6666.0);
+ }
+ VERIFY(TestObject::IsClear());
+ }
+
+ {
+ any a(string("test string"));
+ VERIFY(a.has_value());
+ a.reset();
+ VERIFY(!a.has_value());
+ }
+
+ {
+ any a1 = 42;
+ any a2 = a1;
+
+ VERIFY(a1.has_value());
+ VERIFY(a2.has_value());
+ VERIFY(any_cast<int>(a1) == any_cast<int>(a2));
+ }
+
+ {
+ any a1;
+ VERIFY(!a1.has_value());
+ {
+ any a2(string("test string"));
+ a1 = any_cast<string>(a2);
+
+ VERIFY(a1.has_value());
+ }
+ VERIFY(any_cast<string>(a1) == "test string");
+ VERIFY(a1.has_value());
+ }
+
+ {
+ any a1;
+ VERIFY(!a1.has_value());
+ {
+ any a2(string("test string"));
+ a1 = a2;
+ VERIFY(a1.has_value());
+ }
+ VERIFY(any_cast<string&>(a1) == "test string");
+ VERIFY(a1.has_value());
+ }
+
+ // swap tests
+ {
+ {
+ any a1 = 42;
+ any a2 = 24;
+ VERIFY(any_cast<int>(a1) == 42);
+ VERIFY(any_cast<int>(a2) == 24);
+
+ a1.swap(a2);
+ VERIFY(any_cast<int>(a1) == 24);
+ VERIFY(any_cast<int>(a2) == 42);
+
+ eastl::swap(a1, a2);
+ VERIFY(any_cast<int>(a1) == 42);
+ VERIFY(any_cast<int>(a2) == 24);
+ }
+ {
+ any a1 = string("hello");
+ any a2 = string("world");
+ VERIFY(any_cast<string>(a1) == "hello");
+ VERIFY(any_cast<string>(a2) == "world");
+
+ a1.swap(a2);
+ VERIFY(any_cast<string>(a1) == "world");
+ VERIFY(any_cast<string>(a2) == "hello");
+
+ eastl::swap(a1, a2);
+ VERIFY(any_cast<string>(a1) == "hello");
+ VERIFY(any_cast<string>(a2) == "world");
+ }
+ }
+
+ #if EASTL_RTTI_ENABLED
+ {
+ #if defined(EA_COMPILER_MSVC)
+ VERIFY(EA::StdC::Strcmp(any(42).type().name(), "int") == 0);
+ VERIFY(EA::StdC::Strcmp(any(42.f).type().name(), "float") == 0);
+ VERIFY(EA::StdC::Strcmp(any(42u).type().name(), "unsigned int") == 0);
+ VERIFY(EA::StdC::Strcmp(any(42ul).type().name(), "unsigned long") == 0);
+ VERIFY(EA::StdC::Strcmp(any(42l).type().name(), "long") == 0);
+
+ #elif defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)
+ VERIFY(EA::StdC::Strcmp(any(42).type().name(), "i") == 0);
+ VERIFY(EA::StdC::Strcmp(any(42.f).type().name(), "f") == 0);
+ VERIFY(EA::StdC::Strcmp(any(42u).type().name(), "j") == 0);
+ VERIFY(EA::StdC::Strcmp(any(42ul).type().name(), "m") == 0);
+ VERIFY(EA::StdC::Strcmp(any(42l).type().name(), "l") == 0);
+ #endif
+ }
+ #endif
+
+ // emplace, small object tests
+ {
+ any a;
+
+ a.emplace<int>(42);
+ VERIFY(a.has_value());
+ VERIFY(any_cast<int>(a) == 42);
+
+ a.emplace<short>((short)8); // no way to define a short literal we must cast here.
+ VERIFY(any_cast<short>(a) == 8);
+ VERIFY(a.has_value());
+
+ a.reset();
+ VERIFY(!a.has_value());
+ }
+
+ // emplace, large object tests
+ {
+ TestObject::Reset();
+ {
+ any a;
+ a.emplace<TestObject>();
+ VERIFY(a.has_value());
+ }
+ VERIFY(TestObject::IsClear());
+ }
+
+ // emplace, initializer_list
+ {
+ {
+ any a;
+ a.emplace<RequiresInitList>(std::initializer_list<int>{1,2,3,4,5,6});
+
+ VERIFY(a.has_value());
+ VERIFY(any_cast<RequiresInitList>(a).sum == 21);
+ }
+ }
+
+ // equivalence tests
+ {
+ any a, b;
+ VERIFY(!a.has_value() == !b.has_value());
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ int bad_any_cast_thrown = 0;
+ try
+ {
+ VERIFY(any_cast<int>(a) == any_cast<int>(b));
+ }
+ catch (eastl::bad_any_cast)
+ {
+ bad_any_cast_thrown++;
+ }
+ VERIFY(bad_any_cast_thrown != 0);
+ #endif
+
+
+ a = 42; b = 24;
+ VERIFY(any_cast<int>(a) != any_cast<int>(b));
+ VERIFY(a.has_value() == b.has_value());
+
+ a = 42; b = 42;
+ VERIFY(any_cast<int>(a) == any_cast<int>(b));
+ VERIFY(a.has_value() == b.has_value());
+ }
+
+ // move tests
+ {
+ any a = string("hello world");
+ VERIFY(any_cast<string&>(a) == "hello world");
+
+ auto s = move(any_cast<string&>(a)); // move string out
+ VERIFY(s == "hello world");
+ VERIFY(any_cast<string&>(a).empty());
+
+ any_cast<string&>(a) = move(s); // move string in
+ VERIFY(any_cast<string&>(a) == "hello world");
+ }
+
+ // nullptr tests
+ {
+ any* a = nullptr;
+ VERIFY(any_cast<int>(a) == nullptr);
+ VERIFY(any_cast<short>(a) == nullptr);
+ VERIFY(any_cast<long>(a) == nullptr);
+ VERIFY(any_cast<string>(a) == nullptr);
+
+ any b;
+ VERIFY(any_cast<short>(&b) == nullptr);
+ VERIFY(any_cast<const short>(&b) == nullptr);
+ VERIFY(any_cast<volatile short>(&b) == nullptr);
+ VERIFY(any_cast<const volatile short>(&b) == nullptr);
+
+ VERIFY(any_cast<short*>(&b) == nullptr);
+ VERIFY(any_cast<const short*>(&b) == nullptr);
+ VERIFY(any_cast<volatile short*>(&b) == nullptr);
+ VERIFY(any_cast<const volatile short*>(&b) == nullptr);
+ }
+
+ // Aligned type tests
+ {
+ {
+ any a = Align16(1337);
+ VERIFY(any_cast<Align16>(a) == Align16(1337));
+ }
+
+ {
+ any a = Align32(1337);
+ VERIFY(any_cast<Align32>(a) == Align32(1337));
+ }
+
+ {
+ any a = Align64(1337);
+ VERIFY(any_cast<Align64>(a) == Align64(1337));
+ }
+ }
+
+ // make_any
+ {
+ {
+ auto a = make_any<int>(42);
+ VERIFY(any_cast<int>(a) == 42);
+ }
+
+ {
+ auto a = make_any<RequiresInitList>(std::initializer_list<int>{1,2,3,4,5,6,7,8});
+ VERIFY(any_cast<RequiresInitList&>(a).sum == 36);
+ }
+ }
+
+ // user reported regression that eastl::any constructor was not decaying the deduced type correctly.
+ {
+ float f = 42.f;
+ eastl::any a(f);
+ VERIFY(any_cast<float>(a) == 42.f);
+ }
+
+ //testing unsafe operations
+ {
+ eastl::any a = 1;
+ int* i = eastl::any_cast<int>(&a);
+ VERIFY((*i) == 1);
+
+ a = 2;
+ int *j = (int*)eastl::unsafe_any_cast<void>(&a);
+ VERIFY((*j) == 2);
+
+ const eastl::any b = 3;
+ const void * p = eastl::unsafe_any_cast<void>(&b);
+ void *q = const_cast<void *>(p);
+ int *r = static_cast<int *>(q);
+ VERIFY((*r) == 3);
+ }
+
+ // user regression when calling the assignment operator
+ {
+ {
+ eastl::any a1;
+ eastl::any a2;
+ VERIFY(a1.has_value() == false);
+ VERIFY(a2.has_value() == false);
+
+ a1 = a2;
+ VERIFY(a1.has_value() == false);
+ VERIFY(a2.has_value() == false);
+ }
+
+ {
+ eastl::any a1 = 42;
+ eastl::any a2;
+ VERIFY(a1.has_value() == true);
+ VERIFY(a2.has_value() == false);
+
+ a1 = a2;
+ VERIFY(a1.has_value() == false);
+ VERIFY(a2.has_value() == false);
+ }
+
+ {
+ eastl::any a1;
+ eastl::any a2 = 42;
+ VERIFY(a1.has_value() == false);
+ VERIFY(a2.has_value() == true);
+
+ a1 = a2;
+ VERIFY(a1.has_value() == true);
+ VERIFY(a2.has_value() == true);
+ VERIFY(any_cast<int>(a1) == 42);
+ VERIFY(any_cast<int>(a2) == 42);
+ }
+ }
+
+ return nErrorCount;
+}
+
+
diff --git a/EASTL/test/source/TestArray.cpp b/EASTL/test/source/TestArray.cpp
new file mode 100644
index 0000000..ca05b67
--- /dev/null
+++ b/EASTL/test/source/TestArray.cpp
@@ -0,0 +1,360 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/array.h>
+#include <EABase/eabase.h>
+
+
+
+using namespace eastl;
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template struct eastl::array<int>;
+template struct eastl::array<Align32>; // VC++ fails to compile due to error generated by the swap function. C2718: http://msdn.microsoft.com/en-us/library/vstudio/sxe76d9e.aspx
+
+template<typename T> class TP;
+
+
+int TestArray()
+{
+ int nErrorCount = 0;
+
+ {
+ array<int, 5> a = { { 0, 1, 2, 3, 4 } };
+ array<int, 5> b = { { 0, 1, 2, 3 } };
+ array<int, 5> c = { { 4, 3, 2, 1, 0 } };
+ array<int, 0> d = { { 0 } };
+
+ VERIFY(!a.empty());
+ VERIFY(a.size() == 5);
+ VERIFY(a[0] == 0);
+ VERIFY(a[4] == 4);
+
+ VERIFY(!b.empty());
+ VERIFY(b.size() == 5);
+ VERIFY(b[0] == 0);
+ VERIFY(b[3] == 3);
+
+ VERIFY(d.empty());
+ VERIFY(d.size() == 0);
+
+ // swap
+ a.swap(c);
+ VERIFY(a[0] == 4);
+ VERIFY(c[0] == 0);
+
+ // begin, end
+ array<int, 5>::iterator it = a.begin();
+ VERIFY((a.validate_iterator(it) & (isf_valid | isf_can_dereference)) != 0);
+ VERIFY(*it == 4);
+
+ ++it;
+ VERIFY(*it == 3);
+
+ ++it;
+ VERIFY(*it == 2);
+
+ --it;
+ VERIFY(*it == 3);
+
+ it += 3;
+ VERIFY((a.validate_iterator(it) & (isf_valid | isf_can_dereference)) != 0);
+ VERIFY(*it == 0);
+
+ ++it;
+ VERIFY(it == a.end());
+ VERIFY((a.validate_iterator(it) & isf_valid) != 0);
+ VERIFY(a.validate());
+
+ // rbegin, rend
+ array<int, 5>::reverse_iterator itr = a.rbegin();
+ VERIFY((a.validate_iterator(itr.base()) & (isf_valid | isf_can_dereference)) != 0);
+ VERIFY(*itr == 0);
+
+ itr++;
+ VERIFY(*itr == 1);
+
+ // data
+ int* pArray = a.data();
+ VERIFY(pArray == a.mValue);
+
+ // front
+ int& nFront = a.front();
+ VERIFY(nFront == 4);
+
+ // back
+ int& nBack = a.back();
+ VERIFY(nBack == 0);
+
+ // at
+ VERIFY(a[0] == a.at(0));
+ #if EASTL_EXCEPTIONS_ENABLED
+ bool bExceptionOccurred = false;
+ try{
+ int x = a.at(100);
+ VERIFY(x != -1);
+ }
+ catch(...){
+ bExceptionOccurred = true;
+ }
+ VERIFY(bExceptionOccurred);
+ #endif
+
+ // global operators
+ a[0] = 0; a[1] = 1; a[2] = 2; a[3] = 3; a[4] = 4; // 01234
+ b[0] = 0; b[1] = 1; b[2] = 2; b[3] = 3; b[4] = 4; // 01234
+ c[0] = 0; c[1] = 1; c[2] = 2; c[3] = 3; c[4] = 9; // 01239
+
+ VERIFY( (a == b));
+ VERIFY(!(a != b));
+ VERIFY(!(a < b));
+ VERIFY( (a <= b));
+ VERIFY( (a >= b));
+ VERIFY(!(a > b));
+
+ VERIFY(!(a == c));
+ VERIFY( (a != c));
+ VERIFY( (a < c));
+ VERIFY( (a <= c));
+ VERIFY(!(a >= c));
+ VERIFY(!(a > c));
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ VERIFY( (a <=> b) == 0);
+ VERIFY(!((a <=> b) != 0));
+ VERIFY(!((a <=> b) < 0));
+ VERIFY( (a <=> b) <= 0);
+ VERIFY( (a <=> b) >= 0);
+ VERIFY(!((a <=> b) > 0));
+
+ VERIFY(!((a <=> c) == 0));
+ VERIFY( (a <=> c) != 0);
+ VERIFY( (a <=> c) < 0);
+ VERIFY( (a <=> c) <= 0);
+ VERIFY(!((a <=> c) >= 0));
+ VERIFY(!((a <=> c) > 0));
+#endif
+
+ // deduction guides
+ #ifdef __cpp_deduction_guides
+ array deduced {1,2,3,4,5};
+
+ static_assert(eastl::is_same_v<decltype(deduced)::value_type, int>, "deduced array value_type mismatch");
+ VERIFY(deduced.size() == 5);
+ #endif
+
+ // structured binding
+
+ {
+ eastl::array<int, 5> aCopy = a;
+ auto&& [a0, a1, a2, a3, a4] = aCopy;
+
+ VERIFY(a0 == aCopy[0]);
+ VERIFY(a1 == aCopy[1]);
+ VERIFY(a2 == aCopy[2]);
+ VERIFY(a3 == aCopy[3]);
+ VERIFY(a4 == aCopy[4]);
+
+ a0 = 100;
+ VERIFY(aCopy[0] == 100);
+
+ a4 = 0;
+ VERIFY(aCopy[4] == 0);
+
+ // The deduced type may or may not be a reference type; it is an aliased type,
+ // as per https://en.cppreference.com/w/cpp/language/structured_binding:
+ // > Like a reference, a structured binding is an alias to an existing object. Unlike a reference,
+ // the type of a structured binding does not have to be a reference type.
+ // Any reference specifier is thus removed to check only the type & its const qualifier
+ static_assert(eastl::is_same_v<eastl::remove_reference_t<decltype(a0)>, int>);
+
+ const eastl::array<int, 5> aConstCopy = a;
+ auto&& [aConst0, aConst1, aConst2, aConst3, aConst4] = aConstCopy;
+
+ static_assert(eastl::is_same_v<eastl::remove_reference_t<decltype(aConst0)>, const int>);
+ }
+ }
+
+ // constexpr tests
+ {
+ #ifndef EA_NO_CPP14_CONSTEXPR
+ EA_CPP14_CONSTEXPR eastl::array<int, 4> a = {{ 0, 1, 2, 3 }};
+
+ static_assert(a == eastl::array<int, 4>{{ 0, 1, 2, 3 }}, "array constexpr failure");
+
+ static_assert(a[0] == 0, "array constexpr failure");
+ static_assert(a[1] == 1, "array constexpr failure");
+ static_assert(a[2] == 2, "array constexpr failure");
+ static_assert(a[3] == 3, "array constexpr failure");
+
+ static_assert(a.at(0) == 0, "array constexpr failure");
+ static_assert(a.at(1) == 1, "array constexpr failure");
+ static_assert(a.at(2) == 2, "array constexpr failure");
+ static_assert(a.at(3) == 3, "array constexpr failure");
+
+ static_assert(a.data()[0] == 0, "array constexpr failure");
+ static_assert(a.data()[1] == 1, "array constexpr failure");
+ static_assert(a.data()[2] == 2, "array constexpr failure");
+ static_assert(a.data()[3] == 3, "array constexpr failure");
+
+ static_assert(a.empty() == false, "array constexpr failure");
+ static_assert(a.size() == 4, "array constexpr failure");
+ static_assert(a.max_size() == 4, "array constexpr failure");
+
+ static_assert(a.front() == 0, "array constexpr failure");
+ static_assert(a.back() == 3, "array constexpr failure");
+
+ static_assert(a.begin()[0] == 0, "array constexpr failure");
+ static_assert(a.begin()[1] == 1, "array constexpr failure");
+ static_assert(a.begin()[2] == 2, "array constexpr failure");
+ static_assert(a.begin()[3] == 3, "array constexpr failure");
+
+ static_assert(a.cbegin()[0] == 0, "array constexpr failure");
+ static_assert(a.cbegin()[1] == 1, "array constexpr failure");
+ static_assert(a.cbegin()[2] == 2, "array constexpr failure");
+ static_assert(a.cbegin()[3] == 3, "array constexpr failure");
+
+ static_assert(a.crbegin()[0] == 3, "array constexpr failure");
+ static_assert(a.crbegin()[1] == 2, "array constexpr failure");
+ static_assert(a.crbegin()[2] == 1, "array constexpr failure");
+ static_assert(a.crbegin()[3] == 0, "array constexpr failure");
+
+ static_assert(a.end()[-1] == 3, "array constexpr failure");
+ static_assert(a.end()[-2] == 2, "array constexpr failure");
+ static_assert(a.end()[-3] == 1, "array constexpr failure");
+ static_assert(a.end()[-4] == 0, "array constexpr failure");
+
+ static_assert(a.cend()[-1] == 3, "array constexpr failure");
+ static_assert(a.cend()[-2] == 2, "array constexpr failure");
+ static_assert(a.cend()[-3] == 1, "array constexpr failure");
+ static_assert(a.cend()[-4] == 0, "array constexpr failure");
+
+ static_assert(a.crend()[-1] == 0, "array constexpr failure");
+ static_assert(a.crend()[-2] == 1, "array constexpr failure");
+ static_assert(a.crend()[-3] == 2, "array constexpr failure");
+ static_assert(a.crend()[-4] == 3, "array constexpr failure");
+ #endif
+ }
+
+ // to_array
+ {
+ {
+ constexpr int c_array[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ constexpr auto arr = to_array(c_array);
+
+ static_assert(is_same_v<remove_cv_t<decltype(arr)>, eastl::array<int, 10>>, "unexpected return type");
+
+ static_assert(arr[0] == 0, "unexpected array value");
+ static_assert(arr[1] == 1, "unexpected array value");
+ static_assert(arr[2] == 2, "unexpected array value");
+ static_assert(arr[3] == 3, "unexpected array value");
+ static_assert(arr[4] == 4, "unexpected array value");
+ static_assert(arr[5] == 5, "unexpected array value");
+ static_assert(arr[6] == 6, "unexpected array value");
+ static_assert(arr[7] == 7, "unexpected array value");
+ static_assert(arr[8] == 8, "unexpected array value");
+ static_assert(arr[9] == 9, "unexpected array value");
+ }
+
+ {
+ constexpr auto arr = to_array({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
+
+ static_assert(is_same_v<remove_cv_t<decltype(arr)>, eastl::array<int, 10>>, "unexpected return type");
+
+ static_assert(arr[0] == 0, "unexpected array value");
+ static_assert(arr[1] == 1, "unexpected array value");
+ static_assert(arr[2] == 2, "unexpected array value");
+ static_assert(arr[3] == 3, "unexpected array value");
+ static_assert(arr[4] == 4, "unexpected array value");
+ static_assert(arr[5] == 5, "unexpected array value");
+ static_assert(arr[6] == 6, "unexpected array value");
+ static_assert(arr[7] == 7, "unexpected array value");
+ static_assert(arr[8] == 8, "unexpected array value");
+ static_assert(arr[9] == 9, "unexpected array value");
+ }
+
+ {
+ constexpr auto arr = to_array<long>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
+
+ static_assert(is_same_v<remove_cv_t<decltype(arr)>, eastl::array<long, 10>>, "unexpected return type");
+
+ static_assert(arr[0] == 0l, "unexpected array value");
+ static_assert(arr[1] == 1l, "unexpected array value");
+ static_assert(arr[2] == 2l, "unexpected array value");
+ static_assert(arr[3] == 3l, "unexpected array value");
+ static_assert(arr[4] == 4l, "unexpected array value");
+ static_assert(arr[5] == 5l, "unexpected array value");
+ static_assert(arr[6] == 6l, "unexpected array value");
+ static_assert(arr[7] == 7l, "unexpected array value");
+ static_assert(arr[8] == 8l, "unexpected array value");
+ static_assert(arr[9] == 9l, "unexpected array value");
+ }
+
+ {
+ constexpr auto arr = to_array<unsigned long>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
+
+ static_assert(is_same_v<remove_cv_t<decltype(arr)>, eastl::array<unsigned long, 10>>, "unexpected return type");
+
+ static_assert(arr[0] == 0ul, "unexpected array value");
+ static_assert(arr[1] == 1ul, "unexpected array value");
+ static_assert(arr[2] == 2ul, "unexpected array value");
+ static_assert(arr[3] == 3ul, "unexpected array value");
+ static_assert(arr[4] == 4ul, "unexpected array value");
+ static_assert(arr[5] == 5ul, "unexpected array value");
+ static_assert(arr[6] == 6ul, "unexpected array value");
+ static_assert(arr[7] == 7ul, "unexpected array value");
+ static_assert(arr[8] == 8ul, "unexpected array value");
+ static_assert(arr[9] == 9ul, "unexpected array value");
+ }
+
+ {
+ constexpr auto arr = to_array("EASTL");
+
+ static_assert(is_same_v<remove_cv_t<decltype(arr)>, eastl::array<char, 6>>, "unexpected return type");
+
+ static_assert(arr[0] == 'E', "unexpected value in array");
+ static_assert(arr[1] == 'A', "unexpected value in array");
+ static_assert(arr[2] == 'S', "unexpected value in array");
+ static_assert(arr[3] == 'T', "unexpected value in array");
+ static_assert(arr[4] == 'L', "unexpected value in array");
+ }
+
+ // Older Microsoft compilers don't implement guaranteed copy ellision which is problematic when dealing with
+ // non-copyable types. We disable this test unless we are on a version of MSVC with those features.
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // VS2019 16.0+
+ {
+ struct LocalNonCopyable
+ {
+ LocalNonCopyable() = default;
+ ~LocalNonCopyable() = default;
+
+ LocalNonCopyable(LocalNonCopyable&&) = default;
+ LocalNonCopyable& operator=(LocalNonCopyable&&) = default;
+
+ LocalNonCopyable(const LocalNonCopyable&) = delete;
+ LocalNonCopyable& operator=(const LocalNonCopyable&) = delete;
+ };
+
+ constexpr auto arr = to_array({LocalNonCopyable{}});
+ static_assert(arr.size() == 1, "unexpected error");
+ }
+ #endif
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestAtomicAsm.cpp b/EASTL/test/source/TestAtomicAsm.cpp
new file mode 100644
index 0000000..d4db04e
--- /dev/null
+++ b/EASTL/test/source/TestAtomicAsm.cpp
@@ -0,0 +1,4921 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+
+#include <EASTL/atomic.h>
+
+#include <cstddef>
+
+
+struct UserType128
+{
+ uint32_t a,b,c,d;
+
+ friend bool operator==(const UserType128& a, const UserType128& b)
+ {
+ return (a.a == b.a) && (a.b == b.b) && (a.c == b.c) && (a.d == b.d);
+ }
+};
+
+
+/**
+ * There is no nice way to verify the emitted asm for each of the given operations.
+ * This test file is meant to put each operation into its own function so its easy
+ * to verify in a disassembler for manual inspection.
+ */
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32StoreRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ atomic.store(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomicU32StoreRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ atomic.store(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomicU32StoreSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ atomic.store(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomicU32Store()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ atomic.store(1);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomicU32StoreOrders()
+{
+ TestAtomicU32StoreRelaxed();
+
+ TestAtomicU32StoreRelease();
+
+ TestAtomicU32StoreSeqCst();
+
+ TestAtomicU32Store();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64StoreRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ atomic.store(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomicU64StoreRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ atomic.store(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomicU64StoreSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ atomic.store(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomicU64Store()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ atomic.store(1);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomicU64StoreOrders()
+{
+ TestAtomicU64StoreRelaxed();
+
+ TestAtomicU64StoreRelease();
+
+ TestAtomicU64StoreSeqCst();
+
+ TestAtomicU64Store();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+EA_NO_INLINE static void TestAtomic128StoreRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ atomic.store(UserType128{1, 1, 1, 1}, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomic128StoreRelease()
+{
+ eastl::atomic<UserType128> atomic;
+
+ atomic.store(UserType128{1, 1, 1, 1}, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomic128StoreSeqCst()
+{
+ eastl::atomic<UserType128> atomic;
+
+ atomic.store(UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomic128Store()
+{
+ eastl::atomic<UserType128> atomic;
+
+ atomic.store(UserType128{1, 1, 1, 1});
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+EA_NO_INLINE static void TestAtomic128StoreOrders()
+{
+ TestAtomic128StoreRelaxed();
+
+ TestAtomic128StoreRelease();
+
+ TestAtomic128StoreSeqCst();
+
+ TestAtomic128Store();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32LoadRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t load = atomic.load(eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomicU32LoadAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t load = atomic.load(eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomicU32LoadSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t load = atomic.load(eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomicU32Load()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t load = atomic.load();
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomicU32LoadOrders()
+{
+ TestAtomicU32LoadRelaxed();
+
+ TestAtomicU32LoadAcquire();
+
+ TestAtomicU32LoadSeqCst();
+
+ TestAtomicU32Load();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64LoadRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t load = atomic.load(eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomicU64LoadAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t load = atomic.load(eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomicU64LoadSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t load = atomic.load(eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomicU64Load()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t load = atomic.load();
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomicU64LoadOrders()
+{
+ TestAtomicU64LoadRelaxed();
+
+ TestAtomicU64LoadAcquire();
+
+ TestAtomicU64LoadSeqCst();
+
+ TestAtomicU64Load();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+EA_NO_INLINE static void TestAtomic128LoadRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 load = atomic.load(eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomic128LoadAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 load = atomic.load(eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomic128LoadSeqCst()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 load = atomic.load(eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomic128Load()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 load = atomic.load();
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+EA_NO_INLINE static void TestAtomic128LoadOrders()
+{
+ TestAtomic128LoadRelaxed();
+
+ TestAtomic128LoadAcquire();
+
+ TestAtomic128LoadSeqCst();
+
+ TestAtomic128Load();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32ExchangeRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t exchange = atomic.exchange(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU32ExchangeAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t exchange = atomic.exchange(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU32ExchangeRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t exchange = atomic.exchange(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU32ExchangeAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t exchange = atomic.exchange(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU32ExchangeSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t exchange = atomic.exchange(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU32Exchange()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t exchange = atomic.exchange(1);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU32ExchangeOrders()
+{
+ TestAtomicU32ExchangeRelaxed();
+
+ TestAtomicU32ExchangeAcquire();
+
+ TestAtomicU32ExchangeRelease();
+
+ TestAtomicU32ExchangeAcqRel();
+
+ TestAtomicU32ExchangeSeqCst();
+
+ TestAtomicU32Exchange();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64ExchangeRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t exchange = atomic.exchange(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU64ExchangeAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t exchange = atomic.exchange(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU64ExchangeRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t exchange = atomic.exchange(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU64ExchangeAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t exchange = atomic.exchange(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU64ExchangeSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t exchange = atomic.exchange(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU64Exchange()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t exchange = atomic.exchange(1);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomicU64ExchangeOrders()
+{
+ TestAtomicU64ExchangeRelaxed();
+
+ TestAtomicU64ExchangeAcquire();
+
+ TestAtomicU64ExchangeRelease();
+
+ TestAtomicU64ExchangeAcqRel();
+
+ TestAtomicU64ExchangeSeqCst();
+
+ TestAtomicU64Exchange();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+EA_NO_INLINE static void TestAtomic128ExchangeRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 exchange = atomic.exchange(UserType128{1, 1, 1, 1}, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomic128ExchangeAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 exchange = atomic.exchange(UserType128{1, 1, 1, 1}, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomic128ExchangeRelease()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 exchange = atomic.exchange(UserType128{1, 1, 1, 1}, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomic128ExchangeAcqRel()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 exchange = atomic.exchange(UserType128{1, 1, 1, 1}, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomic128ExchangeSeqCst()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 exchange = atomic.exchange(UserType128{1, 1, 1, 1}, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomic128Exchange()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 exchange = atomic.exchange(UserType128{1, 1, 1, 1});
+
+ eastl::compiler_barrier_data_dependency(exchange);
+}
+
+EA_NO_INLINE static void TestAtomic128ExchangeOrders()
+{
+ TestAtomic128ExchangeRelaxed();
+
+ TestAtomic128ExchangeAcquire();
+
+ TestAtomic128ExchangeRelease();
+
+ TestAtomic128ExchangeAcqRel();
+
+ TestAtomic128ExchangeSeqCst();
+
+ TestAtomic128Exchange();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorT()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t load = atomic;
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorT()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t load = atomic;
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+EA_NO_INLINE static void TestAtomic128OperatorT()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 load = atomic;
+
+ eastl::compiler_barrier_data_dependency(load);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorEqual()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ atomic = 1;
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorEqual()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ atomic = 1;
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+EA_NO_INLINE static void TestAtomic128OperatorEqual()
+{
+ eastl::atomic<UserType128> atomic;
+
+ atomic = UserType128{1, 1, 1, 1};
+
+ eastl::compiler_barrier_data_dependency(atomic);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongRelaxedRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongAcquireRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongAcquireAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongReleaseRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongAcqRelRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongAcqRelAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongSeqCstRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongSeqCstAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongSeqCstSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrong()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeStrongOrders()
+{
+ TestAtomicU32CompareExchangeStrongRelaxedRelaxed();
+
+ TestAtomicU32CompareExchangeStrongAcquireRelaxed();
+
+ TestAtomicU32CompareExchangeStrongAcquireAcquire();
+
+ TestAtomicU32CompareExchangeStrongReleaseRelaxed();
+
+ TestAtomicU32CompareExchangeStrongAcqRelRelaxed();
+
+ TestAtomicU32CompareExchangeStrongAcqRelAcquire();
+
+ TestAtomicU32CompareExchangeStrongSeqCstRelaxed();
+
+ TestAtomicU32CompareExchangeStrongSeqCstAcquire();
+
+ TestAtomicU32CompareExchangeStrongSeqCstSeqCst();
+
+ TestAtomicU32CompareExchangeStrongRelaxed();
+
+ TestAtomicU32CompareExchangeStrongAcquire();
+
+ TestAtomicU32CompareExchangeStrongRelease();
+
+ TestAtomicU32CompareExchangeStrongAcqRel();
+
+ TestAtomicU32CompareExchangeStrongSeqCst();
+
+ TestAtomicU32CompareExchangeStrong();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongRelaxedRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongAcquireRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongAcquireAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongReleaseRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongAcqRelRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongAcqRelAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongSeqCstRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongSeqCstAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongSeqCstSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrong()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_strong(expected, 1);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeStrongOrders()
+{
+ TestAtomicU64CompareExchangeStrongRelaxedRelaxed();
+
+ TestAtomicU64CompareExchangeStrongAcquireRelaxed();
+
+ TestAtomicU64CompareExchangeStrongAcquireAcquire();
+
+ TestAtomicU64CompareExchangeStrongReleaseRelaxed();
+
+ TestAtomicU64CompareExchangeStrongAcqRelRelaxed();
+
+ TestAtomicU64CompareExchangeStrongAcqRelAcquire();
+
+ TestAtomicU64CompareExchangeStrongSeqCstRelaxed();
+
+ TestAtomicU64CompareExchangeStrongSeqCstAcquire();
+
+ TestAtomicU64CompareExchangeStrongSeqCstSeqCst();
+
+ TestAtomicU64CompareExchangeStrongRelaxed();
+
+ TestAtomicU64CompareExchangeStrongAcquire();
+
+ TestAtomicU64CompareExchangeStrongRelease();
+
+ TestAtomicU64CompareExchangeStrongAcqRel();
+
+ TestAtomicU64CompareExchangeStrongSeqCst();
+
+ TestAtomicU64CompareExchangeStrong();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongRelaxedRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongAcquireRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongAcquireAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongReleaseRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongAcqRelRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongAcqRelAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongSeqCstRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongSeqCstAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongSeqCstSeqCst()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongRelease()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongAcqRel()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongSeqCst()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrong()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_strong(expected, UserType128{1, 1, 1, 1});
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeStrongOrders()
+{
+ TestAtomic128CompareExchangeStrongRelaxedRelaxed();
+
+ TestAtomic128CompareExchangeStrongAcquireRelaxed();
+
+ TestAtomic128CompareExchangeStrongAcquireAcquire();
+
+ TestAtomic128CompareExchangeStrongReleaseRelaxed();
+
+ TestAtomic128CompareExchangeStrongAcqRelRelaxed();
+
+ TestAtomic128CompareExchangeStrongAcqRelAcquire();
+
+ TestAtomic128CompareExchangeStrongSeqCstRelaxed();
+
+ TestAtomic128CompareExchangeStrongSeqCstAcquire();
+
+ TestAtomic128CompareExchangeStrongSeqCstSeqCst();
+
+ TestAtomic128CompareExchangeStrongRelaxed();
+
+ TestAtomic128CompareExchangeStrongAcquire();
+
+ TestAtomic128CompareExchangeStrongRelease();
+
+ TestAtomic128CompareExchangeStrongAcqRel();
+
+ TestAtomic128CompareExchangeStrongSeqCst();
+
+ TestAtomic128CompareExchangeStrong();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakRelaxedRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakAcquireRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakAcquireAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakReleaseRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakAcqRelRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakAcqRelAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakSeqCstRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakSeqCstAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakSeqCstSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeak()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU32CompareExchangeWeakOrders()
+{
+ TestAtomicU32CompareExchangeWeakRelaxedRelaxed();
+
+ TestAtomicU32CompareExchangeWeakAcquireRelaxed();
+
+ TestAtomicU32CompareExchangeWeakAcquireAcquire();
+
+ TestAtomicU32CompareExchangeWeakReleaseRelaxed();
+
+ TestAtomicU32CompareExchangeWeakAcqRelRelaxed();
+
+ TestAtomicU32CompareExchangeWeakAcqRelAcquire();
+
+ TestAtomicU32CompareExchangeWeakSeqCstRelaxed();
+
+ TestAtomicU32CompareExchangeWeakSeqCstAcquire();
+
+ TestAtomicU32CompareExchangeWeakSeqCstSeqCst();
+
+ TestAtomicU32CompareExchangeWeakRelaxed();
+
+ TestAtomicU32CompareExchangeWeakAcquire();
+
+ TestAtomicU32CompareExchangeWeakRelease();
+
+ TestAtomicU32CompareExchangeWeakAcqRel();
+
+ TestAtomicU32CompareExchangeWeakSeqCst();
+
+ TestAtomicU32CompareExchangeWeak();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakRelaxedRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakAcquireRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakAcquireAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakReleaseRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakAcqRelRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakAcqRelAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakSeqCstRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakSeqCstAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakSeqCstSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeak()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t expected = 0;
+ bool ret = atomic.compare_exchange_weak(expected, 1);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomicU64CompareExchangeWeakOrders()
+{
+ TestAtomicU64CompareExchangeWeakRelaxedRelaxed();
+
+ TestAtomicU64CompareExchangeWeakAcquireRelaxed();
+
+ TestAtomicU64CompareExchangeWeakAcquireAcquire();
+
+ TestAtomicU64CompareExchangeWeakReleaseRelaxed();
+
+ TestAtomicU64CompareExchangeWeakAcqRelRelaxed();
+
+ TestAtomicU64CompareExchangeWeakAcqRelAcquire();
+
+ TestAtomicU64CompareExchangeWeakSeqCstRelaxed();
+
+ TestAtomicU64CompareExchangeWeakSeqCstAcquire();
+
+ TestAtomicU64CompareExchangeWeakSeqCstSeqCst();
+
+ TestAtomicU64CompareExchangeWeakRelaxed();
+
+ TestAtomicU64CompareExchangeWeakAcquire();
+
+ TestAtomicU64CompareExchangeWeakRelease();
+
+ TestAtomicU64CompareExchangeWeakAcqRel();
+
+ TestAtomicU64CompareExchangeWeakSeqCst();
+
+ TestAtomicU64CompareExchangeWeak();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakRelaxedRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakAcquireRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakAcquireAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakReleaseRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakAcqRelRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakAcqRelAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakSeqCstRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakSeqCstAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakSeqCstSeqCst()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakRelaxed()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakAcquire()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakRelease()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakAcqRel()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakSeqCst()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1}, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeak()
+{
+ eastl::atomic<UserType128> atomic;
+
+ UserType128 expected = UserType128{0, 0, 0, 0};
+ bool ret = atomic.compare_exchange_weak(expected, UserType128{1, 1, 1, 1});
+
+ eastl::compiler_barrier_data_dependency(ret);
+}
+
+EA_NO_INLINE static void TestAtomic128CompareExchangeWeakOrders()
+{
+ TestAtomic128CompareExchangeWeakRelaxedRelaxed();
+
+ TestAtomic128CompareExchangeWeakAcquireRelaxed();
+
+ TestAtomic128CompareExchangeWeakAcquireAcquire();
+
+ TestAtomic128CompareExchangeWeakReleaseRelaxed();
+
+ TestAtomic128CompareExchangeWeakAcqRelRelaxed();
+
+ TestAtomic128CompareExchangeWeakAcqRelAcquire();
+
+ TestAtomic128CompareExchangeWeakSeqCstRelaxed();
+
+ TestAtomic128CompareExchangeWeakSeqCstAcquire();
+
+ TestAtomic128CompareExchangeWeakSeqCstSeqCst();
+
+ TestAtomic128CompareExchangeWeakRelaxed();
+
+ TestAtomic128CompareExchangeWeakAcquire();
+
+ TestAtomic128CompareExchangeWeakRelease();
+
+ TestAtomic128CompareExchangeWeakAcqRel();
+
+ TestAtomic128CompareExchangeWeakSeqCst();
+
+ TestAtomic128CompareExchangeWeak();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32FetchAddRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_add(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAddAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_add(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAddRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_add(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAddAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_add(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAddSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_add(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAdd()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_add(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAddOrders()
+{
+ TestAtomicU32FetchAddRelaxed();
+
+ TestAtomicU32FetchAddAcquire();
+
+ TestAtomicU32FetchAddRelease();
+
+ TestAtomicU32FetchAddAcqRel();
+
+ TestAtomicU32FetchAddSeqCst();
+
+ TestAtomicU32FetchAdd();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64FetchAddRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_add(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAddAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_add(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAddRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_add(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAddAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_add(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAddSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_add(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAdd()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_add(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAddOrders()
+{
+ TestAtomicU64FetchAddRelaxed();
+
+ TestAtomicU64FetchAddAcquire();
+
+ TestAtomicU64FetchAddRelease();
+
+ TestAtomicU64FetchAddAcqRel();
+
+ TestAtomicU64FetchAddSeqCst();
+
+ TestAtomicU64FetchAdd();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128FetchAddRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_add(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAddAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_add(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAddRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_add(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAddAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_add(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAddSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_add(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAdd()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_add(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAddOrders()
+{
+ TestAtomic128FetchAddRelaxed();
+
+ TestAtomic128FetchAddAcquire();
+
+ TestAtomic128FetchAddRelease();
+
+ TestAtomic128FetchAddAcqRel();
+
+ TestAtomic128FetchAddSeqCst();
+
+ TestAtomic128FetchAdd();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32AddFetchRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.add_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AddFetchAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.add_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AddFetchRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.add_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AddFetchAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.add_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AddFetchSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.add_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AddFetch()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.add_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AddFetchOrders()
+{
+ TestAtomicU32AddFetchRelaxed();
+
+ TestAtomicU32AddFetchAcquire();
+
+ TestAtomicU32AddFetchRelease();
+
+ TestAtomicU32AddFetchAcqRel();
+
+ TestAtomicU32AddFetchSeqCst();
+
+ TestAtomicU32AddFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64AddFetchRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.add_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AddFetchAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.add_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AddFetchRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.add_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AddFetchAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.add_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AddFetchSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.add_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AddFetch()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.add_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AddFetchOrders()
+{
+ TestAtomicU64AddFetchRelaxed();
+
+ TestAtomicU64AddFetchAcquire();
+
+ TestAtomicU64AddFetchRelease();
+
+ TestAtomicU64AddFetchAcqRel();
+
+ TestAtomicU64AddFetchSeqCst();
+
+ TestAtomicU64AddFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128AddFetchRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.add_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AddFetchAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.add_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AddFetchRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.add_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AddFetchAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.add_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AddFetchSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.add_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AddFetch()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.add_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AddFetchOrders()
+{
+ TestAtomic128AddFetchRelaxed();
+
+ TestAtomic128AddFetchAcquire();
+
+ TestAtomic128AddFetchRelease();
+
+ TestAtomic128AddFetchAcqRel();
+
+ TestAtomic128AddFetchSeqCst();
+
+ TestAtomic128AddFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32FetchSubRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_sub(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchSubAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_sub(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchSubRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_sub(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchSubAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_sub(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchSubSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_sub(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchSub()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_sub(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchSubOrders()
+{
+ TestAtomicU32FetchSubRelaxed();
+
+ TestAtomicU32FetchSubAcquire();
+
+ TestAtomicU32FetchSubRelease();
+
+ TestAtomicU32FetchSubAcqRel();
+
+ TestAtomicU32FetchSubSeqCst();
+
+ TestAtomicU32FetchSub();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64FetchSubRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_sub(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchSubAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_sub(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchSubRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_sub(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchSubAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_sub(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchSubSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_sub(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchSub()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_sub(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchSubOrders()
+{
+ TestAtomicU64FetchSubRelaxed();
+
+ TestAtomicU64FetchSubAcquire();
+
+ TestAtomicU64FetchSubRelease();
+
+ TestAtomicU64FetchSubAcqRel();
+
+ TestAtomicU64FetchSubSeqCst();
+
+ TestAtomicU64FetchSub();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128FetchSubRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_sub(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchSubAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_sub(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchSubRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_sub(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchSubAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_sub(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchSubSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_sub(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchSub()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_sub(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchSubOrders()
+{
+ TestAtomic128FetchSubRelaxed();
+
+ TestAtomic128FetchSubAcquire();
+
+ TestAtomic128FetchSubRelease();
+
+ TestAtomic128FetchSubAcqRel();
+
+ TestAtomic128FetchSubSeqCst();
+
+ TestAtomic128FetchSub();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32SubFetchRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.sub_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32SubFetchAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.sub_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32SubFetchRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.sub_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32SubFetchAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.sub_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32SubFetchSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.sub_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32SubFetch()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.sub_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32SubFetchOrders()
+{
+ TestAtomicU32SubFetchRelaxed();
+
+ TestAtomicU32SubFetchAcquire();
+
+ TestAtomicU32SubFetchRelease();
+
+ TestAtomicU32SubFetchAcqRel();
+
+ TestAtomicU32SubFetchSeqCst();
+
+ TestAtomicU32SubFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64SubFetchRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.sub_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64SubFetchAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.sub_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64SubFetchRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.sub_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64SubFetchAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.sub_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64SubFetchSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.sub_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64SubFetch()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.sub_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64SubFetchOrders()
+{
+ TestAtomicU64SubFetchRelaxed();
+
+ TestAtomicU64SubFetchAcquire();
+
+ TestAtomicU64SubFetchRelease();
+
+ TestAtomicU64SubFetchAcqRel();
+
+ TestAtomicU64SubFetchSeqCst();
+
+ TestAtomicU64SubFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128SubFetchRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.sub_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128SubFetchAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.sub_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128SubFetchRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.sub_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128SubFetchAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.sub_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128SubFetchSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.sub_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128SubFetch()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.sub_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128SubFetchOrders()
+{
+ TestAtomic128SubFetchRelaxed();
+
+ TestAtomic128SubFetchAcquire();
+
+ TestAtomic128SubFetchRelease();
+
+ TestAtomic128SubFetchAcqRel();
+
+ TestAtomic128SubFetchSeqCst();
+
+ TestAtomic128SubFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32FetchAndRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_and(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAndAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_and(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAndRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_and(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAndAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_and(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAndSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_and(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAnd()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_and(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchAndOrders()
+{
+ TestAtomicU32FetchAndRelaxed();
+
+ TestAtomicU32FetchAndAcquire();
+
+ TestAtomicU32FetchAndRelease();
+
+ TestAtomicU32FetchAndAcqRel();
+
+ TestAtomicU32FetchAndSeqCst();
+
+ TestAtomicU32FetchAnd();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64FetchAndRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_and(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAndAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_and(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAndRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_and(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAndAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_and(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAndSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_and(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAnd()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_and(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchAndOrders()
+{
+ TestAtomicU64FetchAndRelaxed();
+
+ TestAtomicU64FetchAndAcquire();
+
+ TestAtomicU64FetchAndRelease();
+
+ TestAtomicU64FetchAndAcqRel();
+
+ TestAtomicU64FetchAndSeqCst();
+
+ TestAtomicU64FetchAnd();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128FetchAndRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_and(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAndAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_and(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAndRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_and(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAndAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_and(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAndSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_and(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAnd()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_and(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchAndOrders()
+{
+ TestAtomic128FetchAndRelaxed();
+
+ TestAtomic128FetchAndAcquire();
+
+ TestAtomic128FetchAndRelease();
+
+ TestAtomic128FetchAndAcqRel();
+
+ TestAtomic128FetchAndSeqCst();
+
+ TestAtomic128FetchAnd();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32AndFetchRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.and_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AndFetchAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.and_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AndFetchRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.and_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AndFetchAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.and_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AndFetchSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.and_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AndFetch()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.and_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32AndFetchOrders()
+{
+ TestAtomicU32AndFetchRelaxed();
+
+ TestAtomicU32AndFetchAcquire();
+
+ TestAtomicU32AndFetchRelease();
+
+ TestAtomicU32AndFetchAcqRel();
+
+ TestAtomicU32AndFetchSeqCst();
+
+ TestAtomicU32AndFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64AndFetchRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.and_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AndFetchAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.and_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AndFetchRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.and_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AndFetchAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.and_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AndFetchSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.and_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AndFetch()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.and_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64AndFetchOrders()
+{
+ TestAtomicU64AndFetchRelaxed();
+
+ TestAtomicU64AndFetchAcquire();
+
+ TestAtomicU64AndFetchRelease();
+
+ TestAtomicU64AndFetchAcqRel();
+
+ TestAtomicU64AndFetchSeqCst();
+
+ TestAtomicU64AndFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128AndFetchRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.and_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AndFetchAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.and_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AndFetchRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.and_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AndFetchAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.and_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AndFetchSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.and_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AndFetch()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.and_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128AndFetchOrders()
+{
+ TestAtomic128AndFetchRelaxed();
+
+ TestAtomic128AndFetchAcquire();
+
+ TestAtomic128AndFetchRelease();
+
+ TestAtomic128AndFetchAcqRel();
+
+ TestAtomic128AndFetchSeqCst();
+
+ TestAtomic128AndFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OrFetchRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.or_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32OrFetchAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.or_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32OrFetchRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.or_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32OrFetchAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.or_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32OrFetchSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.or_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32OrFetch()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.or_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32OrFetchOrders()
+{
+ TestAtomicU32OrFetchRelaxed();
+
+ TestAtomicU32OrFetchAcquire();
+
+ TestAtomicU32OrFetchRelease();
+
+ TestAtomicU32OrFetchAcqRel();
+
+ TestAtomicU32OrFetchSeqCst();
+
+ TestAtomicU32OrFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OrFetchRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.or_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64OrFetchAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.or_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64OrFetchRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.or_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64OrFetchAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.or_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64OrFetchSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.or_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64OrFetch()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.or_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64OrFetchOrders()
+{
+ TestAtomicU64OrFetchRelaxed();
+
+ TestAtomicU64OrFetchAcquire();
+
+ TestAtomicU64OrFetchRelease();
+
+ TestAtomicU64OrFetchAcqRel();
+
+ TestAtomicU64OrFetchSeqCst();
+
+ TestAtomicU64OrFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128OrFetchRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.or_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128OrFetchAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.or_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128OrFetchRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.or_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128OrFetchAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.or_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128OrFetchSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.or_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128OrFetch()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.or_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128OrFetchOrders()
+{
+ TestAtomic128OrFetchRelaxed();
+
+ TestAtomic128OrFetchAcquire();
+
+ TestAtomic128OrFetchRelease();
+
+ TestAtomic128OrFetchAcqRel();
+
+ TestAtomic128OrFetchSeqCst();
+
+ TestAtomic128OrFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32FetchOrRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_or(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchOrAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_or(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchOrRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_or(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchOrAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_or(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchOrSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_or(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchOr()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_or(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchOrOrders()
+{
+ TestAtomicU32FetchOrRelaxed();
+
+ TestAtomicU32FetchOrAcquire();
+
+ TestAtomicU32FetchOrRelease();
+
+ TestAtomicU32FetchOrAcqRel();
+
+ TestAtomicU32FetchOrSeqCst();
+
+ TestAtomicU32FetchOr();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64FetchOrRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_or(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchOrAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_or(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchOrRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_or(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchOrAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_or(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchOrSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_or(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchOr()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_or(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchOrOrders()
+{
+ TestAtomicU64FetchOrRelaxed();
+
+ TestAtomicU64FetchOrAcquire();
+
+ TestAtomicU64FetchOrRelease();
+
+ TestAtomicU64FetchOrAcqRel();
+
+ TestAtomicU64FetchOrSeqCst();
+
+ TestAtomicU64FetchOr();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128FetchOrRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_or(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchOrAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_or(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchOrRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_or(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchOrAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_or(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchOrSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_or(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchOr()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_or(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchOrOrders()
+{
+ TestAtomic128FetchOrRelaxed();
+
+ TestAtomic128FetchOrAcquire();
+
+ TestAtomic128FetchOrRelease();
+
+ TestAtomic128FetchOrAcqRel();
+
+ TestAtomic128FetchOrSeqCst();
+
+ TestAtomic128FetchOr();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32FetchXorRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_xor(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchXorAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_xor(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchXorRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_xor(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchXorAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_xor(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchXorSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_xor(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchXor()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.fetch_xor(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32FetchXorOrders()
+{
+ TestAtomicU32FetchXorRelaxed();
+
+ TestAtomicU32FetchXorAcquire();
+
+ TestAtomicU32FetchXorRelease();
+
+ TestAtomicU32FetchXorAcqRel();
+
+ TestAtomicU32FetchXorSeqCst();
+
+ TestAtomicU32FetchXor();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64FetchXorRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_xor(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchXorAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_xor(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchXorRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_xor(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchXorAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_xor(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchXorSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_add(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchXor()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.fetch_xor(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64FetchXorOrders()
+{
+ TestAtomicU64FetchXorRelaxed();
+
+ TestAtomicU64FetchXorAcquire();
+
+ TestAtomicU64FetchXorRelease();
+
+ TestAtomicU64FetchXorAcqRel();
+
+ TestAtomicU64FetchXorSeqCst();
+
+ TestAtomicU64FetchXor();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128FetchXorRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_xor(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchXorAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_xor(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchXorRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_xor(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchXorAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_xor(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchXorSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_xor(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchXor()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.fetch_xor(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128FetchXorOrders()
+{
+ TestAtomic128FetchXorRelaxed();
+
+ TestAtomic128FetchXorAcquire();
+
+ TestAtomic128FetchXorRelease();
+
+ TestAtomic128FetchXorAcqRel();
+
+ TestAtomic128FetchXorSeqCst();
+
+ TestAtomic128FetchXor();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32XorFetchRelaxed()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.xor_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32XorFetchAcquire()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.xor_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32XorFetchRelease()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.xor_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32XorFetchAcqRel()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.xor_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32XorFetchSeqCst()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.xor_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32XorFetch()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic.xor_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU32XorFetchOrders()
+{
+ TestAtomicU32XorFetchRelaxed();
+
+ TestAtomicU32XorFetchAcquire();
+
+ TestAtomicU32XorFetchRelease();
+
+ TestAtomicU32XorFetchAcqRel();
+
+ TestAtomicU32XorFetchSeqCst();
+
+ TestAtomicU32XorFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64XorFetchRelaxed()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.xor_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64XorFetchAcquire()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.xor_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64XorFetchRelease()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.xor_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64XorFetchAcqRel()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.xor_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64XorFetchSeqCst()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.xor_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64XorFetch()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic.xor_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomicU64XorFetchOrders()
+{
+ TestAtomicU64XorFetchRelaxed();
+
+ TestAtomicU64XorFetchAcquire();
+
+ TestAtomicU64XorFetchRelease();
+
+ TestAtomicU64XorFetchAcqRel();
+
+ TestAtomicU64XorFetchSeqCst();
+
+ TestAtomicU64XorFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128XorFetchRelaxed()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.xor_fetch(1, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128XorFetchAcquire()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.xor_fetch(1, eastl::memory_order_acquire);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128XorFetchRelease()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.xor_fetch(1, eastl::memory_order_release);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128XorFetchAcqRel()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.xor_fetch(1, eastl::memory_order_acq_rel);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128XorFetchSeqCst()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.xor_fetch(1, eastl::memory_order_seq_cst);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128XorFetch()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic.xor_fetch(1);
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+EA_NO_INLINE static void TestAtomic128XorFetchOrders()
+{
+ TestAtomic128XorFetchRelaxed();
+
+ TestAtomic128XorFetchAcquire();
+
+ TestAtomic128XorFetchRelease();
+
+ TestAtomic128XorFetchAcqRel();
+
+ TestAtomic128XorFetchSeqCst();
+
+ TestAtomic128XorFetch();
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorPlusPlus()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic++;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorPlusPlus()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic++;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128OperatorPlusPlus()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic++;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32PlusPlusOperator()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = ++atomic;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64PlusPlusOperator()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = ++atomic;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128PlusPlusOperator()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = ++atomic;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorMinusMinus()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic--;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorMinusMinus()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic--;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128OperatorMinusMinus()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic--;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32MinusMinusOperator()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = --atomic;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64MinusMinusOperator()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = --atomic;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128MinusMinusOperator()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = --atomic;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorPlusAssignment()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic += 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorPlusAssignment()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic += 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128OperatorPlusAssignment()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic += 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorMinusAssignment()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic -= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorMinusAssignment()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic -= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128OperatorMinusAssignment()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic -= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorAndAssignment()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic &= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorAndAssignment()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic &= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128OperatorAndAssignment()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic &= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorOrAssignment()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic |= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorOrAssignment()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic |= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128OperatorOrAssignment()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic |= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomicU32OperatorXorAssignment()
+{
+ eastl::atomic<uint32_t> atomic;
+
+ uint32_t val = atomic ^= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+
+EA_NO_INLINE static void TestAtomicU64OperatorXorAssignment()
+{
+ eastl::atomic<uint64_t> atomic;
+
+ uint64_t val = atomic ^= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+
+EA_NO_INLINE static void TestAtomic128OperatorXorAssignment()
+{
+ eastl::atomic<__uint128_t> atomic;
+
+ __uint128_t val = atomic ^= 1;
+
+ eastl::compiler_barrier_data_dependency(val);
+}
+
+#endif
+
+EA_NO_INLINE static void TestAtomicSignalFenceRelaxed()
+{
+ eastl::atomic_signal_fence(eastl::memory_order_relaxed);
+}
+
+EA_NO_INLINE static void TestAtomicSignalFenceAcquire()
+{
+ eastl::atomic_signal_fence(eastl::memory_order_acquire);
+}
+
+EA_NO_INLINE static void TestAtomicSignalFenceRelease()
+{
+ eastl::atomic_signal_fence(eastl::memory_order_release);
+}
+
+EA_NO_INLINE static void TestAtomicSignalFenceAcqRel()
+{
+ eastl::atomic_signal_fence(eastl::memory_order_acq_rel);
+}
+
+EA_NO_INLINE static void TestAtomicSignalFenceSeqCst()
+{
+ eastl::atomic_signal_fence(eastl::memory_order_seq_cst);
+}
+
+EA_NO_INLINE static void TestAtomicThreadFenceRelaxed()
+{
+ eastl::atomic_thread_fence(eastl::memory_order_relaxed);
+}
+
+EA_NO_INLINE static void TestAtomicThreadFenceAcquire()
+{
+ eastl::atomic_thread_fence(eastl::memory_order_acquire);
+}
+
+EA_NO_INLINE static void TestAtomicThreadFenceRelease()
+{
+ eastl::atomic_thread_fence(eastl::memory_order_release);
+}
+
+EA_NO_INLINE static void TestAtomicThreadFenceAcqRel()
+{
+ eastl::atomic_thread_fence(eastl::memory_order_acq_rel);
+}
+
+EA_NO_INLINE static void TestAtomicThreadFenceSeqCst()
+{
+ eastl::atomic_thread_fence(eastl::memory_order_seq_cst);
+}
+
+EA_NO_INLINE static void TestAtomicPointerReadDepends()
+{
+ eastl::atomic<void*> atomic;
+
+ void* p = atomic.load(eastl::memory_order_read_depends);
+
+ eastl::compiler_barrier_data_dependency(p);
+}
+
+struct ReadDependsStruct
+{
+ int a;
+ int b;
+};
+
+eastl::atomic<ReadDependsStruct*> gAtomicPtr;
+
+EA_NO_INLINE int TestAtomicReadDependsStruct()
+{
+ ReadDependsStruct* p = gAtomicPtr.load(eastl::memory_order_read_depends);
+
+ int a = p->a;
+ int b = p->b;
+
+ return a + b;
+}
+
+EA_NO_INLINE static void TestCompilerBarrierDataDependency()
+{
+ {
+ UserType128 t{4, 5, 7, 8};
+
+ eastl::compiler_barrier_data_dependency(t);
+ }
+
+ {
+ void* p = (void*)0xdeadbeef;
+
+ eastl::compiler_barrier_data_dependency(p);
+ }
+
+ {
+ bool b = false;
+
+ eastl::compiler_barrier_data_dependency(b);
+ }
+}
+
+struct ReadDependsIntrusive
+{
+ int a;
+ int b;
+ struct ReadDependsIntrusive* next;
+ int c;
+ int d;
+};
+
+eastl::atomic<ReadDependsIntrusive**> gListHead;
+
+EA_NO_INLINE static int TestAtomicReadDependsIntrusive()
+{
+ ReadDependsIntrusive** intrusivePtr = gListHead.load(eastl::memory_order_read_depends);
+ ReadDependsIntrusive* ptr = ((ReadDependsIntrusive*)(((char*)intrusivePtr) - offsetof(ReadDependsIntrusive, next)));
+
+ int a = ptr->a;
+ int b = ptr->b;
+ int c = ptr->c;
+ int d = ptr->d;
+
+ return a + b + c + d;
+}
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+EA_NO_INLINE static void TestAtomic32LoadStoreSameAddressSeqCst()
+{
+ eastl::atomic<uint32_t> atomic{0};
+
+ uint32_t ret1 = atomic.load(eastl::memory_order_relaxed);
+
+ atomic.store(4, eastl::memory_order_relaxed);
+
+ uint32_t ret2 = atomic.load(eastl::memory_order_relaxed);
+
+ uint32_t ret3 = atomic.load(eastl::memory_order_relaxed);
+
+ atomic.store(5, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret1);
+ eastl::compiler_barrier_data_dependency(ret2);
+ eastl::compiler_barrier_data_dependency(ret3);
+}
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+EA_NO_INLINE static void TestAtomic128LoadStoreSameAddressSeqCst()
+{
+ eastl::atomic<UserType128> atomic{UserType128{0, 0, 0, 0}};
+
+ UserType128 ret1 = atomic.load(eastl::memory_order_relaxed);
+
+ atomic.store(UserType128{1, 0, 2, 4}, eastl::memory_order_relaxed);
+
+ UserType128 ret2 = atomic.load(eastl::memory_order_relaxed);
+
+ UserType128 ret3 = atomic.load(eastl::memory_order_relaxed);
+
+ atomic.store(UserType128{1, 1, 2, 4}, eastl::memory_order_relaxed);
+
+ eastl::compiler_barrier_data_dependency(ret1);
+ eastl::compiler_barrier_data_dependency(ret2);
+ eastl::compiler_barrier_data_dependency(ret3);
+}
+
+#endif
+
+int TestAtomicAsm()
+{
+ int nErrorCount = 0;
+
+ // Stores
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32StoreOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64StoreOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ TestAtomic128StoreOrders();
+ #endif
+ }
+
+ // Loads
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32LoadOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64LoadOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ TestAtomic128LoadOrders();
+ #endif
+ }
+
+ // exchange
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32ExchangeOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64ExchangeOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ TestAtomic128ExchangeOrders();
+ #endif
+ }
+
+ // operator T
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorT();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorT();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ TestAtomic128OperatorT();
+ #endif
+ }
+
+ // operator=
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorEqual();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorEqual();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ TestAtomic128OperatorEqual();
+ #endif
+ }
+
+ // compare_exchange_weak
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32CompareExchangeWeakOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64CompareExchangeWeakOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ TestAtomic128CompareExchangeWeakOrders();
+ #endif
+ }
+
+ // compare_exchange_strong
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32CompareExchangeStrongOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64CompareExchangeStrongOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ TestAtomic128CompareExchangeStrongOrders();
+ #endif
+ }
+
+ // fetch_add
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32FetchAddOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64FetchAddOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128FetchAddOrders();
+ #endif
+ }
+
+ // add_fetch
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32AddFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64AddFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128AddFetchOrders();
+ #endif
+ }
+
+ // fetch_sub
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32FetchSubOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64FetchSubOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128FetchSubOrders();
+ #endif
+ }
+
+ // sub_fetch
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32SubFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64SubFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128SubFetchOrders();
+ #endif
+ }
+
+ // fetch_and
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32FetchAndOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64FetchAndOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128FetchAndOrders();
+ #endif
+ }
+
+ // and_fetch
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32AndFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64AndFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128AndFetchOrders();
+ #endif
+ }
+
+ // fetch_or
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32FetchOrOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64FetchOrOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128FetchOrOrders();
+ #endif
+ }
+
+ // or_fetch
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OrFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OrFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128OrFetchOrders();
+ #endif
+ }
+
+ // fetch_xor
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32FetchXorOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64FetchXorOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128FetchXorOrders();
+ #endif
+ }
+
+ // xor_fetch
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32XorFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64XorFetchOrders();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128XorFetchOrders();
+ #endif
+ }
+
+ // operator++/++operator
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorPlusPlus();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorPlusPlus();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128OperatorPlusPlus();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32PlusPlusOperator();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64PlusPlusOperator();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128PlusPlusOperator();
+ #endif
+ }
+
+ // operator--/--operator
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorMinusMinus();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorMinusMinus();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128OperatorMinusMinus();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32MinusMinusOperator();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64MinusMinusOperator();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128MinusMinusOperator();
+ #endif
+ }
+
+ // operator+=
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorPlusAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorPlusAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128OperatorPlusAssignment();
+ #endif
+ }
+
+ // operator-=
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorMinusAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorMinusAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128OperatorMinusAssignment();
+ #endif
+ }
+
+ // operator&=
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorAndAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorAndAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128OperatorAndAssignment();
+ #endif
+ }
+
+ // operator|=
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorOrAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorOrAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128OperatorOrAssignment();
+ #endif
+ }
+
+ // operator^=
+ {
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ TestAtomicU32OperatorXorAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ TestAtomicU64OperatorXorAssignment();
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ TestAtomic128OperatorXorAssignment();
+ #endif
+ }
+
+ // atomic_signal_fence
+ {
+ TestAtomicSignalFenceRelaxed();
+
+ TestAtomicSignalFenceAcquire();
+
+ TestAtomicSignalFenceRelease();
+
+ TestAtomicSignalFenceAcqRel();
+
+ TestAtomicSignalFenceSeqCst();
+ }
+
+ // atomic_thread_fence
+ {
+ TestAtomicThreadFenceRelaxed();
+
+ TestAtomicThreadFenceAcquire();
+
+ TestAtomicThreadFenceRelease();
+
+ TestAtomicThreadFenceAcqRel();
+
+ TestAtomicThreadFenceSeqCst();
+ }
+
+ // atomic pointer read depends
+ {
+ TestAtomicPointerReadDepends();
+ }
+
+ // atomic pointer read depends
+ {
+ ReadDependsStruct rds {3, 2};
+
+ gAtomicPtr.store(&rds, eastl::memory_order_release);
+
+ int ret = TestAtomicReadDependsStruct();
+ eastl::compiler_barrier_data_dependency(ret);
+ }
+
+ {
+ ReadDependsIntrusive rdi {3, 2, &rdi, 1, 0};
+
+ gListHead.store(&(rdi.next), eastl::memory_order_release);
+
+ int ret = TestAtomicReadDependsIntrusive();
+ eastl::compiler_barrier_data_dependency(ret);
+ }
+
+ {
+ TestCompilerBarrierDataDependency();
+ }
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+
+ TestAtomic32LoadStoreSameAddressSeqCst();
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+ TestAtomic128LoadStoreSameAddressSeqCst();
+
+#endif
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestAtomicBasic.cpp b/EASTL/test/source/TestAtomicBasic.cpp
new file mode 100644
index 0000000..166b030
--- /dev/null
+++ b/EASTL/test/source/TestAtomicBasic.cpp
@@ -0,0 +1,4083 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+
+#include <EASTL/atomic.h>
+
+
+/**
+ * This is a basic test suite that tests all functionality is implemented
+ * and that all operations do as expected.
+ * I.E. fetch_add returns the previous value and add_fetch returns the current value
+ */
+
+static eastl::atomic<int> sAtomicInt{ 4 };
+static eastl::atomic<void*> sAtomicPtr{ nullptr };
+
+static int TestAtomicConstantInitialization()
+{
+ int nErrorCount;
+
+ EATEST_VERIFY(sAtomicInt.load() == 4);
+ EATEST_VERIFY(sAtomicPtr == nullptr);
+
+ return 0;
+}
+
+class AtomicStandaloneBasicTest
+{
+public:
+
+ int RunTest()
+ {
+ AtomicSignalFence();
+
+ AtomicThreadFence();
+
+ AtomicCpuPause();
+
+ AtomicCompilerBarrier();
+
+ return nErrorCount;
+ }
+
+private:
+
+ void AtomicSignalFence();
+
+ void AtomicThreadFence();
+
+ void AtomicCpuPause();
+
+ void AtomicCompilerBarrier();
+
+private:
+
+ int nErrorCount = 0;
+};
+
+void AtomicStandaloneBasicTest::AtomicSignalFence()
+{
+ eastl::atomic_signal_fence(eastl::memory_order_relaxed);
+
+ eastl::atomic_signal_fence(eastl::memory_order_acquire);
+
+ eastl::atomic_signal_fence(eastl::memory_order_release);
+
+ eastl::atomic_signal_fence(eastl::memory_order_acq_rel);
+
+ eastl::atomic_signal_fence(eastl::memory_order_seq_cst);
+}
+
+void AtomicStandaloneBasicTest::AtomicThreadFence()
+{
+ eastl::atomic_thread_fence(eastl::memory_order_relaxed);
+
+ eastl::atomic_thread_fence(eastl::memory_order_acquire);
+
+ eastl::atomic_thread_fence(eastl::memory_order_release);
+
+ eastl::atomic_thread_fence(eastl::memory_order_acq_rel);
+
+ eastl::atomic_thread_fence(eastl::memory_order_seq_cst);
+}
+
+void AtomicStandaloneBasicTest::AtomicCpuPause()
+{
+ eastl::cpu_pause();
+}
+
+void AtomicStandaloneBasicTest::AtomicCompilerBarrier()
+{
+ eastl::compiler_barrier();
+
+ {
+ bool ret = false;
+ eastl::compiler_barrier_data_dependency(ret);
+ }
+}
+
+class AtomicFlagBasicTest
+{
+public:
+
+ using AtomicType = eastl::atomic_flag;
+ using BoolType = bool;
+
+ int RunTest()
+ {
+ TestAtomicFlagCtor();
+
+ TestAtomicFlagClear();
+
+ TestAtomicFlagTestAndSet();
+
+ TestAtomicFlagTest();
+
+ TestAllMemoryOrders();
+
+ TestAtomicFlagStandalone();
+
+ return nErrorCount;
+ }
+
+private:
+
+ void TestAtomicFlagCtor();
+
+ void TestAtomicFlagClear();
+
+ void TestAtomicFlagTestAndSet();
+
+ void TestAtomicFlagTest();
+
+ void TestAllMemoryOrders();
+
+ void TestAtomicFlagStandalone();
+
+private:
+
+ int nErrorCount = 0;
+};
+
+void AtomicFlagBasicTest::TestAtomicFlagCtor()
+{
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == false);
+ }
+
+ {
+ AtomicType atomic{ false };
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == false);
+ }
+
+ {
+ AtomicType atomic{ true };
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == true);
+ }
+}
+
+void AtomicFlagBasicTest::TestAtomicFlagClear()
+{
+ {
+ AtomicType atomic;
+
+ atomic.clear(eastl::memory_order_relaxed);
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == false);
+ }
+
+ {
+ AtomicType atomic{ true };
+
+ atomic.clear(eastl::memory_order_relaxed);
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == false);
+ }
+}
+
+void AtomicFlagBasicTest::TestAtomicFlagTestAndSet()
+{
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic.test_and_set(eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == true);
+ }
+
+ {
+ AtomicType atomic{ true };
+
+ BoolType ret = atomic.test_and_set(eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == true);
+ }
+}
+
+void AtomicFlagBasicTest::TestAtomicFlagTest()
+{
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == false);
+ }
+
+ {
+ AtomicType atomic{ true };
+
+ VERIFY(atomic.test(eastl::memory_order_relaxed) == true);
+ }
+}
+
+void AtomicFlagBasicTest::TestAllMemoryOrders()
+{
+ {
+ AtomicType atomic;
+
+ atomic.clear();
+
+ atomic.clear(eastl::memory_order_relaxed);
+
+ atomic.clear(eastl::memory_order_release);
+
+ atomic.clear(eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ atomic.test_and_set();
+
+ atomic.test_and_set(eastl::memory_order_relaxed);
+
+ atomic.test_and_set(eastl::memory_order_acquire);
+
+ atomic.test_and_set(eastl::memory_order_release);
+
+ atomic.test_and_set(eastl::memory_order_acq_rel);
+
+ atomic.test_and_set(eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic.test();
+
+ ret = atomic.test(eastl::memory_order_relaxed);
+
+ ret = atomic.test(eastl::memory_order_acquire);
+
+ ret = atomic.test(eastl::memory_order_seq_cst);
+ }
+}
+
+void AtomicFlagBasicTest::TestAtomicFlagStandalone()
+{
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic_flag_test_and_set(&atomic);
+
+ ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_relaxed);
+
+ ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_acquire);
+
+ ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_release);
+
+ ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_acq_rel);
+
+ ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ atomic_flag_clear(&atomic);
+
+ atomic_flag_clear_explicit(&atomic, eastl::memory_order_relaxed);
+
+ atomic_flag_clear_explicit(&atomic, eastl::memory_order_release);
+
+ atomic_flag_clear_explicit(&atomic, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic_flag_test(&atomic);
+
+ ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_relaxed);
+
+ ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_acquire);
+
+ ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_seq_cst);
+ }
+}
+
+class AtomicVoidPointerBasicTest
+{
+public:
+
+ using AtomicType = eastl::atomic<void*>;
+ using PtrType = void*;
+
+ int RunTest()
+ {
+ TestAtomicCtor();
+
+ TestAssignmentOperators();
+
+ TestIsLockFree();
+
+ TestStore();
+
+ TestLoad();
+
+ TestExchange();
+
+ TestCompareExchangeWeak();
+
+ TestCompareExchangeStrong();
+
+ TestAllMemoryOrders();
+
+ return nErrorCount;
+ }
+
+private:
+
+ void TestAtomicCtor();
+
+ void TestAssignmentOperators();
+
+ void TestIsLockFree();
+
+ void TestStore();
+
+ void TestLoad();
+
+ void TestExchange();
+
+ void TestCompareExchangeWeak();
+
+ void TestCompareExchangeStrong();
+
+ void TestAllMemoryOrders();
+
+private:
+
+ int nErrorCount = 0;
+};
+
+void AtomicVoidPointerBasicTest::TestAtomicCtor()
+{
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic{ (PtrType)0x04 };
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x04);
+ }
+}
+
+void AtomicVoidPointerBasicTest::TestAssignmentOperators()
+{
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic = (PtrType)0x04;
+
+ VERIFY(ret == (PtrType)0x04);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x04);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic = (PtrType)0x0;
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+}
+
+void AtomicVoidPointerBasicTest::TestIsLockFree()
+{
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic.is_lock_free() == true);
+
+ VERIFY(atomic.is_always_lock_free == true);
+ }
+}
+
+void AtomicVoidPointerBasicTest::TestStore()
+{
+ {
+ PtrType val = (PtrType)0x0;
+ AtomicType atomic;
+
+ atomic.store(val, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == val);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic;
+
+ atomic.store(val, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == val);
+ }
+}
+
+void AtomicVoidPointerBasicTest::TestLoad()
+{
+ {
+ AtomicType atomic{ (PtrType)0x4 };
+
+ PtrType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic == (PtrType)0x4);
+ }
+}
+
+void AtomicVoidPointerBasicTest::TestExchange()
+{
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+}
+
+void AtomicVoidPointerBasicTest::TestCompareExchangeWeak()
+{
+ {
+ AtomicType atomic;
+
+ PtrType observed = (PtrType)0x0;
+ bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ if (ret)
+ {
+ VERIFY(ret == true);
+ VERIFY(observed == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType observed = (PtrType)0x4;
+ bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ VERIFY(observed == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+}
+
+void AtomicVoidPointerBasicTest::TestCompareExchangeStrong()
+{
+ {
+ AtomicType atomic;
+
+ PtrType observed = (PtrType)0x0;
+ bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+ VERIFY(observed == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType observed = (PtrType)0x4;
+ bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ VERIFY(observed == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+}
+
+void AtomicVoidPointerBasicTest::TestAllMemoryOrders()
+{
+ {
+ AtomicType atomic;
+ PtrType val = (PtrType)0x4;
+
+ atomic.store(val);
+
+ atomic.store(val, eastl::memory_order_relaxed);
+
+ atomic.store(val, eastl::memory_order_release);
+
+ atomic.store(val, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.load();
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ ret = atomic.load(eastl::memory_order_acquire);
+
+ ret = atomic.load(eastl::memory_order_seq_cst);
+
+ ret = atomic.load(eastl::memory_order_read_depends);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.exchange((PtrType)0x4);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_relaxed);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acquire);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acq_rel);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType observed = (PtrType)0x0;
+
+ bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType observed = (PtrType)0x0;
+
+ bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType observed = (PtrType)0x0;
+ bool ret;
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType observed = (PtrType)0x0;
+ bool ret;
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+ }
+}
+
+class AtomicPointerBasicTest
+{
+public:
+
+ using AtomicType = eastl::atomic<uint32_t*>;
+ using PtrType = uint32_t*;
+
+ int RunTest()
+ {
+ TestAtomicCtor();
+
+ TestAssignmentOperators();
+
+ TestIsLockFree();
+
+ TestStore();
+
+ TestLoad();
+
+ TestExchange();
+
+ TestCompareExchangeWeak();
+
+ TestCompareExchangeStrong();
+
+ TestAllMemoryOrders();
+
+ TestFetchAdd();
+ TestAddFetch();
+
+ TestFetchSub();
+ TestSubFetch();
+
+ TestAtomicPointerStandalone();
+
+ return nErrorCount;
+ }
+
+private:
+
+ void TestAtomicCtor();
+
+ void TestAssignmentOperators();
+
+ void TestIsLockFree();
+
+ void TestStore();
+
+ void TestLoad();
+
+ void TestExchange();
+
+ void TestCompareExchangeWeak();
+
+ void TestCompareExchangeStrong();
+
+ void TestAllMemoryOrders();
+
+ void TestFetchAdd();
+ void TestAddFetch();
+
+ void TestFetchSub();
+ void TestSubFetch();
+
+ void TestAtomicPointerStandalone();
+
+private:
+
+ int nErrorCount = 0;
+};
+
+void AtomicPointerBasicTest::TestAtomicCtor()
+{
+ {
+ AtomicType atomic{};
+
+ PtrType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == nullptr);
+ }
+
+ {
+ AtomicType atomic{ (PtrType)0x4 };
+
+ PtrType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+ }
+}
+
+void AtomicPointerBasicTest::TestAssignmentOperators()
+{
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{val};
+
+ PtrType expected = (PtrType)0x8;
+
+ PtrType ret = atomic = expected;
+
+ VERIFY(ret == expected);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ PtrType val = (PtrType)0x0;
+ AtomicType atomic{val};
+
+ PtrType ret = atomic = val;
+
+ VERIFY(ret == val);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == val);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{val};
+
+ PtrType expected = (PtrType)0x8;
+ PtrType ret = ++atomic;
+
+ VERIFY(ret == expected);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+
+ AtomicType atomic{val};
+
+ PtrType expected = (PtrType)0x8;
+ PtrType ret = atomic++;
+
+ VERIFY(ret == val);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{val};
+
+ PtrType expected = (PtrType)0x10;
+ PtrType ret = atomic += 3;
+
+ VERIFY(ret == expected);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{val};
+
+ PtrType expected = (PtrType)0x4;
+ PtrType ret = atomic += 0;
+
+ VERIFY(ret == expected);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{val};
+
+ PtrType expected = (PtrType)0x0;
+ PtrType ret = atomic -= 1;
+
+ VERIFY(ret == expected);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{val};
+
+ PtrType expected = (PtrType)0x4;
+ PtrType ret = atomic -= 0;
+
+ VERIFY(ret == expected);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+}
+
+void AtomicPointerBasicTest::TestIsLockFree()
+{
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic.is_lock_free() == true);
+
+ VERIFY(atomic.is_always_lock_free == true);
+ }
+}
+
+void AtomicPointerBasicTest::TestStore()
+{
+ {
+ PtrType val = (PtrType)0x0;
+ AtomicType atomic;
+
+ atomic.store(val, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == val);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic;
+
+ atomic.store(val, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == val);
+ }
+}
+
+void AtomicPointerBasicTest::TestLoad()
+{
+ {
+ AtomicType atomic{ (PtrType)0x4 };
+
+ PtrType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic == (PtrType)0x4);
+ }
+}
+
+void AtomicPointerBasicTest::TestCompareExchangeWeak()
+{
+ {
+ AtomicType atomic;
+
+ PtrType observed = (PtrType)0x0;
+ bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ if (ret)
+ {
+ VERIFY(ret == true);
+ VERIFY(observed == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType observed = (PtrType)0x4;
+ bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ VERIFY(observed == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+}
+
+void AtomicPointerBasicTest::TestCompareExchangeStrong()
+{
+ {
+ AtomicType atomic;
+
+ PtrType observed = (PtrType)0x0;
+ bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+ VERIFY(observed == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType observed = (PtrType)0x4;
+ bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ VERIFY(observed == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+}
+
+void AtomicPointerBasicTest::TestExchange()
+{
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+}
+
+void AtomicPointerBasicTest::TestAllMemoryOrders()
+{
+ {
+ AtomicType atomic;
+ PtrType val = (PtrType)0x4;
+
+ atomic.store(val);
+
+ atomic.store(val, eastl::memory_order_relaxed);
+
+ atomic.store(val, eastl::memory_order_release);
+
+ atomic.store(val, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.load();
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ ret = atomic.load(eastl::memory_order_acquire);
+
+ ret = atomic.load(eastl::memory_order_seq_cst);
+
+ ret = atomic.load(eastl::memory_order_read_depends);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.fetch_add(0);
+
+ ret = atomic.fetch_add(0, eastl::memory_order_relaxed);
+
+ ret = atomic.fetch_add(0, eastl::memory_order_acquire);
+
+ ret = atomic.fetch_add(0, eastl::memory_order_release);
+
+ ret = atomic.fetch_add(0, eastl::memory_order_acq_rel);
+
+ ret = atomic.fetch_add(0, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.fetch_sub(0);
+
+ ret = atomic.fetch_sub(0, eastl::memory_order_relaxed);
+
+ ret = atomic.fetch_sub(0, eastl::memory_order_acquire);
+
+ ret = atomic.fetch_sub(0, eastl::memory_order_release);
+
+ ret = atomic.fetch_sub(0, eastl::memory_order_acq_rel);
+
+ ret = atomic.fetch_sub(0, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.add_fetch(0);
+
+ ret = atomic.add_fetch(0, eastl::memory_order_relaxed);
+
+ ret = atomic.add_fetch(0, eastl::memory_order_acquire);
+
+ ret = atomic.add_fetch(0, eastl::memory_order_release);
+
+ ret = atomic.add_fetch(0, eastl::memory_order_acq_rel);
+
+ ret = atomic.add_fetch(0, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.sub_fetch(0);
+
+ ret = atomic.sub_fetch(0, eastl::memory_order_relaxed);
+
+ ret = atomic.sub_fetch(0, eastl::memory_order_acquire);
+
+ ret = atomic.sub_fetch(0, eastl::memory_order_release);
+
+ ret = atomic.sub_fetch(0, eastl::memory_order_acq_rel);
+
+ ret = atomic.sub_fetch(0, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic.exchange((PtrType)0x4);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_relaxed);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acquire);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acq_rel);
+
+ ret = atomic.exchange((PtrType)0x4, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType observed = (PtrType)0x0;
+
+ bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType observed = (PtrType)0x0;
+
+ bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType observed = (PtrType)0x0;
+ bool ret;
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType observed = (PtrType)0x0;
+ bool ret;
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+ }
+}
+
+void AtomicPointerBasicTest::TestFetchAdd()
+{
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{ val };
+
+ PtrType ret = atomic.fetch_add(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x8);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{ val };
+
+ PtrType ret = atomic.fetch_add(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+}
+
+void AtomicPointerBasicTest::TestAddFetch()
+{
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{ val };
+
+ PtrType ret = atomic.add_fetch(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x8);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x8);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{ val };
+
+ PtrType ret = atomic.add_fetch(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+}
+
+void AtomicPointerBasicTest::TestFetchSub()
+{
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{ val };
+
+ PtrType ret = atomic.fetch_sub(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{ val };
+
+ PtrType ret = atomic.fetch_sub(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+}
+
+void AtomicPointerBasicTest::TestSubFetch()
+{
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{ val };
+
+ PtrType ret = atomic.sub_fetch(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+
+ {
+ PtrType val = (PtrType)0x4;
+ AtomicType atomic{ val };
+
+ PtrType ret = atomic.sub_fetch(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+}
+
+void AtomicPointerBasicTest::TestAtomicPointerStandalone()
+{
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic_is_lock_free(&atomic) == true);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType val = (PtrType)0x4;
+
+ atomic_store(&atomic, val);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == val);
+ }
+
+ {
+ AtomicType atomic;
+ PtrType val = (PtrType)0x4;
+
+ atomic_store_explicit(&atomic, val, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == val);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_load(&atomic);
+
+ VERIFY(ret == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_load_explicit(&atomic, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_load_cond(&atomic, [](PtrType val) { return true; });
+
+ VERIFY(ret == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_load_cond_explicit(&atomic, [](PtrType val) { return true; }, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_exchange(&atomic, (PtrType)0x4);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_exchange_explicit(&atomic, (PtrType)0x4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_add_fetch(&atomic, 1);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_add_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_fetch_add(&atomic, 1);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType ret = atomic_fetch_add_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic{ (PtrType)0x4 };
+
+ PtrType ret = atomic_fetch_sub(&atomic, 1);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic{ (PtrType)0x4 };
+
+ PtrType ret = atomic_fetch_sub_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x4);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic{ (PtrType)0x4 };
+
+ PtrType ret = atomic_sub_fetch(&atomic, 1);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic{ (PtrType)0x4 };
+
+ PtrType ret = atomic_sub_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == (PtrType)0x0);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType expected = (PtrType)0x0;
+ bool ret = atomic_compare_exchange_strong(&atomic, &expected, (PtrType)0x4);
+
+ VERIFY(ret == true);
+
+ VERIFY(expected == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType expected = (PtrType)0x0;
+ bool ret = atomic_compare_exchange_strong_explicit(&atomic, &expected, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+
+ VERIFY(expected == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType expected = (PtrType)0x0;
+ bool ret = atomic_compare_exchange_weak(&atomic, &expected, (PtrType)0x4);
+
+ if (ret)
+ {
+ VERIFY(ret == true);
+
+ VERIFY(expected == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+ }
+
+ {
+ AtomicType atomic;
+
+ PtrType expected = (PtrType)0x0;
+ bool ret = atomic_compare_exchange_weak_explicit(&atomic, &expected, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ if (ret)
+ {
+ VERIFY(ret == true);
+
+ VERIFY(expected == (PtrType)0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4);
+ }
+ }
+}
+
+struct AtomicNonTriviallyConstructible
+{
+ AtomicNonTriviallyConstructible()
+ : a(0)
+ , b(0)
+ {
+ }
+
+ AtomicNonTriviallyConstructible(uint16_t a, uint16_t b)
+ : a(a)
+ , b(b)
+ {
+ }
+
+ friend bool operator==(const AtomicNonTriviallyConstructible& a, const AtomicNonTriviallyConstructible& b)
+ {
+ return a.a == b.a && a.b == b.b;
+ }
+
+ uint16_t a;
+ uint16_t b;
+};
+
+struct AtomicNonTriviallyConstructibleNoExcept
+{
+ AtomicNonTriviallyConstructibleNoExcept() noexcept
+ : a(0)
+ , b(0)
+ {
+ }
+
+ AtomicNonTriviallyConstructibleNoExcept(uint16_t a, uint16_t b) noexcept
+ : a(a)
+ , b(b)
+ {
+ }
+
+ friend bool operator==(const AtomicNonTriviallyConstructibleNoExcept& a, const AtomicNonTriviallyConstructibleNoExcept& b)
+ {
+ return a.a == b.a && a.b == b.b;
+ }
+
+ uint16_t a;
+ uint16_t b;
+};
+
+struct AtomicUserType16
+{
+ uint8_t a;
+ uint8_t b;
+
+ friend bool operator==(const AtomicUserType16& a, const AtomicUserType16& b)
+ {
+ return (a.a == b.a) && (a.b == b.b);
+ }
+};
+
+struct AtomicUserType128
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+
+ AtomicUserType128() = default;
+
+ AtomicUserType128(const AtomicUserType128&) = default;
+
+ AtomicUserType128(uint32_t a, uint32_t b)
+ : a(a)
+ , b(b)
+ , c(0)
+ , d(0)
+ {
+ }
+
+ AtomicUserType128& operator=(const AtomicUserType128&) = default;
+
+ friend bool operator==(const AtomicUserType128& a, const AtomicUserType128& b)
+ {
+ return (a.a == b.a) && (a.b == b.b) && (a.c == b.c) && (a.d == b.d);
+ }
+};
+
+template <typename T>
+class AtomicUserTypeBasicTest
+{
+public:
+
+ using AtomicType = eastl::atomic<T>;
+ using UserType = T;
+
+ int RunTest()
+ {
+ TestAtomicCtor();
+
+ TestAssignmentOperators();
+
+ TestIsLockFree();
+
+ TestStore();
+
+ TestLoad();
+
+ TestExchange();
+
+ TestCompareExchangeWeak();
+
+ TestCompareExchangeStrong();
+
+ TestAllMemoryOrders();
+
+ return nErrorCount;
+ }
+
+private:
+
+ void TestAtomicCtor();
+
+ void TestAssignmentOperators();
+
+ void TestIsLockFree();
+
+ void TestStore();
+
+ void TestLoad();
+
+ void TestExchange();
+
+ void TestCompareExchangeWeak();
+
+ void TestCompareExchangeStrong();
+
+ void TestAllMemoryOrders();
+
+private:
+
+ int nErrorCount = 0;
+};
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestAtomicCtor()
+{
+ {
+ AtomicType atomic;
+ UserType expected{0, 0};
+
+ UserType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == expected);
+ }
+
+ {
+ AtomicType atomic{ {5, 8} };
+ UserType expected{5, 8};
+
+ UserType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == expected);
+ }
+}
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestAssignmentOperators()
+{
+ {
+ AtomicType atomic;
+ UserType expected{5, 6};
+
+ atomic = {5, 6};
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ AtomicType atomic;
+ UserType expected{0, 0};
+
+ atomic = {0, 0};
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+}
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestIsLockFree()
+{
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic.is_lock_free() == true);
+
+ VERIFY(AtomicType::is_always_lock_free == true);
+ }
+}
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestStore()
+{
+ {
+ AtomicType atomic;
+ UserType expected{5, 6};
+
+ atomic.store(expected, eastl::memory_order_relaxed);
+
+ UserType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == expected);
+ }
+
+ {
+ AtomicType atomic;
+ UserType expected{5, 6};
+
+ atomic.store({5, 6}, eastl::memory_order_relaxed);
+
+ UserType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == expected);
+ }
+}
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestLoad()
+{
+ {
+ AtomicType atomic;
+ UserType expected{0, 0};
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+
+ VERIFY(atomic == expected);
+ }
+
+ {
+ AtomicType atomic{ {5, 6} };
+ UserType expected{5, 6};
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+
+ VERIFY(atomic == expected);
+ }
+}
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestExchange()
+{
+ {
+ AtomicType atomic;
+ UserType expected{0, 0};
+
+ UserType ret = atomic.exchange({0, 0}, eastl::memory_order_relaxed);
+
+ VERIFY(ret == expected);
+ }
+
+ {
+ AtomicType atomic;
+ UserType expected{0, 0};
+ UserType expected2{0, 1};
+
+ UserType ret = atomic.exchange({0, 1}, eastl::memory_order_relaxed);
+
+ VERIFY(ret == expected);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected2);
+ }
+}
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestCompareExchangeWeak()
+{
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 0};
+ bool ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed);
+
+ UserType expected{0, 0};
+ if (ret)
+ {
+ VERIFY(ret == true);
+ VERIFY(observed == expected);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 0};
+ bool ret = atomic.compare_exchange_weak(observed, {0, 1}, eastl::memory_order_relaxed);
+
+ UserType expected{0, 1};
+ UserType expected2{0, 0};
+ if (ret)
+ {
+ VERIFY(ret == true);
+ VERIFY(observed == expected2);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 1};
+ bool ret = atomic.compare_exchange_weak(observed, {0, 1}, eastl::memory_order_relaxed);
+
+ UserType expected{0, 0};
+
+ VERIFY(ret == false);
+ VERIFY(observed == expected);
+ }
+}
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestCompareExchangeStrong()
+{
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 0};
+ bool ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed);
+
+ UserType expected{0, 0};
+
+ VERIFY(ret == true);
+ VERIFY(observed == expected);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 0};
+ bool ret = atomic.compare_exchange_strong(observed, {0, 1}, eastl::memory_order_relaxed);
+
+ UserType expected{0, 1};
+ UserType expected2{0, 0};
+
+ VERIFY(ret == true);
+ VERIFY(observed == expected2);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == expected);
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 1};
+ bool ret = atomic.compare_exchange_strong(observed, {0, 1}, eastl::memory_order_relaxed);
+
+ UserType expected{0, 0};
+
+ VERIFY(ret == false);
+ VERIFY(observed == expected);
+ }
+}
+
+template <typename T>
+void AtomicUserTypeBasicTest<T>::TestAllMemoryOrders()
+{
+ {
+ AtomicType atomic;
+ UserType val{0, 1};
+
+ atomic.store(val);
+
+ atomic.store(val, eastl::memory_order_relaxed);
+
+ atomic.store(val, eastl::memory_order_release);
+
+ atomic.store(val, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType ret = atomic.load();
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ ret = atomic.load(eastl::memory_order_acquire);
+
+ ret = atomic.load(eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType ret = atomic.exchange({0, 1});
+
+ ret = atomic.exchange({0, 0}, eastl::memory_order_relaxed);
+
+ ret = atomic.exchange({0, 0}, eastl::memory_order_acquire);
+
+ ret = atomic.exchange({0, 0}, eastl::memory_order_release);
+
+ ret = atomic.exchange({0, 0}, eastl::memory_order_acq_rel);
+
+ ret = atomic.exchange({0, 0}, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 0};
+
+ bool ret = atomic.compare_exchange_weak(observed, {0, 0});
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 0};
+
+ bool ret = atomic.compare_exchange_strong(observed, {0, 0});
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 0};
+ bool ret;
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ UserType observed{0, 0};
+ bool ret;
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+ }
+}
+
+
+class AtomicBoolBasicTest
+{
+public:
+
+ using AtomicType = eastl::atomic<bool>;
+ using BoolType = bool;
+
+ int RunTest()
+ {
+ TestAtomicCtor();
+
+ TestAssignmentOperators();
+
+ TestIsLockFree();
+
+ TestStore();
+
+ TestLoad();
+
+ TestExchange();
+
+ TestCompareExchangeWeak();
+
+ TestCompareExchangeStrong();
+
+ TestAllMemoryOrders();
+
+ return nErrorCount;
+ }
+
+private:
+
+ void TestAtomicCtor();
+
+ void TestAssignmentOperators();
+
+ void TestIsLockFree();
+
+ void TestStore();
+
+ void TestLoad();
+
+ void TestExchange();
+
+ void TestCompareExchangeWeak();
+
+ void TestCompareExchangeStrong();
+
+ void TestAllMemoryOrders();
+
+private:
+
+ int nErrorCount = 0;
+};
+
+void AtomicBoolBasicTest::TestAtomicCtor()
+{
+ {
+ AtomicType atomic{ false };
+
+ BoolType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ }
+
+ {
+ AtomicType atomic{ true };
+
+ BoolType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ }
+
+ {
+ AtomicType atomic{};
+
+ BoolType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ }
+}
+
+void AtomicBoolBasicTest::TestAssignmentOperators()
+{
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic = true;
+
+ VERIFY(ret == true);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == true);
+ }
+}
+
+void AtomicBoolBasicTest::TestIsLockFree()
+{
+ {
+ AtomicType atomic;
+
+ bool ret = atomic.is_lock_free();
+
+ VERIFY(ret == true);
+
+ VERIFY(AtomicType::is_always_lock_free == true);
+ }
+}
+
+void AtomicBoolBasicTest::TestStore()
+{
+ {
+ AtomicType atomic;
+
+ atomic.store(true, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == true);
+ }
+}
+
+void AtomicBoolBasicTest::TestLoad()
+{
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == false);
+
+ VERIFY(atomic == false);
+ }
+
+ {
+ AtomicType atomic{ true };
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == true);
+
+ VERIFY(atomic == true);
+ }
+}
+
+void AtomicBoolBasicTest::TestExchange()
+{
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic.exchange(false, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == false);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic.exchange(true, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == true);
+ }
+}
+
+void AtomicBoolBasicTest::TestCompareExchangeWeak()
+{
+ {
+ AtomicType atomic{ false };
+
+ BoolType observed = false;
+ bool ret = atomic.compare_exchange_weak(observed, false, eastl::memory_order_relaxed);
+
+ if (ret)
+ {
+ VERIFY(ret == true);
+ VERIFY(observed == false);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == false);
+ }
+ }
+
+ {
+ AtomicType atomic{ false };
+
+ BoolType observed = false;
+ bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed);
+
+ if (ret)
+ {
+ VERIFY(ret == true);
+ VERIFY(observed == false);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == true);
+ }
+ }
+
+ {
+ AtomicType atomic{ false };
+
+ BoolType observed = true;
+ bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ VERIFY(observed == false);
+ }
+}
+
+void AtomicBoolBasicTest::TestCompareExchangeStrong()
+{
+ {
+ AtomicType atomic{ false };
+
+ BoolType observed = false;
+ bool ret = atomic.compare_exchange_weak(observed, false, eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+ VERIFY(observed == false);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == false);
+ }
+
+ {
+ AtomicType atomic{ false };
+
+ BoolType observed = false;
+ bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+ VERIFY(observed == false);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == true);
+ }
+
+ {
+ AtomicType atomic{ false };
+
+ BoolType observed = true;
+ bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ VERIFY(observed == false);
+ }
+}
+
+void AtomicBoolBasicTest::TestAllMemoryOrders()
+{
+ {
+ AtomicType atomic;
+
+ atomic.store(true);
+
+ atomic.store(true, eastl::memory_order_relaxed);
+
+ atomic.store(true, eastl::memory_order_release);
+
+ atomic.store(true, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic.load();
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ ret = atomic.load(eastl::memory_order_acquire);
+
+ ret = atomic.load(eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType ret = atomic.exchange(true);
+
+ ret = atomic.exchange(true, eastl::memory_order_relaxed);
+
+ ret = atomic.exchange(true, eastl::memory_order_acquire);
+
+ ret = atomic.exchange(true, eastl::memory_order_release);
+
+ ret = atomic.exchange(true, eastl::memory_order_acq_rel);
+
+ ret = atomic.exchange(true, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType observed = false;
+ bool ret = atomic.compare_exchange_weak(observed, true);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType observed = false;
+ bool ret = atomic.compare_exchange_strong(observed, true);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType observed = false;
+ bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic;
+
+ BoolType observed = false;
+ bool ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_release, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst);
+ }
+}
+
+
+template <typename T>
+class AtomicIntegralBasicTest
+{
+public:
+
+ using AtomicType = eastl::atomic<T>;
+ using IntegralType = T;
+
+ int RunTest()
+ {
+ TestAtomicCtor();
+
+ TestAtomicFetchAdd();
+ TestAtomicAddFetch();
+
+ TestAtomicFetchSub();
+ TestAtomicSubFetch();
+
+ TestAtomicFetchAnd();
+ TestAtomicAndFetch();
+
+ TestAtomicFetchOr();
+ TestAtomicOrFetch();
+
+ TestAtomicFetchXor();
+ TestAtomicXorFetch();
+
+ TestAssignmentOperators();
+
+ TestIsLockFree();
+
+ TestStore();
+
+ TestLoad();
+
+ TestExchange();
+
+ TestCompareExchangeWeak();
+
+ TestCompareExchangeStrong();
+
+ TestAllMemoryOrders();
+
+ TestAtomicStandalone();
+
+ return nErrorCount;
+ }
+
+private:
+
+ void TestAtomicCtor();
+
+ void TestAtomicFetchAdd();
+ void TestAtomicAddFetch();
+
+ void TestAtomicFetchSub();
+ void TestAtomicSubFetch();
+
+ void TestAtomicFetchAnd();
+ void TestAtomicAndFetch();
+
+ void TestAtomicFetchOr();
+ void TestAtomicOrFetch();
+
+ void TestAtomicFetchXor();
+ void TestAtomicXorFetch();
+
+ void TestAssignmentOperators();
+
+ void TestIsLockFree();
+
+ void TestStore();
+
+ void TestLoad();
+
+ void TestExchange();
+
+ void TestCompareExchangeWeak();
+
+ void TestCompareExchangeStrong();
+
+ void TestAllMemoryOrders();
+
+ void TestAtomicStandalone();
+
+private:
+
+ int nErrorCount = 0;
+};
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicCtor()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ }
+
+ {
+ AtomicType atomic{ 20 };
+
+ IntegralType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 20);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicFetchAdd()
+{
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic.fetch_add(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic.fetch_add(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 5 };
+
+ IntegralType ret = atomic.fetch_add(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 5);
+
+ ret = atomic.fetch_add(4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 5);
+
+ ret = atomic.fetch_add(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 9);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 10);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicAddFetch()
+{
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic.add_fetch(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic.add_fetch(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 5 };
+
+ IntegralType ret = atomic.add_fetch(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 5);
+
+ ret = atomic.add_fetch(4, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 9);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 9);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicFetchSub()
+{
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic.fetch_sub(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic.fetch_sub(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ }
+
+ {
+ AtomicType atomic{ 5 };
+
+ IntegralType ret = atomic.fetch_sub(2, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 5);
+
+ ret = atomic.fetch_sub(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 3);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 2);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicSubFetch()
+{
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic.sub_fetch(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic.sub_fetch(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ }
+
+ {
+ AtomicType atomic{ 5 };
+
+ IntegralType ret = atomic.sub_fetch(2, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 3);
+
+ ret = atomic.sub_fetch(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 2);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 2);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicFetchAnd()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.fetch_and(0x0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.fetch_and(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 0xF };
+
+ IntegralType ret = atomic.fetch_and(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0xF);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0X1);
+ }
+
+ {
+ AtomicType atomic{ 0xF };
+
+ IntegralType ret = atomic.fetch_and(0xF0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0xF);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicAndFetch()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.and_fetch(0x0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.and_fetch(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 0xF };
+
+ IntegralType ret = atomic.and_fetch(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+
+ {
+ AtomicType atomic{ 0xF };
+
+ IntegralType ret = atomic.and_fetch(0xF0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicFetchOr()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.fetch_or(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic.fetch_or(0x0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic.fetch_or(0x2, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x3);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicOrFetch()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.or_fetch(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic.or_fetch(0x0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic.or_fetch(0x2, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x3);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x3);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicFetchXor()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.fetch_xor(0x0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic.fetch_xor(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ }
+
+ {
+ AtomicType atomic{ 0x0 };
+
+ IntegralType ret = atomic.fetch_xor(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicXorFetch()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.xor_fetch(0x0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic.xor_fetch(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ }
+
+ {
+ AtomicType atomic{ 0x0 };
+
+ IntegralType ret = atomic.xor_fetch(0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAssignmentOperators()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = (atomic = 5);
+
+ VERIFY(ret == 5);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 5);
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = ++atomic;
+
+ VERIFY(ret == 1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic++;
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = --atomic;
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic--;
+
+ VERIFY(ret == 1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic += 5;
+
+ VERIFY(ret == 5);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 5);
+ }
+
+ {
+ AtomicType atomic{ 5 };
+
+ IntegralType ret = atomic -= 3;
+
+ VERIFY(ret == 2);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 2);
+ }
+
+ {
+ AtomicType atomic{ 0x0 };
+
+ IntegralType ret = atomic |= 0x1;
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic &= 0x1;
+
+ VERIFY(ret == 0x1);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic ^= 0x1;
+
+ VERIFY(ret == 0x0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestIsLockFree()
+{
+ {
+ const AtomicType atomic{ 5 };
+
+ VERIFY(atomic.is_lock_free() == true);
+
+ VERIFY(AtomicType::is_always_lock_free == true);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestStore()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ atomic.store(0, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0);
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ atomic.store(1, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestLoad()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0);
+
+ bool ret = atomic == 0;
+ VERIFY(ret == true);
+
+ VERIFY(atomic == 0);
+ }
+
+ {
+ AtomicType atomic{ 5 };
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 5);
+
+ bool ret = atomic == 5;
+ VERIFY(ret == true);
+
+ VERIFY(atomic == 5);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestExchange()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.exchange(0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType ret = atomic.exchange(1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestCompareExchangeWeak()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType observed = 0;
+ bool ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed);
+
+ if (ret == true)
+ {
+ VERIFY(ret == true);
+ VERIFY(observed == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType observed = 1;
+ bool ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ VERIFY(observed == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestCompareExchangeStrong()
+{
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType observed = 0;
+ bool ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+ VERIFY(observed == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic{ 0 };
+
+ IntegralType observed = 1;
+ bool ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == false);
+ VERIFY(observed == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0);
+ }
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAllMemoryOrders()
+{
+ {
+ AtomicType atomic{};
+
+ atomic.store(1);
+
+ atomic.store(1, eastl::memory_order_relaxed);
+
+ atomic.store(1, eastl::memory_order_release);
+
+ atomic.store(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.load();
+
+ ret = atomic.load(eastl::memory_order_relaxed);
+
+ ret = atomic.load(eastl::memory_order_acquire);
+
+ ret = atomic.load(eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.exchange(1);
+
+ ret = atomic.exchange(1, eastl::memory_order_relaxed);
+
+ ret = atomic.exchange(1, eastl::memory_order_acquire);
+
+ ret = atomic.exchange(1, eastl::memory_order_release);
+
+ ret = atomic.exchange(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.exchange(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.fetch_add(1);
+
+ ret = atomic.fetch_add(1, eastl::memory_order_relaxed);
+
+ ret = atomic.fetch_add(1, eastl::memory_order_acquire);
+
+ ret = atomic.fetch_add(1, eastl::memory_order_release);
+
+ ret = atomic.fetch_add(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.fetch_add(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.add_fetch(1);
+
+ ret = atomic.add_fetch(1, eastl::memory_order_relaxed);
+
+ ret = atomic.add_fetch(1, eastl::memory_order_acquire);
+
+ ret = atomic.add_fetch(1, eastl::memory_order_release);
+
+ ret = atomic.add_fetch(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.add_fetch(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.fetch_sub(1);
+
+ ret = atomic.fetch_sub(1, eastl::memory_order_relaxed);
+
+ ret = atomic.fetch_sub(1, eastl::memory_order_acquire);
+
+ ret = atomic.fetch_sub(1, eastl::memory_order_release);
+
+ ret = atomic.fetch_sub(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.fetch_sub(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.sub_fetch(1);
+
+ ret = atomic.sub_fetch(1, eastl::memory_order_relaxed);
+
+ ret = atomic.sub_fetch(1, eastl::memory_order_acquire);
+
+ ret = atomic.sub_fetch(1, eastl::memory_order_release);
+
+ ret = atomic.sub_fetch(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.sub_fetch(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.fetch_and(1);
+
+ ret = atomic.fetch_and(1, eastl::memory_order_relaxed);
+
+ ret = atomic.fetch_and(1, eastl::memory_order_acquire);
+
+ ret = atomic.fetch_and(1, eastl::memory_order_release);
+
+ ret = atomic.fetch_and(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.fetch_and(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.and_fetch(1);
+
+ ret = atomic.and_fetch(1, eastl::memory_order_relaxed);
+
+ ret = atomic.and_fetch(1, eastl::memory_order_acquire);
+
+ ret = atomic.and_fetch(1, eastl::memory_order_release);
+
+ ret = atomic.and_fetch(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.and_fetch(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.fetch_or(1);
+
+ ret = atomic.fetch_or(1, eastl::memory_order_relaxed);
+
+ ret = atomic.fetch_or(1, eastl::memory_order_acquire);
+
+ ret = atomic.fetch_or(1, eastl::memory_order_release);
+
+ ret = atomic.fetch_or(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.fetch_or(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.or_fetch(1);
+
+ ret = atomic.or_fetch(1, eastl::memory_order_relaxed);
+
+ ret = atomic.or_fetch(1, eastl::memory_order_acquire);
+
+ ret = atomic.or_fetch(1, eastl::memory_order_release);
+
+ ret = atomic.or_fetch(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.or_fetch(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.fetch_xor(1);
+
+ ret = atomic.fetch_xor(1, eastl::memory_order_relaxed);
+
+ ret = atomic.fetch_xor(1, eastl::memory_order_acquire);
+
+ ret = atomic.fetch_xor(1, eastl::memory_order_release);
+
+ ret = atomic.fetch_xor(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.fetch_xor(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType ret = atomic.xor_fetch(1);
+
+ ret = atomic.xor_fetch(1, eastl::memory_order_relaxed);
+
+ ret = atomic.xor_fetch(1, eastl::memory_order_acquire);
+
+ ret = atomic.xor_fetch(1, eastl::memory_order_release);
+
+ ret = atomic.xor_fetch(1, eastl::memory_order_acq_rel);
+
+ ret = atomic.xor_fetch(1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType observed = 0;
+ bool ret;
+
+ ret = atomic.compare_exchange_weak(observed, 1);
+
+ ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType observed = 0;
+ bool ret;
+
+ ret = atomic.compare_exchange_strong(observed, 1);
+
+ ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_release);
+
+ ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_acq_rel);
+
+ ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType observed = 0;
+ bool ret;
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_relaxed,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_acquire,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_acquire,
+ eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_release,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_acq_rel,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_acq_rel,
+ eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_seq_cst,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_seq_cst,
+ eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_weak(observed, 1,
+ eastl::memory_order_seq_cst,
+ eastl::memory_order_seq_cst);
+ }
+
+ {
+ AtomicType atomic{};
+
+ IntegralType observed = 0;
+ bool ret;
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_relaxed,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_acquire,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_acquire,
+ eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_release,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_acq_rel,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_acq_rel,
+ eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_seq_cst,
+ eastl::memory_order_relaxed);
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_seq_cst,
+ eastl::memory_order_acquire);
+
+ ret = atomic.compare_exchange_strong(observed, 1,
+ eastl::memory_order_seq_cst,
+ eastl::memory_order_seq_cst);
+ }
+
+}
+
+template <typename T>
+void AtomicIntegralBasicTest<T>::TestAtomicStandalone()
+{
+ {
+ AtomicType atomic;
+
+ IntegralType expected = 0;
+ bool ret = atomic_compare_exchange_weak(&atomic, &expected, 1);
+
+ if (ret)
+ {
+ VERIFY(ret == true);
+
+ VERIFY(expected == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType expected = 0;
+ bool ret = atomic_compare_exchange_weak_explicit(&atomic, &expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ if (ret)
+ {
+ VERIFY(ret == true);
+
+ VERIFY(expected == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType expected = 0;
+ bool ret = atomic_compare_exchange_strong(&atomic, &expected, 1);
+
+ VERIFY(ret == true);
+
+ VERIFY(expected == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType expected = 0;
+ bool ret = atomic_compare_exchange_strong_explicit(&atomic, &expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed);
+
+ VERIFY(ret == true);
+
+ VERIFY(expected == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_fetch_xor(&atomic, 0x1);
+
+ VERIFY(ret == 0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_fetch_xor_explicit(&atomic, 0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_xor_fetch(&atomic, 0x1);
+
+ VERIFY(ret == 0x1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_xor_fetch_explicit(&atomic, 0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_fetch_or(&atomic, 0x1);
+
+ VERIFY(ret == 0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_fetch_or_explicit(&atomic, 0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_or_fetch(&atomic, 0x1);
+
+ VERIFY(ret == 0x1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_or_fetch_explicit(&atomic, 0x1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic_fetch_and(&atomic, 0x0);
+
+ VERIFY(ret == 0x1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic_fetch_and_explicit(&atomic, 0x0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic_and_fetch(&atomic, 0x0);
+
+ VERIFY(ret == 0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0);
+ }
+
+ {
+ AtomicType atomic{ 0x1 };
+
+ IntegralType ret = atomic_and_fetch_explicit(&atomic, 0x0, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0x0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic_fetch_sub(&atomic, 1);
+
+ VERIFY(ret == 1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic_fetch_sub_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic_sub_fetch(&atomic, 1);
+
+ VERIFY(ret == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0);
+ }
+
+ {
+ AtomicType atomic{ 1 };
+
+ IntegralType ret = atomic_sub_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 0);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_fetch_add(&atomic, 1);
+
+ VERIFY(ret == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_fetch_add_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_add_fetch(&atomic, 1);
+
+ VERIFY(ret == 1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_add_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 1);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_exchange(&atomic, 1);
+
+ VERIFY(ret == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_exchange_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_load(&atomic);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_load_explicit(&atomic, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_load_cond(&atomic, [](IntegralType val) { return true; });
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic;
+
+ IntegralType ret = atomic_load_cond_explicit(&atomic, [](IntegralType val) { return true; }, eastl::memory_order_relaxed);
+
+ VERIFY(ret == 0);
+ }
+
+ {
+ AtomicType atomic;
+
+ atomic_store(&atomic, 1);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ atomic_store_explicit(&atomic, 1, eastl::memory_order_relaxed);
+
+ VERIFY(atomic.load(eastl::memory_order_relaxed) == 1);
+ }
+
+ {
+ AtomicType atomic;
+
+ VERIFY(atomic_is_lock_free(&atomic) == true);
+ }
+}
+
+struct AtomicNonDefaultConstructible
+{
+ AtomicNonDefaultConstructible(uint8_t a)
+ : a(a)
+ {
+ }
+
+ friend bool operator==(const AtomicNonDefaultConstructible& a, const AtomicNonDefaultConstructible& b)
+ {
+ return a.a == b.a;
+ }
+
+ uint8_t a;
+};
+
+#if defined(EASTL_ATOMIC_HAS_8BIT)
+
+int TestAtomicNonDefaultConstructible()
+{
+ int nErrorCount = 0;
+
+ {
+ eastl::atomic<AtomicNonDefaultConstructible> atomic{AtomicNonDefaultConstructible{(uint8_t)3}};
+
+ VERIFY(atomic.load() == AtomicNonDefaultConstructible{(uint8_t)3});
+ }
+
+ {
+ eastl::atomic<AtomicNonDefaultConstructible> atomic{AtomicNonDefaultConstructible{(uint8_t)3}};
+
+ atomic.store(AtomicNonDefaultConstructible{(uint8_t)4});
+
+ VERIFY(atomic.load() == AtomicNonDefaultConstructible{(uint8_t)4});
+ }
+
+ {
+ eastl::atomic<AtomicNonDefaultConstructible> atomic{AtomicNonDefaultConstructible{(uint8_t)3}};
+
+ VERIFY(atomic_load_cond(&atomic, [] (AtomicNonDefaultConstructible) { return true; }) == AtomicNonDefaultConstructible{(uint8_t)3});
+ }
+
+ {
+ eastl::atomic<AtomicNonDefaultConstructible> atomic{AtomicNonDefaultConstructible{(uint8_t)3}};
+
+ VERIFY(atomic_load_cond_explicit(&atomic, [] (AtomicNonDefaultConstructible) { return true; }, eastl::memory_order_seq_cst) == AtomicNonDefaultConstructible{(uint8_t)3});
+ }
+
+ return nErrorCount;
+}
+
+#endif
+
+struct Atomic128LoadType
+{
+ friend bool operator==(const Atomic128LoadType& a, const Atomic128LoadType& b)
+ {
+ return a.a == b.a && a.b == b.b && a.c == b.c && a.d == b.d;
+ }
+
+ uint32_t a, b, c, d;
+};
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+int TestAtomic128Loads()
+{
+ int nErrorCount = 0;
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{1, 1, 0, 0}};
+
+ VERIFY((atomic.load() == Atomic128LoadType{1, 1, 0, 0}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{0, 0, 1, 1}};
+
+ VERIFY((atomic.load() == Atomic128LoadType{0, 0, 1, 1}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{0, 1, 0, 1}};
+
+ VERIFY((atomic.load() == Atomic128LoadType{0, 1, 0, 1}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{1, 0, 1, 0}};
+
+ VERIFY((atomic.load() == Atomic128LoadType{1, 0, 1, 0}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{1, 1, 0, 0}};
+
+ Atomic128LoadType expected{0, 0, 0, 0};
+ atomic.compare_exchange_strong(expected, Atomic128LoadType{1, 1, 0, 0});
+
+ VERIFY((expected == Atomic128LoadType{1, 1, 0, 0}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{0, 0, 1, 1}};
+
+ Atomic128LoadType expected{0, 0, 0, 0};
+ atomic.compare_exchange_strong(expected, Atomic128LoadType{0, 0, 1, 1});
+
+ VERIFY((expected == Atomic128LoadType{0, 0, 1, 1}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{0, 1, 0, 1}};
+
+ Atomic128LoadType expected{0, 0, 0, 0};
+ atomic.compare_exchange_strong(expected, Atomic128LoadType{0, 1, 0, 1});
+
+ VERIFY((expected == Atomic128LoadType{0, 1, 0, 1}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{1, 0, 1, 0}};
+
+ Atomic128LoadType expected{0, 0, 0, 0};
+ atomic.compare_exchange_strong(expected, Atomic128LoadType{1, 0, 1, 0});
+
+ VERIFY((expected == Atomic128LoadType{1, 0, 1, 0}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{0, 0, 0, 0}};
+
+ Atomic128LoadType expected{0, 0, 0, 0};
+ atomic.compare_exchange_strong(expected, Atomic128LoadType{1, 1, 0, 0});
+
+ VERIFY((atomic.load() == Atomic128LoadType{1, 1, 0, 0}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{0, 0, 0, 0}};
+
+ Atomic128LoadType expected{0, 0, 0, 0};
+ atomic.compare_exchange_strong(expected, Atomic128LoadType{0, 0, 1, 1});
+
+ VERIFY((atomic.load() == Atomic128LoadType{0, 0, 1, 1}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{0, 0, 0, 0}};
+
+ Atomic128LoadType expected{0, 0, 0, 0};
+ atomic.compare_exchange_strong(expected, Atomic128LoadType{0, 1, 0, 1});
+
+ VERIFY((atomic.load() == Atomic128LoadType{0, 1, 0, 1}));
+ }
+
+ {
+ eastl::atomic<Atomic128LoadType> atomic{Atomic128LoadType{0, 0, 0, 0}};
+
+ Atomic128LoadType expected{0, 0, 0, 0};
+ atomic.compare_exchange_strong(expected, Atomic128LoadType{1, 0, 1, 0});
+
+ VERIFY((atomic.load() == Atomic128LoadType{1, 0, 1, 0}));
+ }
+
+ return nErrorCount;
+}
+
+#endif
+
+int TestAtomicBasic()
+{
+ int nErrorCount = 0;
+
+ #if defined(EASTL_ATOMIC_HAS_8BIT)
+ {
+ AtomicIntegralBasicTest<uint8_t> u8AtomicTest;
+
+ nErrorCount += u8AtomicTest.RunTest();
+ }
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_16BIT)
+ {
+ AtomicIntegralBasicTest<uint16_t> u16AtomicTest;
+
+ nErrorCount += u16AtomicTest.RunTest();
+ }
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ {
+ AtomicIntegralBasicTest<uint32_t> u32AtomicTest;
+
+ nErrorCount += u32AtomicTest.RunTest();
+ }
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ {
+ AtomicIntegralBasicTest<uint64_t> u64AtomicTest;
+
+ nErrorCount += u64AtomicTest.RunTest();
+ }
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC))
+ {
+ AtomicIntegralBasicTest<__uint128_t> u128AtomicTest;
+
+ nErrorCount += u128AtomicTest.RunTest();
+ }
+
+ {
+ AtomicIntegralBasicTest<eastl_uint128_t> u128AtomicTest;
+
+ nErrorCount += u128AtomicTest.RunTest();
+ }
+ #endif
+
+ {
+ AtomicBoolBasicTest boolAtomicTest;
+
+ nErrorCount += boolAtomicTest.RunTest();
+ }
+
+ #if defined(EASTL_ATOMIC_HAS_16BIT)
+ {
+ AtomicUserTypeBasicTest<AtomicUserType16> userTypeAtomicTest;
+
+ nErrorCount += userTypeAtomicTest.RunTest();
+ }
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ {
+ AtomicUserTypeBasicTest<AtomicNonTriviallyConstructible> userTypeAtomicTest;
+
+ nErrorCount += userTypeAtomicTest.RunTest();
+ }
+
+ {
+ AtomicUserTypeBasicTest<AtomicNonTriviallyConstructibleNoExcept> userTypeAtomicTest;
+
+ nErrorCount += userTypeAtomicTest.RunTest();
+ }
+ #endif
+
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ {
+ AtomicUserTypeBasicTest<AtomicUserType128> userTypeAtomicTest;
+
+ nErrorCount += userTypeAtomicTest.RunTest();
+ }
+ #endif
+
+ {
+ AtomicPointerBasicTest ptrAtomicTest;
+
+ nErrorCount += ptrAtomicTest.RunTest();
+ }
+
+ {
+ AtomicVoidPointerBasicTest voidPtrAtomicTest;
+
+ nErrorCount += voidPtrAtomicTest.RunTest();
+ }
+
+ {
+ AtomicFlagBasicTest atomicFlagBasicTest;
+
+ nErrorCount += atomicFlagBasicTest.RunTest();
+ }
+
+ {
+ AtomicStandaloneBasicTest atomicStandaloneBasicTest;
+
+ nErrorCount += atomicStandaloneBasicTest.RunTest();
+ }
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+
+ nErrorCount += TestAtomic128Loads();
+
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_8BIT)
+
+ nErrorCount += TestAtomicNonDefaultConstructible();
+
+#endif
+
+ nErrorCount += TestAtomicConstantInitialization();
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestBitVector.cpp b/EASTL/test/source/TestBitVector.cpp
new file mode 100644
index 0000000..ba3ae8c
--- /dev/null
+++ b/EASTL/test/source/TestBitVector.cpp
@@ -0,0 +1,469 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EABase/eabase.h>
+#include <EASTL/bitvector.h>
+#include <EASTL/vector.h>
+#include <EASTL/deque.h>
+#include <EASTL/string.h>
+
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::bitvector<>;
+template class eastl::bitvector<MallocAllocator>;
+template class eastl::bitvector<EASTLAllocatorType, uint8_t>;
+template class eastl::bitvector<EASTLAllocatorType, int16_t>;
+template class eastl::bitvector<EASTLAllocatorType, int32_t>;
+template class eastl::bitvector<EASTLAllocatorType, int64_t, eastl::vector<int64_t, EASTLAllocatorType> >;
+
+// bitvector doesn't yet support deque.
+//template class eastl::bitvector<EASTLAllocatorType, uint8_t, eastl::deque<uint64_t, EASTLAllocatorType> >;
+//template class eastl::bitvector<EASTLAllocatorType, uint8_t, eastl::deque<int32_t, EASTLAllocatorType, 64> >;
+
+
+
+int TestBitVector()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ {
+ // typedef bitvector<Allocator, Element> this_type;
+ // typedef bool value_type;
+ // typedef bitvector_reference<Element> reference;
+ // typedef bool const_reference;
+ // typedef bitvector_iterator<Element> iterator;
+ // typedef bitvector_const_iterator<Element> const_iterator;
+ // typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ // typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ // typedef Allocator allocator_type;
+ // typedef Element element_type;
+ // typedef Container container_type;
+ // typedef eastl_size_t size_type;
+
+ bitvector<>::this_type this_typeVariable;
+ bitvector<>::value_type value_typeVariable = 0;
+ bitvector<>::const_reference const_referenceVariable(false);
+ bitvector<>::iterator iteratorVariable(NULL, 0);
+ bitvector<>::const_iterator const_iteratorVariable(NULL, 0);
+ bitvector<>::reverse_iterator reverse_iteratorVariable(iteratorVariable);
+ bitvector<>::const_reverse_iterator const_reverse_iteratorVariable(const_iteratorVariable);
+ bitvector<>::allocator_type allocator_typeVariable;
+ bitvector<>::element_type element_typeVariable = 0;
+ bitvector<>::container_type container_typeVariable;
+ bitvector<>::size_type size_typeVariable = 0;
+
+ string sAddresses(string::CtorSprintf(), "%p %p %p %p %p %p %p %p %p %p %p",
+ &this_typeVariable, &value_typeVariable, &const_referenceVariable, &iteratorVariable,
+ &const_iteratorVariable, &reverse_iteratorVariable,&const_reverse_iteratorVariable,
+ &allocator_typeVariable, &element_typeVariable, &container_typeVariable, &size_typeVariable);
+ EATEST_VERIFY(sAddresses.size() > 0);
+ }
+
+ {
+ // bitvector();
+ // explicit bitvector(const allocator_type& allocator);
+ // explicit bitvector(size_type n, const allocator_type& allocator = EASTL_BITVECTOR_DEFAULT_ALLOCATOR);
+ // bitvector(size_type n, value_type value, const allocator_type& allocator = EASTL_BITVECTOR_DEFAULT_ALLOCATOR);
+ // bitvector(const bitvector& copy);
+ // template <typename InputIterator> bitvector(InputIterator first, InputIterator last);
+ // bitvector& operator=(const bitvector& x);
+ // reference operator[](size_type n); // behavior is undefined if n is invalid.
+ // const_reference operator[](size_type n) const;
+ MallocAllocator mallocAllocator;
+ bitvector<> bv0;
+ bitvector<MallocAllocator> bv1(mallocAllocator);
+ bitvector<> bv2(200);
+ bitvector<> bv3(300, true);
+ bitvector<MallocAllocator> bv4(400, false, mallocAllocator);
+ const bitvector<> bv5(bv2);
+ bool boolArray[] = { true, false, true };
+ bitvector<> bv6(boolArray, boolArray + EAArrayCount(boolArray));
+ bitvector<> bv7(bv3.begin(), bv3.end());
+
+ {
+ // Validate the above constructions
+ EATEST_VERIFY(bv0.validate());
+ EATEST_VERIFY(bv0.empty());
+
+ EATEST_VERIFY(bv1.validate());
+ EATEST_VERIFY(bv1.empty());
+
+ EATEST_VERIFY(bv2.validate());
+ EATEST_VERIFY(bv2.size() == 200);
+ for(eastl_size_t i = 0; i < bv2.size(); i++)
+ EATEST_VERIFY(bv2[i] == false);
+
+ EATEST_VERIFY(bv3.validate());
+ EATEST_VERIFY(bv3.size() == 300);
+ for(eastl_size_t i = 0; i < bv3.size(); i++)
+ EATEST_VERIFY(bv3[i] == true);
+
+ EATEST_VERIFY(bv4.validate());
+ EATEST_VERIFY(bv4.size() == 400);
+ for(eastl_size_t i = 0; i < bv4.size(); i++)
+ EATEST_VERIFY(bv4[i] == false);
+
+ EATEST_VERIFY(bv5.validate());
+ EATEST_VERIFY(bv5 == bv2);
+ for(eastl_size_t i = 0; i < bv5.size(); i++)
+ EATEST_VERIFY(bv5[i] == false);
+
+ EATEST_VERIFY(bv6.validate());
+ EATEST_VERIFY(bv6.size() == EAArrayCount(boolArray));
+ for(eastl_size_t i = 0; i < bv6.size(); i++)
+ EATEST_VERIFY(bv6[i] == boolArray[i]);
+
+ EATEST_VERIFY(bv7.validate());
+ EATEST_VERIFY(bv7.size() == bv3.size()); // The == test theoretically includes this test, be we check anyway.
+ for(eastl_size_t j = 0; j < bv7.size(); j++)
+ EATEST_VERIFY(bv7[j] == bv3[j]);
+ EATEST_VERIFY(bv7 == bv3);
+ for(eastl_size_t i = 0; (i < bv3.size()) && (i < bv7.size()); i++)
+ EATEST_VERIFY(bv3[i] == bv7[i]);
+ }
+
+ {
+ // void swap(this_type& x);
+
+ bv7.swap(bv7); // Test swapping against self.
+ EATEST_VERIFY(bv7.validate());
+ EATEST_VERIFY(bv7 == bv3);
+ EATEST_VERIFY(bv7.size() == bv3.size()); // The == test theoretically includes this test, be we check anyway.
+ for(eastl_size_t i = 0; (i < bv3.size()) && (i < bv7.size()); i++)
+ EATEST_VERIFY(bv3[i] == bv7[i]);
+
+ bv3.swap(bv2); // Note that bv3 and bv4 use different allocators, so we are exercizing that.
+ EATEST_VERIFY(bv3.validate());
+ EATEST_VERIFY(bv3.size() == 200);
+ for(eastl_size_t i = 0; i < bv3.size(); i++)
+ EATEST_VERIFY(bv3[i] == false);
+
+ EATEST_VERIFY(bv2.validate());
+ EATEST_VERIFY(bv2.size() == 300);
+ for(eastl_size_t i = 0; i < bv2.size(); i++)
+ EATEST_VERIFY(bv2[i] == true);
+
+
+ // bitvector& operator=(const bitvector& x);
+
+ bv6 = bv7;
+ EATEST_VERIFY(bv6.validate());
+ EATEST_VERIFY(bv6 == bv7);
+
+
+ // template <typename InputIterator> void assign(InputIterator first, InputIterator last);
+ bv0.assign(bv3.begin(), bv3.end());
+ EATEST_VERIFY(bv0 == bv3);
+
+ bv0.assign(boolArray, boolArray + EAArrayCount(boolArray));
+ EATEST_VERIFY(bv0 == bitvector<>(boolArray, boolArray + EAArrayCount(boolArray)));
+
+ bv0.resize(0);
+ EATEST_VERIFY(bv0.begin()==bv0.end());//should not crash
+ bv3.resize(0);
+ EATEST_VERIFY(bv0 == bv3);
+ }
+ }
+
+
+ {
+ // iterator begin();
+ // const_iterator begin() const;
+ // iterator end();
+ // const_iterator end() const;
+
+ bool boolArray[] = { true, false, true, true, false, true };
+ const bitvector<> bv0(boolArray, boolArray + EAArrayCount(boolArray));
+ bitvector<>::const_iterator it;
+ eastl_size_t i;
+
+ for(it = bv0.begin(), i = 0; it != bv0.end(); ++it, ++i) // Iterate forward by 1.
+ {
+ const bool value = *it;
+ EATEST_VERIFY(value == boolArray[i]);
+ }
+
+ for(--it, --i; (eastl_ssize_t)i >= 0; --it, --i) // Iterate backward by 1. Problem: this test code does --it for it == begin(), which isn't strictly allowed.
+ {
+ const bool value = *it;
+ EATEST_VERIFY(value == boolArray[i]);
+ }
+
+ // The following code asssumes an even number of elements.
+ EASTL_CT_ASSERT((EAArrayCount(boolArray) % 2) == 0);
+ for(it = bv0.begin(), ++i; it != bv0.end(); it += 2, i += 2) // Iterate forward by 2.
+ {
+ const bool value = *it;
+ EATEST_VERIFY(value == boolArray[i]);
+ }
+
+ for(it -= 2, i -= 2; (eastl_ssize_t)i >= 0; it -= 2, i -= 2) // Iterate backward by 1. Problem: this test code does it -= 2 for it == begin(), which isn't strictly allowed.
+ {
+ const bool value = *it;
+ EATEST_VERIFY(value == boolArray[i]);
+ }
+
+
+ // reverse_iterator rbegin();
+ // const_reverse_iterator rbegin() const;
+ // reverse_iterator rend();
+ // const_reverse_iterator rend() const;
+
+ bitvector<>::const_reverse_iterator rit;
+ i = (bv0.size() - 1);
+
+ for(rit = bv0.rbegin(); rit != bv0.rend(); ++rit, --i) // Reverse-iterate forward by 1.
+ {
+ //const bool value = *rit; // This is currently broken and will require a bit of work to fix.
+ const bool value = *--rit.base();
+ EATEST_VERIFY(value == boolArray[i]);
+ }
+
+ for(--rit, ++i; i < bv0.size(); --rit, ++i) // Reverse-iterate backward by 1.
+ {
+ //const bool value = *rit; // This is currently broken and will require a bit of work to fix.
+ const bool value = *--rit.base();
+ EATEST_VERIFY(value == boolArray[i]);
+ }
+
+ // The following code asssumes an even number of elements.
+ EASTL_CT_ASSERT((EAArrayCount(boolArray) % 2) == 0);
+ for(rit = bv0.rbegin(), --i; rit != bv0.rend(); rit += 2, i -= 2) // Reverse-iterate forward by 2.
+ {
+ //const bool value = *rit; // This is currently broken and will require a bit of work to fix.
+ const bool value = *--rit.base();
+ EATEST_VERIFY(value == boolArray[i]);
+ }
+
+ for(rit -= 2, i += 2; i < bv0.size(); rit -= 2, i += 2) // Reverse-iterate backward by 2.
+ {
+ //const bool value = *rit; // This is currently broken and will require a bit of work to fix.
+ const bool value = *--rit.base();
+ EATEST_VERIFY(value == boolArray[i]);
+ }
+
+
+ // find_first, etc.
+ /* This work is not complete.
+ {
+ bitvector<> bv(30, false);
+
+ bitvector<>::iterator it = bv.find_first();
+ EATEST_VERIFY(it == bv.begin());
+ }
+ */
+ }
+
+ {
+ MallocAllocator mallocAllocator;
+ bitvector<MallocAllocator> bv0(mallocAllocator);
+
+ // bool empty() const;
+ // size_type size() const;
+ // size_type capacity() const;
+
+ EATEST_VERIFY(bv0.empty());
+ EATEST_VERIFY(bv0.size() == 0);
+ EATEST_VERIFY(bv0.capacity() == 0); // EASTL requires that newly constructed containers have 0 capacity.
+
+ bool boolArray[] = { false, true, true };
+ bv0.assign(boolArray, boolArray + EAArrayCount(boolArray));
+
+ EATEST_VERIFY(!bv0.empty());
+ EATEST_VERIFY(bv0.size() == EAArrayCount(boolArray));
+ EATEST_VERIFY((bv0.capacity() > 0) && (bv0.capacity() <= (8 * sizeof(bitvector<>::element_type))));
+
+
+ // reference front();
+ // const_reference front() const;
+ // reference back();
+ // const_reference back() const;
+
+ EATEST_VERIFY(bv0.front() == false);
+ EATEST_VERIFY(bv0.back() == true);
+ bv0.erase(bv0.begin());
+ EATEST_VERIFY(bv0.front() == true);
+ bv0.erase(bv0.rbegin());
+ EATEST_VERIFY(bv0.back() == true);
+
+ // void set_capacity(size_type n = npos);
+
+ bv0.reserve(17);
+ EATEST_VERIFY((bv0.capacity() >= 17) && (bv0.capacity() <= 100)); // It's hard to make a unit test to portably test an upper limit.
+
+ int allocCountBefore = MallocAllocator::mAllocCountAll;
+ while(bv0.size() < 17)
+ bv0.push_back(false);
+ EATEST_VERIFY(allocCountBefore == MallocAllocator::mAllocCountAll); // Verify no new memory was allocated.
+
+ bv0.set_capacity();
+ EATEST_VERIFY(bv0.capacity() >= bv0.size());
+
+ bv0.set_capacity(0);
+ EATEST_VERIFY(bv0.capacity() == 0);
+ EATEST_VERIFY(bv0.empty());
+
+
+ // void resize(size_type n, value_type value);
+ // void resize(size_type n);
+ // void reserve(size_type n);
+
+ bv0.reserve(800);
+ EATEST_VERIFY(bv0.capacity() >= 800);
+ allocCountBefore = MallocAllocator::mAllocCountAll;
+ bv0.resize(800, true);
+ EATEST_VERIFY(allocCountBefore == MallocAllocator::mAllocCountAll); // Verify no new memory was allocated.
+
+
+ // void push_back();
+ // void push_back(value_type value);
+ // void pop_back();
+ // reference operator[](size_type n);
+ // const_reference operator[](size_type n) const;
+
+ bv0.push_back();
+ bv0.back() = true;
+ bv0.push_back(false);
+ bv0.push_back(true);
+
+ EATEST_VERIFY(bv0[bv0.size()-1] == true);
+ EATEST_VERIFY(bv0[bv0.size()-2] == false);
+ EATEST_VERIFY(bv0[bv0.size()-3] == true);
+
+
+ // reference at(size_type n);
+ // const_reference at(size_type n) const;
+
+ EATEST_VERIFY(bv0.at(bv0.size()-1) == true);
+ EATEST_VERIFY(bv0.at(bv0.size()-2) == false);
+ EATEST_VERIFY(bv0.at(bv0.size()-3) == true);
+
+
+ // void clear();
+ // bool test(size_type n, bool defaultValue) const;
+ // void set(bool value, size_type n);
+
+ bv0.clear();
+ bv0.resize(17, true);
+ EATEST_VERIFY(bv0.test(0, false) == true);
+ EATEST_VERIFY(bv0.test(17, false) == false); // Test past the end.
+ EATEST_VERIFY(bv0.test(17, true) == true);
+
+ bv0.set(3, false);
+ EATEST_VERIFY(bv0.test(3, true) == false);
+
+ bv0.set(100, true);
+ EATEST_VERIFY(bv0.test(100, false) == true);
+
+
+ // container_type& get_container();
+ // const container_type& get_container() const;
+
+ EATEST_VERIFY(!bv0.get_container().empty());
+
+
+ // bool validate() const;
+ // int validate_iterator(const_iterator i) const;
+
+ EATEST_VERIFY(bv0.validate());
+ bitvector<>::iterator it;
+ EATEST_VERIFY(bv0.validate_iterator(it) == isf_none);
+ for(it = bv0.begin(); it != bv0.end(); ++it)
+ EATEST_VERIFY(bv0.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ EATEST_VERIFY(bv0.validate_iterator(it) == (isf_valid | isf_current));
+
+
+
+ // iterator insert(iterator position, value_type value);
+ // void insert(iterator position, size_type n, value_type value);
+
+ bv0.clear();
+ bv0.resize(17, true);
+ bv0.insert(bv0.begin() + 5, false);
+ EATEST_VERIFY(bv0[5] == false);
+ bv0[5] = true;
+ EATEST_VERIFY(bv0[5] == true);
+
+ bv0.insert(bv0.begin() + 5, 7, false);
+ EATEST_VERIFY((bv0[5] == false) && (bv0[11] == false));
+
+ EATEST_VERIFY(bv0.back() == true);
+ bv0.insert(bv0.end(), false);
+ EATEST_VERIFY(bv0.back() == false);
+
+
+ // iterator erase(iterator position);
+ // iterator erase(iterator first, iterator last);
+
+ EATEST_VERIFY((bv0[10] == false) && (bv0[11] == false));
+ bv0.erase(bv0.begin() + 11);
+ EATEST_VERIFY((bv0[10] == false) && (bv0[11] == true));
+
+ EATEST_VERIFY(bv0[5] == false);
+ bool bv06 = bv0[6];
+ bv0.erase(bv0.begin() + 5, bv0.begin() + 6);
+ EATEST_VERIFY(bv0[5] == bv06);
+
+
+ // reverse_iterator erase(reverse_iterator position);
+ // reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+
+ bv0.clear();
+ bv0.resize(10, true);
+ bv0.back() = false;
+ bv0.erase(bv0.rbegin());
+ EATEST_VERIFY((bv0.size() == 9) && (bv0.back() == true));
+
+ bv0.erase(bv0.rbegin(), bv0.rend());
+ EATEST_VERIFY(bv0.empty());
+
+
+ // template <typename InputIterator> Not yet implemented. See below for disabled definition.
+ // void insert(iterator position, InputIterator first, InputIterator last);
+ //
+ // Disabled because insert isn't implemented yet.
+ // const bool boolArray2[4] = { false, true, false, true };
+ // bv0.insert(bv0.end(), boolArray2, boolArray2 + EAArrayCount(boolArray));
+ // EATEST_VERIFY(bv0.size() == EAArrayCount(boolArray2));
+
+
+ // element_type* data();
+ // const element_type* data() const;
+
+ EATEST_VERIFY(bv0.data() != NULL);
+ bv0.set_capacity(0);
+ EATEST_VERIFY(bv0.data() == NULL);
+
+
+ // void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ bv0.resize(100, true);
+ void* pSaved = MallocAllocator::mpLastAllocation;
+ bv0.reset_lose_memory();
+ EATEST_VERIFY(bv0.validate());
+ free(pSaved); // Call the C free function.
+ MallocAllocator::mpLastAllocation = NULL;
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestBitcast.cpp b/EASTL/test/source/TestBitcast.cpp
new file mode 100644
index 0000000..d6f0840
--- /dev/null
+++ b/EASTL/test/source/TestBitcast.cpp
@@ -0,0 +1,52 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/bit.h>
+
+using namespace eastl;
+
+
+int TestBitcast()
+{
+ int nErrorCount = 0;
+
+ {
+ uint32_t int32Value = 0x12345678;
+ float floatValue = eastl::bit_cast<float>(int32Value);
+ VERIFY(memcmp(&int32Value, &floatValue, sizeof(float)) == 0);
+ }
+
+ {
+ struct IntFloatStruct
+ {
+ uint32_t i = 0x87654321;
+ float f = 10.f;
+ };
+ struct CharIntStruct
+ {
+ char c1;
+ char c2;
+ char c3;
+ char c4;
+ uint32_t i;
+ };
+
+ IntFloatStruct ifStruct;
+ CharIntStruct ciStruct = eastl::bit_cast<CharIntStruct>(ifStruct);
+ VERIFY(memcmp(&ifStruct, &ciStruct, sizeof(IntFloatStruct)) == 0);
+ }
+
+#if EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+ {
+ constexpr uint32_t int32Value = 40;
+ constexpr float floatValue = eastl::bit_cast<float>(int32Value);
+ VERIFY(memcmp(&int32Value, &floatValue, sizeof(float)) == 0);
+ }
+#endif
+
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestBitset.cpp b/EASTL/test/source/TestBitset.cpp
new file mode 100644
index 0000000..ef97489
--- /dev/null
+++ b/EASTL/test/source/TestBitset.cpp
@@ -0,0 +1,1327 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/bitset.h>
+#include <EABase/eabase.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+ #pragma warning(disable: 4310) // Cast truncates constant value
+#endif
+
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+
+#if (EASTL_BITSET_WORD_SIZE_DEFAULT != 1)
+ template class eastl::bitset<1, uint8_t>;
+ template class eastl::bitset<33, uint8_t>;
+ template class eastl::bitset<65, uint8_t>;
+ template class eastl::bitset<129, uint8_t>;
+#endif
+
+#if (EASTL_BITSET_WORD_SIZE_DEFAULT != 2)
+ template class eastl::bitset<1, uint16_t>;
+ template class eastl::bitset<33, uint16_t>;
+ template class eastl::bitset<65, uint16_t>;
+ template class eastl::bitset<129, uint16_t>;
+#endif
+
+#if (EASTL_BITSET_WORD_SIZE_DEFAULT != 4) // If not already represented
+ template class eastl::bitset<1, uint32_t>;
+ template class eastl::bitset<33, uint32_t>;
+ template class eastl::bitset<65, uint32_t>;
+ template class eastl::bitset<129, uint32_t>;
+#endif
+
+#if (EASTL_BITSET_WORD_SIZE_DEFAULT != 8)
+ template class eastl::bitset<1, uint64_t>;
+ template class eastl::bitset<33, uint64_t>;
+ template class eastl::bitset<65, uint64_t>;
+ template class eastl::bitset<129, uint64_t>;
+#endif
+
+#if (EASTL_BITSET_WORD_SIZE_DEFAULT != 16)
+ #if EASTL_INT128_SUPPORTED
+ template class eastl::bitset<1, eastl_uint128_t>;
+ template class eastl::bitset<33, eastl_uint128_t>;
+ template class eastl::bitset<65, eastl_uint128_t>;
+ template class eastl::bitset<129, eastl_uint128_t>;
+ #endif
+#endif
+
+
+int TestBitset()
+{
+ int nErrorCount = 0;
+
+ {
+ // bitset<0> tests
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC before v3.0 can't handle our bitset<0>.
+ bitset<0> b0(0x10101010);
+ EATEST_VERIFY(b0.count() == 0);
+ EATEST_VERIFY(b0.to_ulong() == 0x00000000);
+ EATEST_VERIFY(b0.to_uint32() == 0x00000000);
+ EATEST_VERIFY(b0.to_uint64() == 0x00000000);
+
+ b0.flip();
+ EATEST_VERIFY(b0.count() == 0);
+ EATEST_VERIFY(b0.to_ulong() == 0x00000000);
+ EATEST_VERIFY(b0.to_uint32() == 0x00000000);
+ EATEST_VERIFY(b0.to_uint64() == 0x00000000);
+
+ b0 <<= 1;
+ EATEST_VERIFY(b0.count() == 0);
+ EATEST_VERIFY(b0.to_ulong() == 0x00000000);
+ EATEST_VERIFY(b0.to_uint32() == 0x00000000);
+ EATEST_VERIFY(b0.to_uint64() == 0x00000000);
+
+ // Disabled because currently bitset<0> instances can't be modified without triggering asserts.
+ //b0.from_uint32(0x10101010);
+ //EATEST_VERIFY(b0.to_uint32() == 0x00000000);
+ //b0.from_uint64(UINT64_C(0x1010101010101010));
+ //EATEST_VERIFY(b0.to_uint64() == UINT64_C(0x0000000000000000));
+ #endif
+
+ // bitset<8> tests
+ bitset<8> b8(0x10101010);
+ EATEST_VERIFY(b8.count() == 1);
+ EATEST_VERIFY(b8.to_ulong() == 0x00000010);
+ EATEST_VERIFY(b8.to_uint32() == 0x00000010);
+ EATEST_VERIFY(b8.to_uint64() == 0x00000010);
+
+ b8.flip();
+ EATEST_VERIFY(b8.count() == 7);
+ EATEST_VERIFY(b8.to_ulong() == 0x000000ef);
+ EATEST_VERIFY(b8.to_uint32() == 0x000000ef);
+ EATEST_VERIFY(b8.to_uint64() == 0x000000ef);
+
+ b8 <<= 1;
+ EATEST_VERIFY(b8.count() == 6);
+ EATEST_VERIFY(b8.to_ulong() == 0x000000de);
+ EATEST_VERIFY(b8.to_uint32() == 0x000000de);
+ EATEST_VERIFY(b8.to_uint64() == 0x000000de);
+
+ b8.reset();
+ b8.flip();
+ b8 >>= 33;
+ EATEST_VERIFY(b8.count() == 0);
+
+ b8.reset();
+ b8.flip();
+ b8 >>= 65;
+ EATEST_VERIFY(b8.count() == 0);
+
+ b8.from_uint32(0x10101010);
+ EATEST_VERIFY(b8.to_uint32() == 0x00000010);
+ b8.from_uint64(UINT64_C(0x0000000000000010));
+ EATEST_VERIFY(b8.to_uint64() == UINT64_C(0x0000000000000010));
+
+
+
+ // bitset<16> tests
+ bitset<16> b16(0x10101010);
+ EATEST_VERIFY(b16.count() == 2);
+ EATEST_VERIFY(b16.to_ulong() == 0x00001010);
+ EATEST_VERIFY(b16.to_uint32() == 0x00001010);
+ EATEST_VERIFY(b16.to_uint64() == 0x00001010);
+
+ b16.flip();
+ EATEST_VERIFY(b16.count() == 14);
+ EATEST_VERIFY(b16.to_ulong() == 0x0000efef);
+ EATEST_VERIFY(b16.to_uint32() == 0x0000efef);
+ EATEST_VERIFY(b16.to_uint64() == 0x0000efef);
+
+ b16 <<= 1;
+ EATEST_VERIFY(b16.count() == 13);
+ EATEST_VERIFY(b16.to_ulong() == 0x0000dfde);
+ EATEST_VERIFY(b16.to_uint32() == 0x0000dfde);
+ EATEST_VERIFY(b16.to_uint64() == 0x0000dfde);
+
+ b16.reset();
+ b16.flip();
+ b16 >>= 33;
+ EATEST_VERIFY(b16.count() == 0);
+
+ b16.reset();
+ b16.flip();
+ b16 >>= 65;
+ EATEST_VERIFY(b16.count() == 0);
+
+ b16.from_uint32(0x10101010);
+ EATEST_VERIFY(b16.to_uint32() == 0x00001010);
+ b16.from_uint64(UINT64_C(0x0000000000001010));
+ EATEST_VERIFY(b16.to_uint64() == UINT64_C(0x0000000000001010));
+
+
+
+ // bitset<32> tests
+ bitset<32> b32(0x10101010);
+ EATEST_VERIFY(b32.count() == 4);
+ EATEST_VERIFY(b32.to_ulong() == 0x10101010);
+ EATEST_VERIFY(b32.to_uint32() == 0x10101010);
+ EATEST_VERIFY(b32.to_uint64() == 0x10101010);
+
+ b32.flip();
+ EATEST_VERIFY(b32.count() == 28);
+ EATEST_VERIFY(b32.to_ulong() == 0xefefefef);
+ EATEST_VERIFY(b32.to_uint32() == 0xefefefef);
+ EATEST_VERIFY(b32.to_uint64() == 0xefefefef);
+
+ b32 <<= 1;
+ EATEST_VERIFY(b32.count() == 27);
+ EATEST_VERIFY(b32.to_ulong() == 0xdfdfdfde);
+ EATEST_VERIFY(b32.to_uint32() == 0xdfdfdfde);
+ EATEST_VERIFY(b32.to_uint64() == 0xdfdfdfde);
+
+ b32.reset();
+ b32.flip();
+ b32 >>= 33;
+ EATEST_VERIFY(b32.count() == 0);
+
+ b32.reset();
+ b32.flip();
+ b32 >>= 65;
+ EATEST_VERIFY(b32.count() == 0);
+
+ b32.from_uint32(0x10101010);
+ EATEST_VERIFY(b32.to_uint32() == 0x10101010);
+ b32.from_uint64(UINT64_C(0x0000000010101010));
+ EATEST_VERIFY(b32.to_uint64() == UINT64_C(0x0000000010101010));
+
+
+
+ // bitset<64> tests
+ bitset<64> b64(0x10101010); // b64 => 00000000 00000000 00000000 00000000 00010000 00010000 00010000 00010000
+ EATEST_VERIFY(b64.count() == 4);
+ EATEST_VERIFY(b64.to_ulong() == 0x10101010);
+ EATEST_VERIFY(b64.to_uint32() == 0x10101010);
+ EATEST_VERIFY(b64.to_uint64() == 0x10101010);
+
+ b64.flip(); // b64 => 11111111 11111111 11111111 11111111 11101111 11101111 11101111 11101111
+ EATEST_VERIFY(b64.count() == 60);
+ if(sizeof(unsigned long) + nErrorCount - nErrorCount == 4) // We have this no-op math here in order to avoid compiler warnings about constant expressions.
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ try {
+ EATEST_VERIFY(b64.to_ulong() == 0xefefefef);
+ EATEST_VERIFY(false);
+ }
+ catch(std::overflow_error&)
+ {
+ EATEST_VERIFY(true); // This pathway should be taken.
+ }
+ catch(...)
+ {
+ EATEST_VERIFY(false);
+ }
+ #else
+ EATEST_VERIFY(b64.to_ulong() == 0xefefefef);
+ #endif
+ }
+ else
+ {
+ EATEST_VERIFY(b64.to_ulong() == (unsigned long)UINT64_C(0xffffffffefefefef));
+ }
+
+ b64 <<= 1; // b64 => 11111111 11111111 11111111 11111111 11011111 11011111 11011111 11011110
+ EATEST_VERIFY(b64.count() == 59);
+ if(sizeof(unsigned long) + nErrorCount - nErrorCount == 4)
+ {
+ #if !EASTL_EXCEPTIONS_ENABLED
+ EATEST_VERIFY(b64.to_ulong() == 0xdfdfdfde);
+ #endif
+ }
+ else
+ {
+ EATEST_VERIFY(b64.to_ulong() == (unsigned long)UINT64_C(0xffffffffdfdfdfde));
+ }
+
+ b64.reset(); // b64 => 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ EATEST_VERIFY(b64.count() == 0);
+ EATEST_VERIFY(b64.to_ulong() == 0);
+
+ b64 <<= 1; // b64 => 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ EATEST_VERIFY(b64.count() == 0);
+ EATEST_VERIFY(b64.to_ulong() == 0);
+
+ b64.flip(); // b64 => 11111111 11111111 11111111 11111111 11111111 11111111 11111111 11111111
+ EATEST_VERIFY(b64.count() == 64);
+ if(sizeof(unsigned long) + nErrorCount - nErrorCount == 4)
+ {
+ #if !EASTL_EXCEPTIONS_ENABLED
+ EATEST_VERIFY(b64.to_ulong() == 0xffffffff);
+ #endif
+ }
+ else
+ EATEST_VERIFY(b64.to_ulong() == (unsigned long)UINT64_C(0xffffffffffffffff));
+
+ b64 <<= 1; // b64 => 11111111 11111111 11111111 11111111 11111111 11111111 11111111 11111110
+ EATEST_VERIFY(b64.count() == 63);
+ if(sizeof(unsigned long) + nErrorCount - nErrorCount == 4)
+ {
+ #if !EASTL_EXCEPTIONS_ENABLED
+ EATEST_VERIFY(b64.to_ulong() == 0xfffffffe);
+ #endif
+ }
+ else
+ EATEST_VERIFY(b64.to_ulong() == (unsigned long)UINT64_C(0xfffffffffffffffe));
+
+ b64.reset();
+ b64.flip();
+ b64 >>= 33;
+ EATEST_VERIFY(b64.count() == 31);
+
+ b64.reset();
+ b64.flip();
+ b64 >>= 65;
+ EATEST_VERIFY(b64.count() == 0);
+
+ b64.from_uint32(0x10101010);
+ EATEST_VERIFY(b64.to_uint32() == 0x10101010);
+ b64.from_uint64(UINT64_C(0x1010101010101010));
+ EATEST_VERIFY(b64.to_uint64() == UINT64_C(0x1010101010101010));
+ }
+
+
+ {
+ bitset<1> b1;
+ bitset<1> b1A(1);
+
+ EATEST_VERIFY(b1.size() == 1);
+ EATEST_VERIFY(b1.any() == false);
+ EATEST_VERIFY(b1.all() == false);
+ EATEST_VERIFY(b1.none() == true);
+ EATEST_VERIFY(b1.to_ulong() == 0);
+ EATEST_VERIFY(b1A.any() == true);
+ EATEST_VERIFY(b1A.all() == true);
+ EATEST_VERIFY(b1A.none() == false);
+ EATEST_VERIFY(b1A.to_ulong() == 1);
+ EATEST_VERIFY(b1A.to_uint32() == 1);
+ EATEST_VERIFY(b1A.to_uint64() == 1);
+
+
+ bitset<33> b33;
+ bitset<33> b33A(1);
+
+ EATEST_VERIFY(b33.size() == 33);
+ EATEST_VERIFY(b33.any() == false);
+ EATEST_VERIFY(b33.all() == false);
+ EATEST_VERIFY(b33.none() == true);
+ EATEST_VERIFY(b33.to_ulong() == 0);
+ EATEST_VERIFY(b33A.any() == true);
+ EATEST_VERIFY(b33A.all() == false);
+ EATEST_VERIFY(b33A.none() == false);
+ EATEST_VERIFY(b33A.to_ulong() == 1);
+
+
+ bitset<65> b65;
+ bitset<65> b65A(1);
+
+ EATEST_VERIFY(b65.size() == 65);
+ EATEST_VERIFY(b65.any() == false);
+ EATEST_VERIFY(b65.all() == false);
+ EATEST_VERIFY(b65.none() == true);
+ EATEST_VERIFY(b65.to_ulong() == 0);
+ EATEST_VERIFY(b65A.any() == true);
+ EATEST_VERIFY(b65A.all() == false);
+ EATEST_VERIFY(b65A.none() == false);
+ EATEST_VERIFY(b65A.to_ulong() == 1);
+
+
+ bitset<129> b129;
+ bitset<129> b129A(1);
+
+ EATEST_VERIFY(b129.size() == 129);
+ EATEST_VERIFY(b129.any() == false);
+ EATEST_VERIFY(b129.all() == false);
+ EATEST_VERIFY(b129.none() == true);
+ EATEST_VERIFY(b129.to_ulong() == 0);
+ EATEST_VERIFY(b129A.any() == true);
+ EATEST_VERIFY(b129A.all() == false);
+ EATEST_VERIFY(b129A.none() == false);
+ EATEST_VERIFY(b129A.to_ulong() == 1);
+
+
+ // operator[], data, test, to_ulong, count
+ b1[0] = true;
+ EATEST_VERIFY(b1.test(0) == true);
+ EATEST_VERIFY(b1.count() == 1);
+
+ b33[0] = true;
+ b33[32] = true;
+ EATEST_VERIFY(b33.test(0) == true);
+ EATEST_VERIFY(b33.test(15) == false);
+ EATEST_VERIFY(b33.test(32) == true);
+ EATEST_VERIFY(b33.count() == 2);
+
+ b65[0] = true;
+ b65[32] = true;
+ b65[64] = true;
+ EATEST_VERIFY(b65.test(0) == true);
+ EATEST_VERIFY(b65.test(15) == false);
+ EATEST_VERIFY(b65.test(32) == true);
+ EATEST_VERIFY(b65.test(47) == false);
+ EATEST_VERIFY(b65.test(64) == true);
+ EATEST_VERIFY(b65.count() == 3);
+
+ b129[0] = true;
+ b129[32] = true;
+ b129[64] = true;
+ b129[128] = true;
+ EATEST_VERIFY(b129.test(0) == true);
+ EATEST_VERIFY(b129.test(15) == false);
+ EATEST_VERIFY(b129.test(32) == true);
+ EATEST_VERIFY(b129.test(47) == false);
+ EATEST_VERIFY(b129.test(64) == true);
+ EATEST_VERIFY(b129.test(91) == false);
+ EATEST_VERIFY(b129.test(128) == true);
+ EATEST_VERIFY(b129.count() == 4);
+
+ bitset<1>::word_type* pWordArray;
+
+ pWordArray = b1.data();
+ EATEST_VERIFY(pWordArray != NULL);
+ pWordArray = b33.data();
+ EATEST_VERIFY(pWordArray != NULL);
+ pWordArray = b65.data();
+ EATEST_VERIFY(pWordArray != NULL);
+ pWordArray = b129.data();
+ EATEST_VERIFY(pWordArray != NULL);
+
+
+ // bitset<1> set, reset, flip, ~
+ b1.reset();
+ EATEST_VERIFY(b1.count() == 0);
+
+ b1.set();
+ EATEST_VERIFY(b1.count() == b1.size());
+ EATEST_VERIFY(b1.all());
+
+ b1.flip();
+ EATEST_VERIFY(b1.count() == 0);
+ EATEST_VERIFY(!b1.all());
+ EATEST_VERIFY(b1.none());
+
+ b1.set(0, true);
+ EATEST_VERIFY(b1[0] == true);
+
+ b1.reset(0);
+ EATEST_VERIFY(b1[0] == false);
+
+ b1.flip(0);
+ EATEST_VERIFY(b1[0] == true);
+
+ bitset<1> b1Not = ~b1;
+ EATEST_VERIFY(b1[0] == true);
+ EATEST_VERIFY(b1Not[0] == false);
+
+
+ // bitset<33> set, reset, flip, ~
+ b33.reset();
+ EATEST_VERIFY(b33.count() == 0);
+
+ b33.set();
+ EATEST_VERIFY(b33.count() == b33.size());
+ EATEST_VERIFY(b33.all());
+
+
+ b33.flip();
+ EATEST_VERIFY(b33.count() == 0);
+ EATEST_VERIFY(!b33.all());
+
+ b33.set(0, true);
+ b33.set(32, true);
+ EATEST_VERIFY(b33[0] == true);
+ EATEST_VERIFY(b33[15] == false);
+ EATEST_VERIFY(b33[32] == true);
+
+ b33.reset(0);
+ b33.reset(32);
+ EATEST_VERIFY(b33[0] == false);
+ EATEST_VERIFY(b33[32] == false);
+
+ b33.flip(0);
+ b33.flip(32);
+ EATEST_VERIFY(b33[0] == true);
+ EATEST_VERIFY(b33[32] == true);
+
+ bitset<33> b33Not(~b33);
+ EATEST_VERIFY(b33[0] == true);
+ EATEST_VERIFY(b33[32] == true);
+ EATEST_VERIFY(b33Not[0] == false);
+ EATEST_VERIFY(b33Not[32] == false);
+
+
+ // bitset<65> set, reset, flip, ~
+ b65.reset();
+ EATEST_VERIFY(b65.count() == 0);
+ EATEST_VERIFY(!b65.all());
+ EATEST_VERIFY(b65.none());
+
+ b65.set();
+ EATEST_VERIFY(b65.count() == b65.size());
+ EATEST_VERIFY(b65.all());
+ EATEST_VERIFY(!b65.none());
+
+ b65.flip();
+ EATEST_VERIFY(b65.count() == 0);
+ EATEST_VERIFY(!b65.all());
+ EATEST_VERIFY(b65.none());
+
+
+ b65.set(0, true);
+ b65.set(32, true);
+ b65.set(64, true);
+ EATEST_VERIFY(b65[0] == true);
+ EATEST_VERIFY(b65[15] == false);
+ EATEST_VERIFY(b65[32] == true);
+ EATEST_VERIFY(b65[50] == false);
+ EATEST_VERIFY(b65[64] == true);
+
+ b65.reset(0);
+ b65.reset(32);
+ b65.reset(64);
+ EATEST_VERIFY(b65[0] == false);
+ EATEST_VERIFY(b65[32] == false);
+ EATEST_VERIFY(b65[64] == false);
+
+ b65.flip(0);
+ b65.flip(32);
+ b65.flip(64);
+ EATEST_VERIFY(b65[0] == true);
+ EATEST_VERIFY(b65[32] == true);
+ EATEST_VERIFY(b65[64] == true);
+
+ bitset<65> b65Not(~b65);
+ EATEST_VERIFY(b65[0] == true);
+ EATEST_VERIFY(b65[32] == true);
+ EATEST_VERIFY(b65[64] == true);
+ EATEST_VERIFY(b65Not[0] == false);
+ EATEST_VERIFY(b65Not[32] == false);
+ EATEST_VERIFY(b65Not[64] == false);
+
+
+ // bitset<65> set, reset, flip, ~
+ b129.reset();
+ EATEST_VERIFY(b129.count() == 0);
+
+ b129.set();
+ EATEST_VERIFY(b129.count() == b129.size());
+ EATEST_VERIFY(b129.all());
+
+ b129.flip();
+ EATEST_VERIFY(b129.count() == 0);
+ EATEST_VERIFY(!b129.all());
+ EATEST_VERIFY(b129.none());
+
+ b129.set(0, true);
+ b129.set(32, true);
+ b129.set(64, true);
+ b129.set(128, true);
+ EATEST_VERIFY(b129[0] == true);
+ EATEST_VERIFY(b129[15] == false);
+ EATEST_VERIFY(b129[32] == true);
+ EATEST_VERIFY(b129[50] == false);
+ EATEST_VERIFY(b129[64] == true);
+ EATEST_VERIFY(b129[90] == false);
+ EATEST_VERIFY(b129[128] == true);
+
+ b129.reset(0);
+ b129.reset(32);
+ b129.reset(64);
+ b129.reset(128);
+ EATEST_VERIFY(b129[0] == false);
+ EATEST_VERIFY(b129[32] == false);
+ EATEST_VERIFY(b129[64] == false);
+ EATEST_VERIFY(b129[128] == false);
+
+ b129.flip(0);
+ b129.flip(32);
+ b129.flip(64);
+ b129.flip(128);
+ EATEST_VERIFY(b129[0] == true);
+ EATEST_VERIFY(b129[32] == true);
+ EATEST_VERIFY(b129[64] == true);
+ EATEST_VERIFY(b129[128] == true);
+
+ bitset<129> b129Not(~b129);
+ EATEST_VERIFY(b129[0] == true);
+ EATEST_VERIFY(b129[32] == true);
+ EATEST_VERIFY(b129[64] == true);
+ EATEST_VERIFY(b129[128] == true);
+ EATEST_VERIFY(b129Not[0] == false);
+ EATEST_VERIFY(b129Not[32] == false);
+ EATEST_VERIFY(b129Not[64] == false);
+ EATEST_VERIFY(b129Not[128] == false);
+
+
+ // operator ==, !=
+ bitset<1> b1Equal(b1);
+ EATEST_VERIFY(b1Equal == b1);
+ EATEST_VERIFY(b1Equal != b1Not);
+
+ bitset<33> b33Equal(b33);
+ EATEST_VERIFY(b33Equal == b33);
+ EATEST_VERIFY(b33Equal != b33Not);
+
+ bitset<65> b65Equal(b65);
+ EATEST_VERIFY(b65Equal == b65);
+ EATEST_VERIFY(b65Equal != b65Not);
+
+ bitset<129> b129Equal(b129);
+ EATEST_VERIFY(b129Equal == b129);
+ EATEST_VERIFY(b129Equal != b129Not);
+
+
+ // bitset<1> operator<<=, operator>>=, operator<<, operator>>
+ b1.reset();
+
+ b1[0] = true;
+ b1 >>= 0;
+ EATEST_VERIFY(b1[0] == true);
+ b1 >>= 1;
+ EATEST_VERIFY(b1[0] == false);
+
+ b1[0] = true;
+ b1 <<= 0;
+ EATEST_VERIFY(b1[0] == true);
+ b1 <<= 1;
+ EATEST_VERIFY(b1[0] == false);
+
+ b1[0] = true;
+ b1Equal = b1 >> 0;
+ EATEST_VERIFY(b1Equal == b1);
+ b1Equal = b1 >> 1;
+ EATEST_VERIFY(b1Equal[0] == false);
+
+ b1[0] = true;
+ b1Equal = b1 << 0;
+ EATEST_VERIFY(b1Equal[0] == true);
+ b1Equal = b1 << 1;
+ EATEST_VERIFY(b1Equal[0] == false);
+
+ b1.reset();
+ b1.flip();
+ b1 >>= 33;
+ EATEST_VERIFY(b1.count() == 0);
+ EATEST_VERIFY(!b1.all());
+ EATEST_VERIFY(b1.none());
+
+ b1.reset();
+ b1.flip();
+ b1 <<= 33;
+ EATEST_VERIFY(b1.count() == 0);
+ EATEST_VERIFY(!b1.all());
+ EATEST_VERIFY(b1.none());
+
+ b1.reset();
+ b1.flip();
+ b1 >>= 65;
+ EATEST_VERIFY(b1.count() == 0);
+ EATEST_VERIFY(!b1.all());
+ EATEST_VERIFY(b1.none());
+
+ b1.reset();
+ b1.flip();
+ b1 <<= 65;
+ EATEST_VERIFY(b1.count() == 0);
+ EATEST_VERIFY(!b1.all());
+ EATEST_VERIFY(b1.none());
+
+
+ // bitset<33> operator<<=, operator>>=, operator<<, operator>>
+ b33.reset();
+
+ b33[0] = true;
+ b33[32] = true;
+ b33 >>= 0;
+ EATEST_VERIFY(b33[0] == true);
+ EATEST_VERIFY(b33[32] == true);
+ b33 >>= 10;
+ EATEST_VERIFY(b33[22] == true);
+
+ b33.reset();
+ b33[0] = true;
+ b33[32] = true;
+ b33 <<= 0;
+ EATEST_VERIFY(b33[0] == true);
+ EATEST_VERIFY(b33[32] == true);
+ b33 <<= 10;
+ EATEST_VERIFY(b33[10] == true);
+
+ b33.reset();
+ b33[0] = true;
+ b33[32] = true;
+ b33Equal = b33 >> 0;
+ EATEST_VERIFY(b33Equal == b33);
+ b33Equal = b33 >> 10;
+ EATEST_VERIFY(b33Equal[22] == true);
+
+ b33.reset();
+ b33[0] = true;
+ b33[32] = true;
+ b33Equal = b33 << 10;
+ EATEST_VERIFY(b33Equal[10] == true);
+
+ b33.reset();
+ b33.flip();
+ b33 >>= 33;
+ EATEST_VERIFY(b33.count() == 0);
+ EATEST_VERIFY(!b33.all());
+ EATEST_VERIFY(b33.none());
+
+ b33.reset();
+ b33.flip();
+ b33 <<= 33;
+ EATEST_VERIFY(b33.count() == 0);
+ EATEST_VERIFY(!b33.all());
+ EATEST_VERIFY(b33.none());
+
+ b33.reset();
+ b33.flip();
+ b33 >>= 65;
+ EATEST_VERIFY(b33.count() == 0);
+ EATEST_VERIFY(!b33.all());
+ EATEST_VERIFY(b33.none());
+
+ b33.reset();
+ b33.flip();
+ b33 <<= 65;
+ EATEST_VERIFY(b33.count() == 0);
+ EATEST_VERIFY(!b33.all());
+ EATEST_VERIFY(b33.none());
+
+
+ // bitset<65> operator<<=, operator>>=, operator<<, operator>>
+ b65.reset();
+
+ b65[0] = true;
+ b65[32] = true;
+ b65[64] = true;
+ b65 >>= 0;
+ EATEST_VERIFY(b65[0] == true);
+ EATEST_VERIFY(b65[32] == true);
+ EATEST_VERIFY(b65[64] == true);
+ b65 >>= 10;
+ EATEST_VERIFY(b65[22] == true);
+ EATEST_VERIFY(b65[54] == true);
+
+ b65.reset();
+ b65[0] = true;
+ b65[32] = true;
+ b65[64] = true;
+ b65 <<= 0;
+ EATEST_VERIFY(b65[0] == true);
+ EATEST_VERIFY(b65[32] == true);
+ EATEST_VERIFY(b65[64] == true);
+ b65 <<= 10;
+ EATEST_VERIFY(b65[10] == true);
+ EATEST_VERIFY(b65[42] == true);
+
+ b65.reset();
+ b65[0] = true;
+ b65[32] = true;
+ b65[64] = true;
+ b65Equal = b65 >> 0;
+ EATEST_VERIFY(b65Equal == b65);
+ b65Equal = b65 >> 10;
+ EATEST_VERIFY(b65Equal[22] == true);
+ EATEST_VERIFY(b65Equal[54] == true);
+
+ b65.reset();
+ b65[0] = true;
+ b65[32] = true;
+ b65[64] = true;
+ b65Equal = b65 << 10;
+ EATEST_VERIFY(b65Equal[10] == true);
+ EATEST_VERIFY(b65Equal[42] == true);
+
+ b65.reset();
+ b65.flip();
+ b65 >>= 33;
+ EATEST_VERIFY(b65.count() == 32);
+
+ b65.reset();
+ b65.flip();
+ b65 <<= 33;
+ EATEST_VERIFY(b65.count() == 32);
+
+ b65.reset();
+ b65.flip();
+ b65 >>= 65;
+ EATEST_VERIFY(b65.count() == 0);
+
+ b65.reset();
+ b65.flip();
+ b65 <<= 65;
+ EATEST_VERIFY(b65.count() == 0);
+
+
+ // bitset<129> operator<<=, operator>>=, operator<<, operator>>
+ b129.reset();
+
+ b129[0] = true;
+ b129[32] = true;
+ b129[64] = true;
+ b129[128] = true;
+ b129 >>= 0;
+ EATEST_VERIFY(b129[0] == true);
+ EATEST_VERIFY(b129[32] == true);
+ EATEST_VERIFY(b129[64] == true);
+ EATEST_VERIFY(b129[128] == true);
+ b129 >>= 10;
+ EATEST_VERIFY(b129[22] == true);
+ EATEST_VERIFY(b129[54] == true);
+ EATEST_VERIFY(b129[118] == true);
+
+ b129.reset();
+ b129[0] = true;
+ b129[32] = true;
+ b129[64] = true;
+ b129[128] = true;
+ b129 <<= 0;
+ EATEST_VERIFY(b129[0] == true);
+ EATEST_VERIFY(b129[32] == true);
+ EATEST_VERIFY(b129[64] == true);
+ EATEST_VERIFY(b129[128] == true);
+ b129 <<= 10;
+ EATEST_VERIFY(b129[10] == true);
+ EATEST_VERIFY(b129[42] == true);
+ EATEST_VERIFY(b129[74] == true);
+
+ b129.reset();
+ b129[0] = true;
+ b129[32] = true;
+ b129[64] = true;
+ b129[128] = true;
+ b129Equal = b129 >> 0;
+ EATEST_VERIFY(b129Equal == b129);
+ b129Equal = b129 >> 10;
+ EATEST_VERIFY(b129Equal[22] == true);
+ EATEST_VERIFY(b129Equal[54] == true);
+ EATEST_VERIFY(b129Equal[118] == true);
+
+ b129.reset();
+ b129[0] = true;
+ b129[32] = true;
+ b129[64] = true;
+ b129[128] = true;
+ b129Equal = b129 << 10;
+ EATEST_VERIFY(b129Equal[10] == true);
+ EATEST_VERIFY(b129Equal[42] == true);
+ EATEST_VERIFY(b129Equal[74] == true);
+
+ b129.reset();
+ b129.flip();
+ b129 >>= 33;
+ EATEST_VERIFY(b129.count() == 96);
+
+ b129.reset();
+ b129.flip();
+ b129 <<= 33;
+ EATEST_VERIFY(b129.count() == 96);
+
+ b129.reset();
+ b129.flip();
+ b129 >>= 65;
+ EATEST_VERIFY(b129.count() == 64);
+
+ b129.reset();
+ b129.flip();
+ b129 <<= 65;
+ EATEST_VERIFY(b129.count() == 64);
+
+
+ // operator&=(const this_type& x), operator|=(const this_type& x), operator^=(const this_type& x)
+ b1.set();
+ b1[0] = false;
+ b1A[0] = true;
+ b1 &= b1A;
+ EATEST_VERIFY(b1[0] == false);
+ b1 |= b1A;
+ EATEST_VERIFY(b1[0] == true);
+ b1 ^= b1A;
+ EATEST_VERIFY(b1[0] == false);
+ b1 |= b1A;
+ EATEST_VERIFY(b1[0] == true);
+
+ b33.set();
+ b33[0] = false;
+ b33[32] = false;
+ b33A[0] = true;
+ b33A[32] = true;
+ b33 &= b33A;
+ EATEST_VERIFY((b33[0] == false) && (b33[32] == false));
+ b33 |= b33A;
+ EATEST_VERIFY((b33[0] == true) && (b33[32] == true));
+ b33 ^= b33A;
+ EATEST_VERIFY((b33[0] == false) && (b33[32] == false));
+ b33 |= b33A;
+ EATEST_VERIFY((b33[0] == true) && (b33[32] == true));
+
+ b65.set();
+ b65[0] = false;
+ b65[32] = false;
+ b65[64] = false;
+ b65A[0] = true;
+ b65A[32] = true;
+ b65A[64] = true;
+ b65 &= b65A;
+ EATEST_VERIFY((b65[0] == false) && (b65[32] == false) && (b65[64] == false));
+ b65 |= b65A;
+ EATEST_VERIFY((b65[0] == true) && (b65[32] == true) && (b65[64] == true));
+ b65 ^= b65A;
+ EATEST_VERIFY((b65[0] == false) && (b65[32] == false) && (b65[64] == false));
+ b65 |= b65A;
+ EATEST_VERIFY((b65[0] == true) && (b65[32] == true) && (b65[64] == true));
+
+ b129.set();
+ b129[0] = false;
+ b129[32] = false;
+ b129[64] = false;
+ b129[128] = false;
+ b129A[0] = true;
+ b129A[32] = true;
+ b129A[64] = true;
+ b129A[128] = true;
+ b129 &= b129A;
+ EATEST_VERIFY((b129[0] == false) && (b129[32] == false) && (b129[64] == false) && (b129[128] == false));
+ b129 |= b129A;
+ EATEST_VERIFY((b129[0] == true) && (b129[32] == true) && (b129[64] == true) && (b129[128] == true));
+ b129 ^= b129A;
+ EATEST_VERIFY((b129[0] == false) && (b129[32] == false) && (b129[64] == false) && (b129[128] == false));
+ b129 |= b129A;
+ EATEST_VERIFY((b129[0] == true) && (b129[32] == true) && (b129[64] == true) && (b129[128] == true));
+ }
+
+ { // Test bitset::reference
+ bitset<65> b65;
+ bitset<65>::reference r = b65[33];
+
+ r = true;
+ EATEST_VERIFY(r == true);
+ }
+
+ { // Test find_first, find_next
+ size_t i, j;
+
+ // bitset<1>
+ bitset<1> b1;
+
+ i = b1.find_first();
+ EATEST_VERIFY(i == b1.kSize);
+ b1.set(0, true);
+ i = b1.find_first();
+ EATEST_VERIFY(i == 0);
+ i = b1.find_next(i);
+ EATEST_VERIFY(i == b1.kSize);
+
+ b1.set();
+ for(i = 0, j = b1.find_first(); j != b1.kSize; j = b1.find_next(j))
+ ++i;
+ EATEST_VERIFY(i == 1);
+
+ // bitset<7>
+ bitset<7> b7;
+
+ i = b7.find_first();
+ EATEST_VERIFY(i == b7.kSize);
+ b7.set(0, true);
+ b7.set(5, true);
+ i = b7.find_first();
+ EATEST_VERIFY(i == 0);
+ i = b7.find_next(i);
+ EATEST_VERIFY(i == 5);
+ i = b7.find_next(i);
+ EATEST_VERIFY(i == b7.kSize);
+
+ b7.set();
+ for(i = 0, j = b7.find_first(); j != b7.kSize; j = b7.find_next(j))
+ ++i;
+ EATEST_VERIFY(i == 7);
+
+ // bitset<32>
+ bitset<32> b32;
+
+ i = b32.find_first();
+ EATEST_VERIFY(i == b32.kSize);
+ b32.set(0, true);
+ b32.set(27, true);
+ i = b32.find_first();
+ EATEST_VERIFY(i == 0);
+ i = b32.find_next(i);
+ EATEST_VERIFY(i == 27);
+ i = b32.find_next(i);
+ EATEST_VERIFY(i == b32.kSize);
+
+ b32.set();
+ for(i = 0, j = b32.find_first(); j != b32.kSize; j = b32.find_next(j))
+ ++i;
+ EATEST_VERIFY(i == 32);
+
+ // bitset<41>
+ bitset<41> b41;
+
+ i = b41.find_first();
+ EATEST_VERIFY(i == b41.kSize);
+ b41.set(0, true);
+ b41.set(27, true);
+ b41.set(37, true);
+ i = b41.find_first();
+ EATEST_VERIFY(i == 0);
+ i = b41.find_next(i);
+ EATEST_VERIFY(i == 27);
+ i = b41.find_next(i);
+ EATEST_VERIFY(i == 37);
+ i = b41.find_next(i);
+ EATEST_VERIFY(i == b41.kSize);
+
+ b41.set();
+ for(i = 0, j = b41.find_first(); j != b41.kSize; j = b41.find_next(j))
+ ++i;
+ EATEST_VERIFY(i == 41);
+
+ // bitset<64>
+ bitset<64> b64;
+
+ i = b64.find_first();
+ EATEST_VERIFY(i == b64.kSize);
+ b64.set(0, true);
+ b64.set(27, true);
+ b64.set(37, true);
+ i = b64.find_first();
+ EATEST_VERIFY(i == 0);
+ i = b64.find_next(i);
+ EATEST_VERIFY(i == 27);
+ i = b64.find_next(i);
+ EATEST_VERIFY(i == 37);
+ i = b64.find_next(i);
+ EATEST_VERIFY(i == b64.kSize);
+
+ b64.set();
+ for(i = 0, j = b64.find_first(); j != b64.kSize; j = b64.find_next(j))
+ ++i;
+ EATEST_VERIFY(i == 64);
+
+ // bitset<79>
+ bitset<79> b79;
+
+ i = b79.find_first();
+ EATEST_VERIFY(i == b79.kSize);
+ b79.set(0, true);
+ b79.set(27, true);
+ b79.set(37, true);
+ i = b79.find_first();
+ EATEST_VERIFY(i == 0);
+ i = b79.find_next(i);
+ EATEST_VERIFY(i == 27);
+ i = b79.find_next(i);
+ EATEST_VERIFY(i == 37);
+ i = b79.find_next(i);
+ EATEST_VERIFY(i == b79.kSize);
+
+ b79.set();
+ for(i = 0, j = b79.find_first(); j != b79.kSize; j = b79.find_next(j))
+ ++i;
+ EATEST_VERIFY(i == 79);
+
+ // bitset<128>
+ bitset<128> b128;
+
+ i = b128.find_first();
+ EATEST_VERIFY(i == b128.kSize);
+ b128.set(0, true);
+ b128.set(27, true);
+ b128.set(37, true);
+ b128.set(77, true);
+ i = b128.find_first();
+ EATEST_VERIFY(i == 0);
+ i = b128.find_next(i);
+ EATEST_VERIFY(i == 27);
+ i = b128.find_next(i);
+ EATEST_VERIFY(i == 37);
+ i = b128.find_next(i);
+ EATEST_VERIFY(i == 77);
+ i = b128.find_next(i);
+ EATEST_VERIFY(i == b128.kSize);
+
+ b128.set();
+ for(i = 0, j = b128.find_first(); j != b128.kSize; j = b128.find_next(j))
+ ++i;
+ EATEST_VERIFY(i == 128);
+
+ // bitset<137>
+ bitset<137> b137;
+
+ i = b137.find_first();
+ EATEST_VERIFY(i == b137.kSize);
+ b137.set(0, true);
+ b137.set(27, true);
+ b137.set(37, true);
+ b137.set(77, true);
+ b137.set(99, true);
+ b137.set(136, true);
+ i = b137.find_first();
+ EATEST_VERIFY(i == 0);
+ i = b137.find_next(i);
+ EATEST_VERIFY(i == 27);
+ i = b137.find_next(i);
+ EATEST_VERIFY(i == 37);
+ i = b137.find_next(i);
+ EATEST_VERIFY(i == 77);
+ i = b137.find_next(i);
+ EATEST_VERIFY(i == 99);
+ i = b137.find_next(i);
+ EATEST_VERIFY(i == 136);
+ i = b137.find_next(i);
+ EATEST_VERIFY(i == b137.kSize);
+
+ b137.set();
+ for(i = 0, j = b137.find_first(); j != b137.kSize; j = b137.find_next(j))
+ ++i;
+ EATEST_VERIFY(i == 137);
+ }
+
+ { // Test find_last, find_prev
+ size_t i, j;
+
+ // bitset<1>
+ bitset<1> b1;
+
+ i = b1.find_last();
+ EATEST_VERIFY(i == b1.kSize);
+ b1.set(0, true);
+ i = b1.find_last();
+ EATEST_VERIFY(i == 0);
+ i = b1.find_prev(i);
+ EATEST_VERIFY(i == b1.kSize);
+
+ b1.set();
+ for(i = 0, j = b1.find_last(); j != b1.kSize; j = b1.find_prev(j))
+ ++i;
+ EATEST_VERIFY(i == 1);
+
+ // bitset<7>
+ bitset<7> b7;
+
+ i = b7.find_last();
+ EATEST_VERIFY(i == b7.kSize);
+ b7.set(0, true);
+ b7.set(5, true);
+ i = b7.find_last();
+ EATEST_VERIFY(i == 5);
+ i = b7.find_prev(i);
+ EATEST_VERIFY(i == 0);
+ i = b7.find_prev(i);
+ EATEST_VERIFY(i == b7.kSize);
+
+ b7.set();
+ for(i = 0, j = b7.find_last(); j != b7.kSize; j = b7.find_prev(j))
+ ++i;
+ EATEST_VERIFY(i == 7);
+
+ // bitset<32>
+ bitset<32> b32;
+
+ i = b32.find_last();
+ EATEST_VERIFY(i == b32.kSize);
+ b32.set(0, true);
+ b32.set(27, true);
+ i = b32.find_last();
+ EATEST_VERIFY(i == 27);
+ i = b32.find_prev(i);
+ EATEST_VERIFY(i == 0);
+ i = b32.find_prev(i);
+ EATEST_VERIFY(i == b32.kSize);
+
+ b32.set();
+ for(i = 0, j = b32.find_last(); j != b32.kSize; j = b32.find_prev(j))
+ ++i;
+ EATEST_VERIFY(i == 32);
+
+ // bitset<41>
+ bitset<41> b41;
+
+ i = b41.find_last();
+ EATEST_VERIFY(i == b41.kSize);
+ b41.set(0, true);
+ b41.set(27, true);
+ b41.set(37, true);
+ i = b41.find_last();
+ EATEST_VERIFY(i == 37);
+ i = b41.find_prev(i);
+ EATEST_VERIFY(i == 27);
+ i = b41.find_prev(i);
+ EATEST_VERIFY(i == 0);
+ i = b41.find_prev(i);
+ EATEST_VERIFY(i == b41.kSize);
+
+ b41.set();
+ for(i = 0, j = b41.find_last(); j != b41.kSize; j = b41.find_prev(j))
+ ++i;
+ EATEST_VERIFY(i == 41);
+
+ // bitset<64>
+ bitset<64> b64;
+
+ i = b64.find_last();
+ EATEST_VERIFY(i == b64.kSize);
+ b64.set(0, true);
+ b64.set(27, true);
+ b64.set(37, true);
+ i = b64.find_last();
+ EATEST_VERIFY(i == 37);
+ i = b64.find_prev(i);
+ EATEST_VERIFY(i == 27);
+ i = b64.find_prev(i);
+ EATEST_VERIFY(i == 0);
+ i = b64.find_prev(i);
+ EATEST_VERIFY(i == b64.kSize);
+
+ b64.set();
+ for(i = 0, j = b64.find_last(); j != b64.kSize; j = b64.find_prev(j))
+ ++i;
+ EATEST_VERIFY(i == 64);
+
+ // bitset<79>
+ bitset<79> b79;
+
+ i = b79.find_last();
+ EATEST_VERIFY(i == b79.kSize);
+ b79.set(0, true);
+ b79.set(27, true);
+ b79.set(37, true);
+ i = b79.find_last();
+ EATEST_VERIFY(i == 37);
+ i = b79.find_prev(i);
+ EATEST_VERIFY(i == 27);
+ i = b79.find_prev(i);
+ EATEST_VERIFY(i == 0);
+ i = b79.find_prev(i);
+ EATEST_VERIFY(i == b79.kSize);
+
+ b79.set();
+ for(i = 0, j = b79.find_last(); j != b79.kSize; j = b79.find_prev(j))
+ ++i;
+ EATEST_VERIFY(i == 79);
+
+ // bitset<128>
+ bitset<128> b128;
+
+ i = b128.find_last();
+ EATEST_VERIFY(i == b128.kSize);
+ b128.set(0, true);
+ b128.set(27, true);
+ b128.set(37, true);
+ b128.set(77, true);
+ i = b128.find_last();
+ EATEST_VERIFY(i == 77);
+ i = b128.find_prev(i);
+ EATEST_VERIFY(i == 37);
+ i = b128.find_prev(i);
+ EATEST_VERIFY(i == 27);
+ i = b128.find_prev(i);
+ EATEST_VERIFY(i == 0);
+ i = b128.find_prev(i);
+ EATEST_VERIFY(i == b128.kSize);
+
+ b128.set();
+ for(i = 0, j = b128.find_last(); j != b128.kSize; j = b128.find_prev(j))
+ ++i;
+ EATEST_VERIFY(i == 128);
+
+ // bitset<137>
+ bitset<137> b137;
+
+ i = b137.find_last();
+ EATEST_VERIFY(i == b137.kSize);
+ b137.set(0, true);
+ b137.set(27, true);
+ b137.set(37, true);
+ b137.set(77, true);
+ b137.set(99, true);
+ b137.set(136, true);
+ i = b137.find_last();
+ EATEST_VERIFY(i == 136);
+ i = b137.find_prev(i);
+ EATEST_VERIFY(i == 99);
+ i = b137.find_prev(i);
+ EATEST_VERIFY(i == 77);
+ i = b137.find_prev(i);
+ EATEST_VERIFY(i == 37);
+ i = b137.find_prev(i);
+ EATEST_VERIFY(i == 27);
+ i = b137.find_prev(i);
+ EATEST_VERIFY(i == 0);
+ i = b137.find_prev(i);
+ EATEST_VERIFY(i == b137.kSize);
+
+ b137.set();
+ for(i = 0, j = b137.find_last(); j != b137.kSize; j = b137.find_prev(j))
+ ++i;
+ EATEST_VERIFY(i == 137);
+ }
+
+ // test BITSET_WORD_COUNT macro
+ {
+ {
+ typedef eastl::bitset<32, char> bitset_t;
+ static_assert(bitset_t::kWordCount == BITSET_WORD_COUNT(bitset_t::kSize, bitset_t::word_type), "bitset failure");
+ }
+ {
+ typedef eastl::bitset<32, int> bitset_t;
+ static_assert(bitset_t::kWordCount == BITSET_WORD_COUNT(bitset_t::kSize, bitset_t::word_type), "bitset failure");
+ }
+ {
+ typedef eastl::bitset<32, int16_t> bitset_t;
+ static_assert(bitset_t::kWordCount == BITSET_WORD_COUNT(bitset_t::kSize, bitset_t::word_type), "bitset failure");
+ }
+ {
+ typedef eastl::bitset<32, int32_t> bitset_t;
+ static_assert(bitset_t::kWordCount == BITSET_WORD_COUNT(bitset_t::kSize, bitset_t::word_type), "bitset failure");
+ }
+ {
+ typedef eastl::bitset<128, int64_t> bitset_t;
+ static_assert(bitset_t::kWordCount == BITSET_WORD_COUNT(bitset_t::kSize, bitset_t::word_type), "bitset failure");
+ }
+ {
+ typedef eastl::bitset<256, int64_t> bitset_t;
+ static_assert(bitset_t::kWordCount == BITSET_WORD_COUNT(bitset_t::kSize, bitset_t::word_type), "bitset failure");
+ }
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestCharTraits.cpp b/EASTL/test/source/TestCharTraits.cpp
new file mode 100644
index 0000000..bbcab54
--- /dev/null
+++ b/EASTL/test/source/TestCharTraits.cpp
@@ -0,0 +1,39 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EABase/eabase.h>
+#include <EASTL/internal/char_traits.h>
+
+
+template<typename CharT>
+int TestCharTraits()
+{
+ int nErrorCount = 0;
+ return nErrorCount;
+}
+
+
+int TestCharTraits()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ nErrorCount += TestCharTraits<char>();
+ nErrorCount += TestCharTraits<wchar_t>();
+ nErrorCount += TestCharTraits<char16_t>();
+ nErrorCount += TestCharTraits<char32_t>();
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestChrono.cpp b/EASTL/test/source/TestChrono.cpp
new file mode 100644
index 0000000..a56b934
--- /dev/null
+++ b/EASTL/test/source/TestChrono.cpp
@@ -0,0 +1,220 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EABase/eabase.h>
+#include <EASTL/chrono.h>
+#include <EASTL/numeric.h>
+#include <EASTL/string.h>
+
+
+using namespace eastl;
+using namespace eastl::chrono;
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// TestDuration
+//
+int TestDuration()
+{
+ int nErrorCount = 0;
+
+ {
+ hours h{1}; // 1 hour
+ milliseconds ms{3}; // 3 milliseconds
+ duration<int, kilo> ks{3}; // 3000 seconds
+
+ duration<double, ratio<1, 30>> hz30{3.5};
+ microseconds us = ms;
+ duration<double, milli> ms2 = us; // 3.0 milliseconds
+
+ EA_UNUSED(h);
+ EA_UNUSED(ms2);
+ EA_UNUSED(ks);
+ EA_UNUSED(hz30);
+ EA_UNUSED(us);
+ }
+
+ {
+ typedef duration<double, ratio<1, 30>> dur_t;
+ VERIFY(dur_t::min() < dur_t::zero());
+ VERIFY(dur_t::zero() < dur_t::max());
+ VERIFY(dur_t::min() < dur_t::max());
+ }
+
+ {
+ seconds s1(10);
+ seconds s2 = -s1;
+ VERIFY(s1.count() == 10);
+ VERIFY(s2.count() == -10);
+ }
+
+ {
+ {
+ hours h(1);
+ minutes m = ++h;
+ m--;
+ VERIFY(m.count() == 119);
+ }
+
+ {
+ hours h(24);
+ minutes m = h;
+ seconds s = m;
+ milliseconds ms = s;
+
+ VERIFY(h.count() == 24);
+ VERIFY(m.count() == 1440);
+ VERIFY(s.count() == 86400);
+ VERIFY(ms.count() == 86400000);
+ }
+
+ {
+ minutes m(11);
+ m *= 2;
+ VERIFY(m.count() == 22);
+ m += hours(10);
+ VERIFY(m.count() == 622);
+ VERIFY(duration_cast<hours>(m).count() == 10);
+ m %= hours(1);
+ VERIFY(duration_cast<hours>(m).count() == 0);
+ VERIFY(m.count() == 22);
+ }
+
+ {
+ milliseconds ms(3); // 3 milliseconds
+ VERIFY(ms.count() == 3);
+
+ microseconds us = 2 * ms; // 6000 microseconds constructed from 3 milliseconds
+ VERIFY(us.count() == 6000);
+
+ microseconds us2 = ms * 2; // 6000 microseconds constructed from 3 milliseconds
+ VERIFY(us2.count() == 6000);
+
+ microseconds us3 = us / 2;
+ VERIFY(us3.count() == 3000);
+
+ microseconds us4 = us % 2;
+ VERIFY(us4.count() == 0);
+ }
+ }
+
+ return nErrorCount;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// TestTimePoint
+//
+int TestTimePoint()
+{
+ int nErrorCount = 0;
+ {
+ {
+ system_clock::time_point t0 = system_clock::now();
+ auto tomorrow = t0 + hours(24);
+ auto today = tomorrow - system_clock::now();
+ auto hours_count = duration_cast<hours>(today).count();
+
+ VERIFY(hours_count == 24 || hours_count == 23); // account for time flux
+ }
+
+ {
+ time_point<system_clock, hours> hour1(hours(1));
+
+ auto hour_to_min = time_point_cast<minutes>(hour1);
+ auto hour_to_sec = time_point_cast<seconds>(hour1);
+ auto hour_to_millisec = time_point_cast<milliseconds>(hour1);
+ auto hour_to_microsec = time_point_cast<microseconds>(hour1);
+ auto hour_to_nanosec = time_point_cast<nanoseconds>(hour1);
+
+ VERIFY(hour_to_min.time_since_epoch().count() == 60);
+ VERIFY(hour_to_sec.time_since_epoch().count() == 3600);
+ VERIFY(hour_to_millisec.time_since_epoch().count() == 3600000ll);
+ VERIFY(hour_to_microsec.time_since_epoch().count() == 3600000000ll);
+ VERIFY(hour_to_nanosec.time_since_epoch().count() == 3600000000000ll);
+ }
+ }
+ return nErrorCount;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// TestClocks
+//
+int TestClocks()
+{
+ int nErrorCount = 0;
+ {
+ {
+ auto sys = system_clock::now();
+ VERIFY(sys.time_since_epoch().count() > 0);
+
+ auto stdy = steady_clock::now();
+ VERIFY(stdy.time_since_epoch().count() > 0);
+
+ auto hrc = high_resolution_clock::now();
+ VERIFY(hrc.time_since_epoch().count() > 0);
+ }
+
+ {
+ auto start = system_clock::now();
+ auto end = system_clock::now();
+ auto d = end - start;
+ EA_UNUSED(d);
+ VERIFY(d.count() >= 0);
+ }
+
+ {
+ auto start = steady_clock::now();
+ auto end = steady_clock::now();
+ auto d = end - start;
+ EA_UNUSED(d);
+ VERIFY(d.count() >= 0);
+ }
+
+ {
+ auto start = high_resolution_clock::now();
+ auto end = high_resolution_clock::now();
+ auto d = end - start;
+ EA_UNUSED(d);
+ VERIFY(d.count() >= 0);
+ }
+
+ {
+ typedef duration<int, ratio<1, 100000000>> shakes;
+ typedef duration<int, centi> jiffies;
+ typedef duration<float, ratio<12096, 10000>> microfortnights;
+ typedef duration<float, ratio<3155, 1000>> nanocenturies;
+
+ seconds sec(1);
+
+ VERIFY(duration_cast<shakes>(sec).count() == 100000000);
+ VERIFY(duration_cast<jiffies>(sec).count() == 100);
+ VERIFY(microfortnights(sec).count() > 0.82f);
+ VERIFY(nanocenturies(sec).count() > 0.31f);
+ }
+ }
+ return nErrorCount;
+}
+
+
+int TestChrono()
+{
+ int nErrorCount = 0;
+ nErrorCount += TestDuration();
+ nErrorCount += TestTimePoint();
+ nErrorCount += TestClocks();
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestCppCXTypeTraits.cpp b/EASTL/test/source/TestCppCXTypeTraits.cpp
new file mode 100644
index 0000000..ab03aa7
--- /dev/null
+++ b/EASTL/test/source/TestCppCXTypeTraits.cpp
@@ -0,0 +1,35 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/type_traits.h>
+
+using namespace eastl;
+
+#if defined(__cplusplus_winrt)
+ ref class Foo
+ {
+
+ };
+#endif
+
+int TestCppCXTypeTraits()
+{
+ int nErrorCount = 0;
+
+ // We can only build this code if C++/CX is enabled
+#if defined(__cplusplus_winrt)
+ {
+ Foo^ foo = ref new Foo();
+ static_assert(eastl::is_pod<Foo^>::value == false, "Ref types are not POD");
+ static_assert(eastl::is_trivially_destructible<Foo^>::value == false, "Ref types cannot be trivially destructible");
+ static_assert(eastl::is_trivially_constructible<Foo^>::value == false, "Ref types cannot be trivially constructible");
+ static_assert(eastl::is_trivially_copy_constructible<Foo^>::value == false, "Ref types cannot be trivially copyable");
+ static_assert(eastl::is_trivially_copy_assignable<Foo^>::value == false, "Ref types cannot be trivially copyable");
+ }
+#endif
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestDeque.cpp b/EASTL/test/source/TestDeque.cpp
new file mode 100644
index 0000000..e3f4ab6
--- /dev/null
+++ b/EASTL/test/source/TestDeque.cpp
@@ -0,0 +1,1146 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EABase/eabase.h>
+#include <EASTL/deque.h>
+#include <EASTL/list.h>
+#include <EASTL/vector.h>
+#include <EASTL/string.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/unique_ptr.h>
+#include "ConceptImpls.h"
+
+#if !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <deque>
+ #include <list>
+ #include <vector>
+ #include <algorithm>
+ #include <stdio.h>
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+
+using namespace eastl;
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// DequeObject
+//
+struct DequeObject
+{
+ int mX; // Value for the DequeObject.
+ uint32_t mMagicValue; //
+ static int sDOCount; // Count of all current existing DequeObjects.
+ static int sMagicErrorCount; // Number of magic number mismatch errors.
+
+ DequeObject(int x = 0) : mX(x), mMagicValue(kMagicValue)
+ { ++sDOCount; }
+
+ DequeObject(const DequeObject& dequeObject) : mX(dequeObject.mX), mMagicValue(kMagicValue)
+ { ++sDOCount; }
+
+ DequeObject& operator=(const DequeObject& dequeObject)
+ {
+ mX = dequeObject.mX;
+ return *this;
+ }
+
+ ~DequeObject()
+ {
+ if(mMagicValue != kMagicValue)
+ ++sMagicErrorCount;
+ mMagicValue = 0;
+ --sDOCount;
+ }
+};
+
+int DequeObject::sDOCount = 0;
+int DequeObject::sMagicErrorCount = 0;
+
+
+bool operator==(const DequeObject& de1, const DequeObject& de2)
+ { return de1.mX == de2.mX; }
+
+bool operator<(const DequeObject& de1, const DequeObject& de2)
+ { return de1.mX < de2.mX; }
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::deque<int>;
+template class eastl::deque<DequeObject>;
+
+
+// Test compiler issue that appeared in VS2012 relating to deque::kAlignment.
+struct StructWithContainerOfStructs
+{
+ eastl::deque<StructWithContainerOfStructs, EASTLAllocatorType, 16> children;
+};
+
+// The following will not compile because the default value of kDequeSubarraySize
+// relies on sizeof(T). Thus, a non-default value must be provided, or the full type
+// will be required at the time of instantiation, but it is not available.
+// struct StructWithContainerOfStructsDefault
+// {
+// eastl::deque<StructWithContainerOfStructsDefault> children;
+// };
+
+
+///////////////////////////////////////////////////////////////////////////////
+typedef eastl::deque<int> EIntDeque;
+typedef eastl::deque<int, EASTLAllocatorType, 1> EIntDeque1;
+typedef eastl::deque<int, EASTLAllocatorType, 32768> EIntDeque32768;
+
+
+typedef eastl::deque<DequeObject> EDODeque;
+typedef eastl::deque<DequeObject, EASTLAllocatorType, 1> EDODeque1;
+typedef eastl::deque<DequeObject, EASTLAllocatorType, 32768> EDODeque32768;
+
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ typedef std::deque<int> SIntDeque;
+ typedef std::deque<DequeObject> SDODeque;
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+template <typename D1, typename D2>
+int CompareDeques(const D1& d1, const D2& d2, const char* pTestName)
+{
+ int nErrorCount = 0;
+
+ // Compare emptiness.
+ VERIFY(d1.empty() == d2.empty());
+
+ // Compare sizes.
+ const size_t nSize1 = d1.size();
+ const size_t nSize2 = d2.size();
+
+ VERIFY(nSize1 == nSize2);
+ if(nSize1 != nSize2)
+ EASTLTest_Printf("%s: Deque size difference: %u, %u", pTestName, (unsigned)nSize1, (unsigned)nSize2);
+
+ // Compare values.
+ if(nSize1 == nSize2)
+ {
+ // Test operator[]
+ for(unsigned i = 0; i < nSize1; i++)
+ {
+ const typename D1::value_type& t1 = d1[i];
+ const typename D2::value_type& t2 = d2[i];
+
+ VERIFY(t1 == t2);
+ if(!(t1 == t2))
+ {
+ EASTLTest_Printf("%s: Deque index difference at index %d", pTestName, i);
+ break;
+ }
+ }
+
+ // Test iteration
+ typename D1::const_iterator it1 = d1.begin();
+ typename D2::const_iterator it2 = d2.begin();
+
+ for(unsigned j = 0; it1 != d1.end(); ++it1, ++it2, ++j)
+ {
+ const typename D1::value_type& t1 = *it1;
+ const typename D2::value_type& t2 = *it2;
+
+ VERIFY(t1 == t2);
+ if(!(t1 == t2))
+ {
+ EASTLTest_Printf("%s: Deque iterator difference at index %d", pTestName, j);
+ break;
+ }
+ }
+
+ // Test reverse iteration
+ typename D1::const_reverse_iterator itr1 = d1.rbegin();
+ typename D2::const_reverse_iterator itr2 = d2.rbegin();
+
+ for(typename D1::size_type j = d1.size() - 1; itr1 != d1.rend(); ++itr1, ++itr2, --j)
+ {
+ const typename D1::value_type& t1 = *itr1;
+ const typename D2::value_type& t2 = *itr2;
+
+ VERIFY(t1 == t2);
+ if(!(t1 == t2))
+ {
+ EASTLTest_Printf("%s: Deque reverse iterator difference at index %u", pTestName, (unsigned)j);
+ break;
+ }
+ }
+ }
+
+ return nErrorCount;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestDequeConstruction
+//
+template <typename D1, typename D2>
+int TestDequeConstruction()
+{
+ int nErrorCount = 0;
+
+ {
+ D1 d1A;
+ D2 d2A;
+ nErrorCount += CompareDeques(d1A, d2A, "Deque ctor");
+
+ D1 d1B((typename D1::size_type)0);
+ D2 d2B((typename D2::size_type)0);
+ nErrorCount += CompareDeques(d1B, d2B, "Deque ctor");
+
+ D1 d1C(1000);
+ D2 d2C(1000);
+ nErrorCount += CompareDeques(d1C, d2C, "Deque ctor");
+
+ D1 d1D(2000, 1);
+ D2 d2D(2000, 1);
+ nErrorCount += CompareDeques(d1D, d2D, "Deque ctor");
+
+ D1 d1E(d1C);
+ D2 d2E(d2C);
+ nErrorCount += CompareDeques(d1E, d2E, "Deque ctor");
+
+ D1 d1F(d1C.begin(), d1C.end());
+ D2 d2F(d2C.begin(), d2C.end());
+ nErrorCount += CompareDeques(d1F, d2F, "Deque ctor");
+
+ // operator=
+ d1E = d1D;
+ d2E = d2D;
+ nErrorCount += CompareDeques(d1D, d2D, "Deque operator=");
+ nErrorCount += CompareDeques(d1E, d2E, "Deque operator=");
+
+ // swap
+ d1E.swap(d1D);
+ d2E.swap(d2D);
+ nErrorCount += CompareDeques(d1D, d2D, "Deque swap");
+ nErrorCount += CompareDeques(d1E, d2E, "Deque swap");
+
+ // clear
+ d1A.clear();
+ d2A.clear();
+ nErrorCount += CompareDeques(d1A, d2A, "Deque clear");
+
+ d1B.clear();
+ d2B.clear();
+ nErrorCount += CompareDeques(d1B, d2B, "Deque clear");
+ }
+
+ VERIFY(DequeObject::sDOCount == 0);
+ VERIFY(DequeObject::sMagicErrorCount == 0);
+
+ return nErrorCount;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestDequeSimpleMutation
+//
+template <typename D1, typename D2>
+int TestDequeSimpleMutation()
+{
+ int nErrorCount = 0;
+
+ {
+ D1 d1;
+ D2 d2;
+
+ // push_back(value_type&)
+ // front
+ // back
+ for(int i = 0; i < 1000; i++)
+ {
+ d1.push_back(i);
+ d2.push_back(i);
+ VERIFY(d1.front() == d2.front());
+ VERIFY(d1.back() == d2.back());
+ }
+ nErrorCount += CompareDeques(d1, d2, "Deque push_back(value_type&)");
+
+ // operator[]
+ // at()
+ for(typename D1::size_type i = 0, iEnd = d1.size(); i < iEnd; i++)
+ {
+ VERIFY(d1[(unsigned)i] == d2[(unsigned)i]);
+ VERIFY(d1.at((unsigned)i) == d2.at((unsigned)i));
+ }
+
+ // push_back()
+ for(int i = 0; i < 1000; i++)
+ {
+ d1.push_back(int());
+ typename D2::value_type& ref = d2.push_back(); // d2 here must be the EASTL version.
+ VERIFY(d1.front() == d2.front());
+ VERIFY(d1.back() == d2.back());
+ VERIFY(&ref == &d2.back());
+ }
+ nErrorCount += CompareDeques(d1, d2, "Deque push_back()");
+
+ // operator[]
+ // at()
+ for(typename D1::size_type i = 0, iEnd = d1.size(); i < iEnd; i++)
+ {
+ VERIFY(d1[(unsigned)i] == d2[(unsigned)i]);
+ VERIFY(d1.at((unsigned)i) == d2.at((unsigned)i));
+ }
+
+ // push_front(value_type&)
+ for(int i = 0; i < 1000; i++)
+ {
+ d1.push_front(i);
+ d2.push_front(i);
+ VERIFY(d1.front() == d2.front());
+ VERIFY(d1.back() == d2.back());
+ }
+ nErrorCount += CompareDeques(d1, d2, "Deque push_front(value_type&)");
+
+ // operator[]
+ // at()
+ for(typename D1::size_type i = 0, iEnd = d1.size(); i < iEnd; i++)
+ {
+ VERIFY(d1[(unsigned)i] == d2[(unsigned)i]);
+ VERIFY(d1.at((unsigned)i) == d2.at((unsigned)i));
+ }
+
+ // push_front()
+ for(int i = 0; i < 1000; i++)
+ {
+ d1.push_front(int());
+ typename D2::value_type& ref = d2.push_front();
+ VERIFY(d1.front() == d2.front());
+ VERIFY(d1.back() == d2.back());
+ VERIFY(&ref == &d2.front());
+ }
+ nErrorCount += CompareDeques(d1, d2, "Deque push_front()");
+
+ // operator[]
+ // at()
+ for(typename D1::size_type i = 0, iEnd = d1.size(); i < iEnd; i++)
+ {
+ VERIFY(d1[(unsigned)i] == d2[(unsigned)i]);
+ VERIFY(d1.at((unsigned)i) == d2.at((unsigned)i));
+ }
+
+ // pop_back()
+ for(int i = 0; i < 500; i++)
+ {
+ d1.pop_back();
+ d2.pop_back();
+ VERIFY(d1.front() == d2.front());
+ VERIFY(d1.back() == d2.back());
+ }
+ nErrorCount += CompareDeques(d1, d2, "Deque pop_back()");
+
+ // operator[]
+ // at()
+ for(typename D1::size_type i = 0, iEnd = d1.size(); i < iEnd; i++)
+ {
+ VERIFY(d1[(unsigned)i] == d2[(unsigned)i]);
+ VERIFY(d1.at((unsigned)i) == d2.at((unsigned)i));
+ }
+
+ // pop_front()
+ for(int i = 0; i < 500; i++)
+ {
+ d1.pop_front();
+ d2.pop_front();
+ VERIFY(d1.front() == d2.front());
+ VERIFY(d1.back() == d2.back());
+ }
+ nErrorCount += CompareDeques(d1, d2, "Deque pop_front()");
+
+ // operator[]
+ // at()
+ for(typename D1::size_type i = 0, iEnd = d1.size(); i < iEnd; i++)
+ {
+ VERIFY(d1[(unsigned)i] == d2[(unsigned)i]);
+ VERIFY(d1.at((unsigned)i) == d2.at((unsigned)i));
+ }
+
+ // resize(value_type&)
+ for(int i = 0; i < 500; i++)
+ {
+ d1.resize(d1.size() + 3, i);
+ d2.resize(d2.size() + 3, i);
+ VERIFY(d1.front() == d2.front());
+ VERIFY(d1.back() == d2.back());
+ }
+ nErrorCount += CompareDeques(d1, d2, "Deque resize(value_type&)");
+
+ // operator[]
+ // at()
+ for(typename D1::size_type i = 0, iEnd = d1.size(); i < iEnd; i++)
+ {
+ VERIFY(d1[(unsigned)i] == d2[(unsigned)i]);
+ VERIFY(d1.at((unsigned)i) == d2.at((unsigned)i));
+ }
+
+ // resize()
+ for(int i = 0; i < 500; i++)
+ {
+ d1.resize(d1.size() - 2);
+ d2.resize(d2.size() - 2);
+ VERIFY(d1.front() == d2.front());
+ VERIFY(d1.back() == d2.back());
+ }
+ nErrorCount += CompareDeques(d1, d2, "Deque resize()");
+
+ // operator[]
+ // at()
+ for(typename D1::size_type i = 0, iEnd = d1.size(); i < iEnd; i++)
+ {
+ VERIFY(d1[(unsigned)i] == d2[(unsigned)i]);
+ VERIFY(d1.at((unsigned)i) == d2.at((unsigned)i));
+ }
+ }
+
+ VERIFY(DequeObject::sDOCount == 0);
+ VERIFY(DequeObject::sMagicErrorCount == 0);
+
+ return nErrorCount;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestDequeComplexMutation
+//
+template <typename D1, typename D2>
+int TestDequeComplexMutation()
+{
+ int nErrorCount = 0;
+
+ {
+ D1 d1;
+ D2 d2;
+
+
+ //////////////////////////////////////////////////////////////////
+ // void assign(size_type n, const value_type& value);
+ //////////////////////////////////////////////////////////////////
+
+ d1.assign(100, 1);
+ d2.assign(100, 1);
+ nErrorCount += CompareDeques(d1, d2, "Deque assign(size_type n, const value_type& value)");
+
+ d1.assign(50, 2);
+ d2.assign(50, 2);
+ nErrorCount += CompareDeques(d1, d2, "Deque assign(size_type n, const value_type& value)");
+
+ d1.assign(150, 2);
+ d2.assign(150, 2);
+ nErrorCount += CompareDeques(d1, d2, "Deque assign(size_type n, const value_type& value)");
+
+
+
+ //////////////////////////////////////////////////////////////////
+ // template <typename InputIterator>
+ // void assign(InputIterator first, InputIterator last);
+ //////////////////////////////////////////////////////////////////
+
+ std::list<int> intList1;
+ for(int i = 0; i < 100; i++)
+ intList1.push_back(i);
+
+ eastl::list<int> intList2;
+ for(int i = 0; i < 100; i++)
+ intList2.push_back(i);
+
+ d1.assign(intList1.begin(), intList1.end());
+ d2.assign(intList2.begin(), intList2.end());
+ nErrorCount += CompareDeques(d1, d2, "Deque assign(InputIterator first, InputIterator last)");
+
+
+
+ //////////////////////////////////////////////////////////////////
+ // iterator insert(iterator position, const value_type& value);
+ //////////////////////////////////////////////////////////////////
+
+ d1.insert(d1.begin(), d1[1]);
+ d2.insert(d2.begin(), d2[1]);
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, const value_type& value)");
+
+ d1.insert(d1.end(), d1[d1.size() - 2]);
+ d2.insert(d2.end(), d2[d2.size() - 2]);
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, const value_type& value)");
+
+ typename D1::iterator itD1NearBegin = d1.begin();
+ typename D2::iterator itD2NearBegin = d2.begin();
+
+ std::advance(itD1NearBegin, 1);
+ eastl::advance(itD2NearBegin, 1);
+
+ d1.insert(itD1NearBegin, d1[3]);
+ d2.insert(itD2NearBegin, d2[3]);
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, const value_type& value)");
+
+ typename D1::iterator itD1NearEnd = d1.begin();
+ typename D2::iterator itD2NearEnd = d2.begin();
+
+ std::advance(itD1NearEnd, d1.size() - 1);
+ eastl::advance(itD2NearEnd, d2.size() - 1);
+
+ d1.insert(itD1NearEnd, d1[d1.size() - 2]);
+ d2.insert(itD2NearEnd, d2[d2.size() - 2]);
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, const value_type& value)");
+
+
+ //////////////////////////////////////////////////////////////////
+ // void insert(iterator position, size_type n, const value_type& value);
+ //////////////////////////////////////////////////////////////////
+
+ d1.insert(d1.begin(), d1.size() * 2, 3); // Insert a large number of items at the front.
+ d2.insert(d2.begin(), d2.size() * 2, 3);
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, size_type n, const value_type& value)");
+
+ d1.insert(d1.end(), d1.size() * 2, 3); // Insert a large number of items at the end.
+ d2.insert(d2.end(), d2.size() * 2, 3);
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, size_type n, const value_type& value)");
+
+ itD1NearBegin = d1.begin();
+ itD2NearBegin = d2.begin();
+
+ std::advance(itD1NearBegin, 3);
+ eastl::advance(itD2NearBegin, 3);
+
+ d1.insert(itD1NearBegin, 3, 4);
+ d2.insert(itD2NearBegin, 3, 4);
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, size_type n, const value_type& value)");
+
+ itD1NearEnd = d1.begin();
+ itD2NearEnd = d2.begin();
+
+ std::advance(itD1NearEnd, d1.size() - 1);
+ eastl::advance(itD2NearEnd, d2.size() - 1);
+
+ d1.insert(d1.end(), 5, 6);
+ d2.insert(d2.end(), 5, 6);
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, size_type n, const value_type& value)");
+
+
+
+ //////////////////////////////////////////////////////////////////
+ // template <typename InputIterator>
+ // void insert(iterator position, InputIterator first, InputIterator last);
+ //////////////////////////////////////////////////////////////////
+
+ itD1NearBegin = d1.begin();
+ itD2NearBegin = d2.begin();
+
+ std::advance(itD1NearBegin, 3);
+ eastl::advance(itD2NearBegin, 3);
+
+ d1.insert(itD1NearBegin, intList1.begin(), intList1.end());
+ d2.insert(itD2NearBegin, intList2.begin(), intList2.end());
+ nErrorCount += CompareDeques(d1, d2, "Deque insert(iterator position, InputIterator first, InputIterator last)");
+
+
+
+ //////////////////////////////////////////////////////////////////
+ // iterator erase(iterator position);
+ //////////////////////////////////////////////////////////////////
+
+ itD1NearBegin = d1.begin();
+ itD2NearBegin = d2.begin();
+
+ while(itD1NearBegin != d1.end()) // Run a loop whereby we erase every third element.
+ {
+ for(int i = 0; (i < 3) && (itD1NearBegin != d1.end()); ++i)
+ {
+ ++itD1NearBegin;
+ ++itD2NearBegin;
+ }
+
+ if(itD1NearBegin != d1.end())
+ {
+ itD1NearBegin = d1.erase(itD1NearBegin);
+ itD2NearBegin = d2.erase(itD2NearBegin);
+ nErrorCount += CompareDeques(d1, d2, "Deque erase(iterator position)");
+ }
+ }
+
+
+ //////////////////////////////////////////////////////////////////
+ // iterator erase(iterator first, iterator last);
+ //////////////////////////////////////////////////////////////////
+
+ itD1NearBegin = d1.begin();
+ itD2NearBegin = d2.begin();
+
+ while(itD1NearBegin != d1.end()) // Run a loop whereby we erase spans of elements.
+ {
+ typename D1::iterator itD1Saved = itD1NearBegin;
+ typename D2::iterator itD2Saved = itD2NearBegin;
+
+ for(int i = 0; (i < 11) && (itD1NearBegin != d1.end()); ++i)
+ {
+ ++itD1NearBegin;
+ ++itD2NearBegin;
+ }
+
+ if(itD1NearBegin != d1.end())
+ {
+ itD1NearBegin = d1.erase(itD1Saved, itD1NearBegin);
+ itD2NearBegin = d2.erase(itD2Saved, itD2NearBegin);
+ nErrorCount += CompareDeques(d1, d2, "Deque erase(iterator position)");
+ }
+
+ for(int i = 0; (i < 17) && (itD1NearBegin != d1.end()); ++i)
+ {
+ ++itD1NearBegin;
+ ++itD2NearBegin;
+ }
+
+ }
+
+ }
+
+
+ {
+ //////////////////////////////////////////////////////////////////
+ // reverse_iterator erase(reverse_iterator position);
+ // reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+ //////////////////////////////////////////////////////////////////
+
+ //D1 d1Erase;
+ D2 d2Erase;
+
+ for(int i = 0; i < 20; i++)
+ {
+ typename D2::value_type val(i);
+ d2Erase.push_back(val);
+ }
+ VERIFY((d2Erase.size() == 20) && (d2Erase[0] == 0) && (d2Erase[19] == 19));
+
+
+ typename D2::reverse_iterator r2A = d2Erase.rbegin();
+ typename D2::reverse_iterator r2B = r2A + 3;
+ d2Erase.erase(r2A, r2B);
+ VERIFY((d2Erase.size() == 17));
+ VERIFY((d2Erase[0] == 0));
+ VERIFY((d2Erase[16] == 16));
+
+
+ r2B = d2Erase.rend();
+ r2A = r2B - 3;
+ d2Erase.erase(r2A, r2B);
+ VERIFY((d2Erase.size() == 14));
+ VERIFY((d2Erase[0] == 3));
+ VERIFY((d2Erase[13] == 16));
+
+
+ r2B = d2Erase.rend() - 1;
+ d2Erase.erase(r2B);
+ VERIFY((d2Erase.size() == 13));
+ VERIFY((d2Erase[0] == 4));
+ VERIFY((d2Erase[12] == 16));
+
+
+ r2B = d2Erase.rbegin();
+ d2Erase.erase(r2B);
+ VERIFY((d2Erase.size() == 12));
+ VERIFY((d2Erase[0] == 4));
+ VERIFY((d2Erase[11] == 15));
+
+
+ r2A = d2Erase.rbegin();
+ r2B = d2Erase.rend();
+ d2Erase.erase(r2A, r2B);
+ VERIFY(d2Erase.size() == 0);
+ }
+
+
+ VERIFY(DequeObject::sDOCount == 0);
+ VERIFY(DequeObject::sMagicErrorCount == 0);
+
+ return nErrorCount;
+}
+
+#endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+int TestDeque()
+{
+ int nErrorCount = 0;
+
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ { // Test construction
+ nErrorCount += TestDequeConstruction<SIntDeque, EIntDeque>();
+ nErrorCount += TestDequeConstruction<SIntDeque, EIntDeque1>();
+ nErrorCount += TestDequeConstruction<SIntDeque, EIntDeque32768>();
+
+ nErrorCount += TestDequeConstruction<SIntDeque, EDODeque>();
+ nErrorCount += TestDequeConstruction<SIntDeque, EDODeque1>();
+ nErrorCount += TestDequeConstruction<SIntDeque, EDODeque32768>();
+ }
+
+
+ { // Test simple mutating functionality.
+ nErrorCount += TestDequeSimpleMutation<SIntDeque, EIntDeque>();
+ nErrorCount += TestDequeSimpleMutation<SIntDeque, EIntDeque1>();
+ nErrorCount += TestDequeSimpleMutation<SIntDeque, EIntDeque32768>();
+
+ nErrorCount += TestDequeSimpleMutation<SIntDeque, EDODeque>();
+ nErrorCount += TestDequeSimpleMutation<SIntDeque, EDODeque1>();
+ nErrorCount += TestDequeSimpleMutation<SIntDeque, EDODeque32768>();
+ }
+
+ { // Test complex mutating functionality.
+ nErrorCount += TestDequeComplexMutation<SIntDeque, EIntDeque>();
+ nErrorCount += TestDequeComplexMutation<SIntDeque, EIntDeque1>();
+ nErrorCount += TestDequeComplexMutation<SIntDeque, EIntDeque32768>();
+
+ nErrorCount += TestDequeComplexMutation<SIntDeque, EDODeque>();
+ nErrorCount += TestDequeComplexMutation<SIntDeque, EDODeque1>();
+ nErrorCount += TestDequeComplexMutation<SIntDeque, EDODeque32768>();
+ }
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+ // test deque support of move-only types
+ {
+ {
+ eastl::deque<MoveAssignable> d;
+ d.emplace_back(MoveAssignable::Create());
+ d.emplace_front(MoveAssignable::Create());
+
+ auto cd = eastl::move(d);
+ EATEST_VERIFY( d.size() == 0);
+ EATEST_VERIFY(cd.size() == 2);
+ }
+
+ {
+ // User regression but passing end() to deque::erase is not valid.
+ // Iterator passed to deque::erase but must valid and dereferencable.
+ //
+ // eastl::deque<MoveAssignable> d; // empty deque
+ // d.erase(d.begin());
+ // EATEST_VERIFY(d.size() == 0);
+ }
+
+ // simply test the basic api of deque with a move-only type
+ {
+ eastl::deque<MoveAssignable> d;
+
+ // emplace_back
+ d.emplace_back(MoveAssignable::Create());
+ d.emplace_back(MoveAssignable::Create());
+ d.emplace_back(MoveAssignable::Create());
+
+ // erase
+ d.erase(d.begin());
+ EATEST_VERIFY(d.size() == 2);
+
+ // at / front / back / operator[]
+ EATEST_VERIFY(d[0].value == 42);
+ EATEST_VERIFY(d.at(0).value == 42);
+ EATEST_VERIFY(d.front().value == 42);
+ EATEST_VERIFY(d.back().value == 42);
+
+ // clear
+ d.clear();
+ EATEST_VERIFY(d.size() == 0);
+
+ // emplace
+ d.emplace(d.begin(), MoveAssignable::Create());
+ d.emplace(d.begin(), MoveAssignable::Create());
+ EATEST_VERIFY(d.size() == 2);
+
+ // pop_back
+ d.pop_back();
+ EATEST_VERIFY(d.size() == 1);
+
+ // push_back / push_front / resize requires T be 'CopyConstructible'
+
+ {
+ eastl::deque<MoveAssignable> swapped_d;
+
+ // emplace_front
+ swapped_d.emplace_front(MoveAssignable::Create());
+ swapped_d.emplace_front(MoveAssignable::Create());
+ swapped_d.emplace_front(MoveAssignable::Create());
+
+ // swap
+ swapped_d.swap(d);
+ EATEST_VERIFY(swapped_d.size() == 1);
+ EATEST_VERIFY(d.size() == 3);
+ }
+
+ // pop_front
+ d.pop_front();
+ EATEST_VERIFY(d.size() == 2);
+
+ // insert
+ d.insert(d.end(), MoveAssignable::Create());
+ EATEST_VERIFY(d.size() == 3);
+ }
+ }
+
+ {
+ // deque(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR);
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ // void assign(std::initializer_list<value_type> ilist);
+ // iterator insert(iterator position, std::initializer_list<value_type> ilist);
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ eastl::deque<int> intDeque = { 0, 1, 2 };
+ EATEST_VERIFY(VerifySequence(intDeque.begin(), intDeque.end(), int(), "deque std::initializer_list", 0, 1, 2, -1));
+
+ intDeque = { 13, 14, 15 };
+ EATEST_VERIFY(VerifySequence(intDeque.begin(), intDeque.end(), int(), "deque std::initializer_list", 13, 14, 15, -1));
+
+ intDeque.assign({ 16, 17, 18 });
+ EATEST_VERIFY(VerifySequence(intDeque.begin(), intDeque.end(), int(), "deque std::initializer_list", 16, 17, 18, -1));
+
+ eastl::deque<int>::iterator it = intDeque.insert(intDeque.begin(), { 14, 15 });
+ EATEST_VERIFY(VerifySequence(intDeque.begin(), intDeque.end(), int(), "deque std::initializer_list", 14, 15, 16, 17, 18, -1));
+ EATEST_VERIFY(*it == 14);
+ #endif
+ }
+
+
+ { // C++11 functionality
+ // deque(this_type&& x);
+ // deque(this_type&& x, const allocator_type& allocator);
+ // this_type& operator=(this_type&& x);
+ // void push_front(value_type&& value);
+ // void push_back(value_type&& value);
+ // iterator insert(const_iterator position, value_type&& value);
+
+ using namespace eastl;
+
+ deque<TestObject> deque3TO33(3, TestObject(33));
+ deque<TestObject> toDequeA(eastl::move(deque3TO33));
+ EATEST_VERIFY((toDequeA.size() == 3) && (toDequeA.front().mX == 33) && (deque3TO33.size() == 0));
+
+ // The following is not as strong a test of this ctor as it could be. A stronger test would be to use IntanceAllocator with different instances.
+ deque<TestObject, MallocAllocator> deque4TO44(4, TestObject(44));
+ deque<TestObject, MallocAllocator> toDequeB(eastl::move(deque4TO44), MallocAllocator());
+ EATEST_VERIFY((toDequeB.size() == 4) && (toDequeB.front().mX == 44) && (deque4TO44.size() == 0));
+
+ deque<TestObject, MallocAllocator> deque5TO55(5, TestObject(55));
+ toDequeB = eastl::move(deque5TO55);
+ EATEST_VERIFY((toDequeB.size() == 5) && (toDequeB.front().mX == 55) && (deque5TO55.size() == 0));
+ }
+
+
+ { // C++11 functionality
+ // template<class... Args>
+ // iterator emplace(const_iterator position, Args&&... args);
+
+ // template<class... Args>
+ // void emplace_front(Args&&... args);
+
+ // template<class... Args>
+ // void emplace_back(Args&&... args);
+ TestObject::Reset();
+
+ deque<TestObject, eastl::allocator, 16> toDequeA;
+
+ toDequeA.emplace_back(2, 3, 4);
+ EATEST_VERIFY_F((toDequeA.size() == 1) && (toDequeA.back().mX == (2+3+4)) && (TestObject::sTOCtorCount == 1), "size: %u, mX: %u, count: %d", (unsigned)toDequeA.size(), (unsigned)toDequeA.back().mX, (int)TestObject::sTOCtorCount);
+
+ toDequeA.emplace(toDequeA.begin(), 3, 4, 5); // This is 3 because of how subarray allocation works.
+ EATEST_VERIFY_F((toDequeA.size() == 2) && (toDequeA.front().mX == (3+4+5)) && (TestObject::sTOCtorCount == 3), "size: %u, mX: %u, count: %d", (unsigned)toDequeA.size(), (unsigned)toDequeA.front().mX, (int)TestObject::sTOCtorCount);
+
+ toDequeA.emplace_front(6, 7, 8);
+ EATEST_VERIFY_F((toDequeA.size() == 3) && (toDequeA.front().mX == (6+7+8)) && (TestObject::sTOCtorCount == 4), "size: %u, mX: %u, count: %d", (unsigned)toDequeA.size(), (unsigned)toDequeA.front().mX, (int)TestObject::sTOCtorCount);
+
+
+ // This test is similar to the emplace pathway above.
+ TestObject::Reset();
+
+ //void push_front(T&& x);
+ //void push_back(T&& x);
+ //iterator insert(const_iterator position, T&& x);
+
+ deque<TestObject, eastl::allocator, 16> toDequeC; // Specify a non-small kSubarrayCount of 16 because the move count tests below assume there is no reallocation.
+
+ toDequeC.push_back(TestObject(2, 3, 4));
+ EATEST_VERIFY((toDequeC.size() == 1) && (toDequeC.back().mX == (2+3+4)) && (TestObject::sTOMoveCtorCount == 1));
+
+ toDequeC.insert(toDequeC.begin(), TestObject(3, 4, 5));
+ EATEST_VERIFY((toDequeC.size() == 2) && (toDequeC.front().mX == (3+4+5)) && (TestObject::sTOMoveCtorCount == 3));
+
+ toDequeC.push_front(TestObject(6, 7, 8));
+ EATEST_VERIFY((toDequeC.size() == 3) && (toDequeC.front().mX == (6+7+8)) && (TestObject::sTOMoveCtorCount == 4));
+ }
+
+
+ {
+ // Regression of deque::operator= for the case of EASTL_ALLOCATOR_COPY_ENABLED=1
+ // For this test we need to use InstanceAllocator to create two containers of the same
+ // type but with different and unequal allocator instances. The bug was that when
+ // EASTL_ALLOCATOR_COPY_ENABLED was enabled operator=(this_type& x) assigned x.mAllocator
+ // to this and then proceeded to assign member elements from x to this. That's invalid
+ // because the existing elements of this were allocated by a different allocator and
+ // will be freed in the future with the allocator copied from x.
+ // The test below should work for the case of EASTL_ALLOCATOR_COPY_ENABLED == 0 or 1.
+ InstanceAllocator::reset_all();
+
+ InstanceAllocator ia0((uint8_t)0);
+ InstanceAllocator ia1((uint8_t)1);
+
+ eastl::deque<int, InstanceAllocator> v0((eastl_size_t)1, (int)0, ia0);
+ eastl::deque<int, InstanceAllocator> v1((eastl_size_t)1, (int)1, ia1);
+
+ EATEST_VERIFY((v0.front() == 0) && (v1.front() == 1));
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ EATEST_VERIFY(v0.get_allocator() != v1.get_allocator());
+ #endif
+ v0 = v1;
+ EATEST_VERIFY((v0.front() == 1) && (v1.front() == 1));
+ EATEST_VERIFY(InstanceAllocator::mMismatchCount == 0);
+ EATEST_VERIFY(v0.validate());
+ EATEST_VERIFY(v1.validate());
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ EATEST_VERIFY(v0.get_allocator() == v1.get_allocator());
+ #endif
+ }
+
+
+ { // Regression of kDequeSubarraySize calculations
+ VERIFY(EIntDeque::kSubarraySize >= 4);
+ VERIFY(EIntDeque1::kSubarraySize == 1);
+ VERIFY(EIntDeque32768::kSubarraySize == 32768);
+
+ VERIFY(EDODeque::kSubarraySize >= 2);
+ VERIFY(EDODeque1::kSubarraySize == 1);
+ VERIFY(EDODeque32768::kSubarraySize == 32768);
+ }
+
+
+ { // Regression of user-reported bug
+
+ // The following was reported by Nicolas Mercier on April 9, 2008 as causing a crash:
+ // This code breaks on our machines because it overwrites the
+ // first 4 bytes before the beginning of the memory that was
+ // allocated for mpPtrArray. So when temp goes out of scope,
+ // it will free this pointer and the debug allocator will detect
+ // that these bytes have been changed.
+
+ eastl::deque<eastl::string> testArray;
+ eastl::string s("a");
+
+ for(int j = 0; j < 65; j++)
+ testArray.push_back(s);
+
+ eastl::deque<eastl::string> temp;
+ temp = testArray; // This is where the corruption occurred.
+ }
+
+
+ { // Regression of user-reported bug
+
+ // The problem is that the pointer arrays on the deques are growing without bound.
+ // This is causing our game to crash on a soak test due to its frame event queues
+ // consuming inordinate amounts of memory. It looks like the current version of
+ // eastl::deque is missing logic to recenter the pointer array, so it keeps growing
+ // slowly as blocks are allocated on the tail and removed from the head.
+ // Note: This bug was introduced by the (mistaken) fix for April 9 bug above.
+
+ eastl::deque<int, MallocAllocator> x;
+ eastl::deque<int, MallocAllocator> y;
+
+ const MallocAllocator& maX = x.get_allocator();
+ const MallocAllocator& maY = y.get_allocator();
+
+ size_t allocVolumeX1 = 0;
+ size_t allocVolumeY1 = 0;
+ size_t allocVolumeX2 = 0;
+ size_t allocVolumeY2 = 0;
+
+ for(int i = 0; i < 1001; ++i) // With the bug, each time through this loop the containers mistakenly allocate more memory.
+ {
+ if(i == 100) // Save the allocated volume after 50 iterations.
+ {
+ allocVolumeX1 = maX.mAllocVolume;
+ allocVolumeY1 = maY.mAllocVolume;
+ }
+
+ for(int j = 0; j < 5; ++j)
+ x.push_back(0);
+
+ x.swap(y);
+
+ while(!x.empty())
+ x.pop_front();
+ }
+
+ allocVolumeX2 = maX.mAllocVolume; // Save the allocated volume after 1001 iterations.
+ allocVolumeY2 = maY.mAllocVolume;
+
+ VERIFY((allocVolumeX1 == allocVolumeX2) && (allocVolumeX2 < 350)); // Test that the volume has not changed and is below some nominal value.
+ VERIFY((allocVolumeY1 == allocVolumeY2) && (allocVolumeY2 < 350)); // This value is somewhat arbitrary and slightly hardware dependent (e.g. 32 vs. 64 bit). I bumped it up from 300 to 350 when Linux64 showed it to be 320, which was ~still OK.
+ }
+
+
+ { // Regression of user error report for the case of deque<const type>.
+ eastl::vector<int> ctorValues;
+
+ for(int v = 0; v < 10; v++)
+ ctorValues.push_back(v);
+
+ eastl::deque<const ConstType> testStruct(ctorValues.begin(), ctorValues.end());
+ eastl::deque<const int> testInt(ctorValues.begin(), ctorValues.end());
+ }
+
+
+ { // Regression to verify that const deque works.
+ const eastl::deque<int> constIntDeque1;
+ VERIFY(constIntDeque1.empty());
+
+ int intArray[3] = { 37, 38, 39 };
+ const eastl::deque<int> constIntDeque2(intArray, intArray + 3);
+ VERIFY(constIntDeque2.size() == 3);
+
+ const eastl::deque<int> constIntDeque3(4, 37);
+ VERIFY(constIntDeque3.size() == 4);
+
+ const eastl::deque<int> constIntDeque4;
+ const eastl::deque<int> constIntDeque5 = constIntDeque4;
+ }
+
+ {
+ // test shrink_to_fit
+ eastl::deque<int, CountingAllocator> d(4096);
+ d.erase(d.begin(), d.end());
+
+ auto prev = d.get_allocator().getActiveAllocationSize();
+ d.shrink_to_fit();
+ VERIFY(d.get_allocator().getActiveAllocationSize() < prev);
+ }
+
+ {
+ #ifndef EASTL_OPENSOURCE
+ auto prevAllocCount = gEASTLTest_AllocationCount;
+ #endif
+ {
+ EA_DISABLE_VC_WARNING(4625 4626)
+ struct a
+ {
+ a(int* p)
+ : ptr(p) { }
+
+ eastl::unique_ptr<int> ptr;
+ };
+ EA_RESTORE_VC_WARNING()
+
+ static_assert(eastl::has_trivial_relocate<a>::value == false, "failure");
+
+ eastl::deque<a> d;
+
+ d.emplace_back(new int(1));
+ d.emplace_back(new int(2));
+ d.emplace_back(new int(3));
+
+ d.erase(d.begin() + 1);
+ }
+ #ifndef EASTL_OPENSOURCE
+ VERIFY(gEASTLTest_AllocationCount == prevAllocCount);
+ #endif
+ }
+
+
+ { // Test erase / erase_if
+ {
+ eastl::deque<int> d = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto numErased = eastl::erase(d, 2);
+ VERIFY((d == eastl::deque<int>{1, 3, 4, 5, 6, 7, 8, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(d, 7);
+ VERIFY((d == eastl::deque<int>{1, 3, 4, 5, 6, 8, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(d, 9);
+ VERIFY((d == eastl::deque<int>{1, 3, 4, 5, 6, 8}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(d, 5);
+ VERIFY((d == eastl::deque<int>{1, 3, 4, 6, 8}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(d, 3);
+ VERIFY((d == eastl::deque<int>{1, 4, 6, 8}));
+ VERIFY(numErased == 1);
+ }
+
+ {
+ eastl::deque<int> d = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ auto numErased = eastl::erase_if(d, [](auto i) { return i % 2 == 0; });
+ VERIFY((d == eastl::deque<int>{1, 3, 5, 7, 9}));
+ VERIFY(numErased == 4);
+ }
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+
+ { // Test <=>
+ eastl::deque<int> d1 = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::deque<int> d2 = {9, 8, 7, 6, 5, 4, 3, 2, 1};
+ eastl::deque<int> d3 = {1, 2, 3, 4, 5};
+ eastl::deque<int> d4 = {10};
+
+ VERIFY(d1 != d2);
+ VERIFY(d1 < d2);
+ VERIFY(d1 != d3);
+ VERIFY(d1 > d3);
+ VERIFY(d4 > d1);
+ VERIFY(d4 > d2);
+ VERIFY(d4 > d3);
+
+ VERIFY((d1 <=> d2) != 0);
+ VERIFY((d1 <=> d2) < 0);
+ VERIFY((d1 <=> d3) != 0);
+ VERIFY((d1 <=> d3) > 0);
+ VERIFY((d4 <=> d1) > 0);
+ VERIFY((d4 <=> d2) > 0);
+ VERIFY((d4 <=> d3) > 0);
+ }
+#endif
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestExtra.cpp b/EASTL/test/source/TestExtra.cpp
new file mode 100644
index 0000000..52fbd62
--- /dev/null
+++ b/EASTL/test/source/TestExtra.cpp
@@ -0,0 +1,1554 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////
+// Test forward delcarations
+/////////////////////////////////////////////////////////////////////////////
+
+namespace eastl
+{
+ class allocator;
+
+ template <typename T, typename Allocator> class basic_string;
+ typedef basic_string<char, allocator> local_string8; // collides with eastl::string8 in bulkbuilds
+
+ template <typename T> struct local_less {};
+
+ static void UseForwardDeclaredString(local_string8*)
+ {
+ }
+
+
+ template <typename T, typename Allocator> class vector;
+ typedef vector<char, allocator> vector8;
+
+ static void UseForwardDeclaredVector(vector8*)
+ {
+ }
+
+
+ template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode> class hash_set;
+ typedef hash_set<char, char, local_less<char>, allocator, false> hash_set8;
+
+ static void UseForwardDeclaredHashSet(hash_set8*)
+ {
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator> class map;
+ typedef map<char, char, local_less<char>, allocator> map8;
+
+ static void UseForwardDeclaredMap(map8*)
+ {
+ }
+}
+
+
+#include "EASTLTest.h"
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+#include <EASTL/memory.h>
+#include <EASTL/allocator.h>
+#include <EASTL/allocator_malloc.h>
+#include <EASTL/fixed_allocator.h>
+#include <EASTL/intrusive_list.h>
+#include <EASTL/numeric.h>
+#include <EASTL/queue.h>
+#include <EASTL/priority_queue.h>
+#include <EASTL/stack.h>
+#include <EASTL/heap.h>
+#include <EASTL/vector.h>
+#include <EASTL/deque.h>
+#include <EASTL/list.h>
+#include <EASTL/map.h>
+#include <EASTL/string.h>
+#include <EASTL/hash_set.h>
+#include <EASTL/random.h>
+#include <EASTL/bit.h>
+#include <EASTL/core_allocator_adapter.h>
+#include <EASTL/bonus/call_traits.h>
+#include <EASTL/bonus/compressed_pair.h>
+#include <EASTL/bonus/adaptors.h>
+#include <EAStdC/EAAlignment.h>
+#include <EAStdC/EAMemory.h>
+#include <EAStdC/EAString.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+
+#include <stdio.h>
+#include <string.h>
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <algorithm>
+ #include <utility>
+ #include <stack>
+ #include <queue>
+ #include <vector>
+ #include <deque>
+ #include <math.h>
+#endif
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+
+
+
+using namespace eastl;
+
+
+
+namespace
+{
+ /// IntNode
+ ///
+ /// Test intrusive_list node.
+ ///
+ struct IntNode : public eastl::intrusive_list_node
+ {
+ int mX;
+
+ IntNode(int x = 0)
+ : mX(x) { }
+
+ operator int() const
+ { return mX; }
+ };
+
+ bool operator<(const IntNode& a, const IntNode& b)
+ { return a.mX < b.mX; }
+}
+
+
+
+
+
+
+struct TestClass
+{
+ mutable int mX;
+
+ TestClass() : mX(37) { }
+
+ void Increment()
+ {
+ mX++;
+ }
+
+ void IncrementConst() const
+ {
+ mX++;
+ }
+
+ int MultiplyBy(int x)
+ {
+ return mX * x;
+ }
+
+ int MultiplyByConst(int x) const
+ {
+ return mX * x;
+ }
+};
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestForwardDeclarations
+//
+static int TestForwardDeclarations()
+{
+ int nErrorCount = 0;
+
+ eastl::local_string8 s8;
+ UseForwardDeclaredString(&s8);
+
+ eastl::vector8 v8;
+ UseForwardDeclaredVector(&v8);
+
+ eastl::hash_set8 h8;
+ UseForwardDeclaredHashSet(&h8);
+
+ eastl::map8 m8;
+ UseForwardDeclaredMap(&m8);
+
+ return nErrorCount;
+}
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// fixed_pool_reference
+//
+struct fixed_pool_reference
+{
+public:
+ fixed_pool_reference(const char* = NULL)
+ {
+ mpFixedPool = NULL;
+ }
+
+ fixed_pool_reference(eastl::fixed_pool& fixedPool)
+ {
+ mpFixedPool = &fixedPool;
+ }
+
+ fixed_pool_reference(const fixed_pool_reference& x)
+ {
+ mpFixedPool = x.mpFixedPool;
+ }
+
+ fixed_pool_reference& operator=(const fixed_pool_reference& x)
+ {
+ mpFixedPool = x.mpFixedPool;
+ return *this;
+ }
+
+ void* allocate(size_t /*n*/, int /*flags*/ = 0)
+ {
+ return mpFixedPool->allocate();
+ }
+
+ void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+ {
+ return mpFixedPool->allocate();
+ }
+
+ void deallocate(void* p, size_t /*n*/)
+ {
+ return mpFixedPool->deallocate(p);
+ }
+
+ const char* get_name() const
+ {
+ return "fixed_pool_reference";
+ }
+
+ void set_name(const char* /*pName*/)
+ {
+ }
+
+protected:
+ friend bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b);
+ friend bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b);
+
+ eastl::fixed_pool* mpFixedPool;
+};
+
+
+inline bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b)
+{
+ return (a.mpFixedPool == b.mpFixedPool);
+}
+
+inline bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b)
+{
+ return (a.mpFixedPool != b.mpFixedPool);
+}
+
+
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::queue<int, deque<int> >;
+template class eastl::queue<Align64, deque<Align64> >;
+template class eastl::queue<TestObject, list<TestObject> >;
+//template class eastl::queue<IntNode, intrusive_list<IntNode> >;// This test has been disabled as of the addition of initializer_list support to eastl::queue. initializer_lists have const nodes, which is incompatible with intrusive_list. You can use eastl::queue<IntNode, intrusive_list<IntNode> > as long as you don't use initializer_list with it. The problem with this line of code is that it forces compilation of the entire class.
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestQueue
+//
+static int TestQueue()
+{
+ int nErrorCount = 0;
+
+ {
+ // Exercise IntNode.
+ IntNode x, y;
+ EATEST_VERIFY((x < y) || !(x < y) || ((int)x < (int)y));
+ }
+
+ TestObject::Reset();
+
+ {
+ // queue(const Sequence& x = Sequence());
+ queue<TestObject, list<TestObject>> toListQueue;
+ queue<TestObject, list<TestObject>> toListQueue2;
+
+
+ // global operators
+ EATEST_VERIFY( (toListQueue == toListQueue2));
+ EATEST_VERIFY(!(toListQueue != toListQueue2));
+ EATEST_VERIFY( (toListQueue <= toListQueue2));
+ EATEST_VERIFY( (toListQueue >= toListQueue2));
+ EATEST_VERIFY(!(toListQueue < toListQueue2));
+ EATEST_VERIFY(!(toListQueue > toListQueue2));
+
+ // bool empty() const;
+ // size_type size() const;
+ EATEST_VERIFY(toListQueue.empty());
+ EATEST_VERIFY(toListQueue.size() == 0);
+
+
+ // void push(const value_type& value);
+ // reference front();
+ // const_reference front() const;
+ // reference back();
+ // const_reference back() const;
+ toListQueue.push(TestObject(0));
+ EATEST_VERIFY(toListQueue.front() == TestObject(0));
+ EATEST_VERIFY(toListQueue.back() == TestObject(0));
+
+ toListQueue.push(TestObject(1));
+ EATEST_VERIFY(toListQueue.front() == TestObject(0));
+ EATEST_VERIFY(toListQueue.back() == TestObject(1));
+
+ toListQueue.push(TestObject(2));
+ EATEST_VERIFY(toListQueue.front() == TestObject(0));
+ EATEST_VERIFY(toListQueue.back() == TestObject(2));
+ EATEST_VERIFY(!toListQueue.empty());
+ EATEST_VERIFY(toListQueue.size() == 3);
+
+
+ // void pop();
+ toListQueue.pop();
+ EATEST_VERIFY(toListQueue.front() == TestObject(1));
+ EATEST_VERIFY(toListQueue.back() == TestObject(2));
+
+ toListQueue.pop();
+ EATEST_VERIFY(toListQueue.front() == TestObject(2));
+ EATEST_VERIFY(toListQueue.back() == TestObject(2));
+
+ toListQueue.pop();
+ EATEST_VERIFY(toListQueue.empty());
+ EATEST_VERIFY(toListQueue.size() == 0);
+
+
+ // decltype(auto) emplace(Args&&... args);
+ toListQueue.emplace(1);
+ EATEST_VERIFY(!toListQueue.empty());
+ EATEST_VERIFY(toListQueue.front() == TestObject(1));
+ EATEST_VERIFY(toListQueue.size() == 1);
+
+
+ // container_type& get_container();
+ // const container_type& get_container() const;
+ list<TestObject>& ref = toListQueue.get_container();
+ EATEST_VERIFY(ref.size() == toListQueue.size());
+
+
+ // queue(std::initializer_list<value_type> ilist);
+ queue<int> intQueue = { 3, 4, 5 };
+ EATEST_VERIFY(intQueue.size() == 3);
+ EATEST_VERIFY(intQueue.front() == 3);
+ intQueue.pop();
+ EATEST_VERIFY(intQueue.front() == 4);
+ intQueue.pop();
+ EATEST_VERIFY(intQueue.front() == 5);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ // queue(const Sequence& x = Sequence());
+ queue<TestObject, list<TestObject>> toListQueue;
+ queue<TestObject, list<TestObject>> toListQueue2;
+
+
+ // global operators
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) == 0));
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) != 0));
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) <= 0));
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) >= 0));
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) < 0));
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) > 0));
+
+ // bool empty() const;
+ // size_type size() const;
+ EATEST_VERIFY(toListQueue.empty());
+ EATEST_VERIFY(toListQueue.size() == 0);
+
+ // Verify toListQueue > toListQueue2
+ toListQueue.push(TestObject(0));
+ toListQueue.push(TestObject(1));
+ toListQueue2.push(TestObject(0));
+
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) == 0));
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) != 0));
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) >= 0));
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) <= 0));
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) > 0));
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) < 0));
+
+ // Verify toListQueue2 > toListQueue by element size
+ toListQueue2.push(TestObject(3));
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) == 0));
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) != 0));
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) <= 0));
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) >= 0));
+ EATEST_VERIFY( ((toListQueue <=> toListQueue2) < 0));
+ EATEST_VERIFY(!((toListQueue <=> toListQueue2) > 0));
+
+ queue<TestObject, list<TestObject>> toListQueue3;
+ queue<TestObject, list<TestObject>> toListQueue4;
+
+ for (int i = 0; i < 10; i++)
+ {
+ toListQueue3.push(TestObject(i));
+ if (i < 5)
+ toListQueue4.push(TestObject(i));
+ }
+
+ // Verify toListQueue4 is a strict subset of toListQueue3
+ EATEST_VERIFY(!((toListQueue3 <=> toListQueue4) == 0));
+ EATEST_VERIFY( ((toListQueue3 <=> toListQueue4) != 0));
+ EATEST_VERIFY( ((toListQueue3 <=> toListQueue4) >= 0));
+ EATEST_VERIFY(!((toListQueue3 <=> toListQueue4) <= 0));
+ EATEST_VERIFY( ((toListQueue3 <=> toListQueue4) > 0));
+ EATEST_VERIFY(!((toListQueue3 <=> toListQueue4) < 0));
+
+ // Verify that even thoughn toListQueue4 has a smaller size, it's lexicographically larger
+ toListQueue4.push(TestObject(11));
+ EATEST_VERIFY(!((toListQueue3 <=> toListQueue4) == 0));
+ EATEST_VERIFY( ((toListQueue3 <=> toListQueue4) != 0));
+ EATEST_VERIFY( ((toListQueue3 <=> toListQueue4) <= 0));
+ EATEST_VERIFY(!((toListQueue3 <=> toListQueue4) >= 0));
+ EATEST_VERIFY( ((toListQueue3 <=> toListQueue4) < 0));
+ EATEST_VERIFY(!((toListQueue3 <=> toListQueue4) > 0));
+
+ }
+
+ {
+ queue<TestObject, list<TestObject>> toListQueue1;
+ queue<TestObject, list<TestObject>> toListQueue2;
+ queue<TestObject, list<TestObject>> toListQueue3;
+
+ for (int i = 0; i < 10; i++)
+ {
+ toListQueue1.push(TestObject(i));
+ toListQueue2.push(TestObject(9-i));
+ if (i < 5)
+ toListQueue3.push(TestObject(i));
+ }
+
+ struct weak_ordering_queue
+ {
+ queue<TestObject, list<TestObject>> queue;
+ inline std::weak_ordering operator<=>(const weak_ordering_queue& b) const { return queue <=> b.queue; }
+ };
+
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_queue{toListQueue1}, weak_ordering_queue{toListQueue2}) == std::weak_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_queue{toListQueue3}, weak_ordering_queue{toListQueue1}) == std::weak_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_queue{toListQueue2}, weak_ordering_queue{toListQueue1}) == std::weak_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_queue{toListQueue2}, weak_ordering_queue{toListQueue3}) == std::weak_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_queue{toListQueue1}, weak_ordering_queue{toListQueue1}) == std::weak_ordering::equivalent);
+ }
+ #endif
+
+ {
+ vector<TestObject> toVector;
+ for(int i = 0; i < 100; i++)
+ toVector.push_back(TestObject(i));
+
+ // template <class Allocator>
+ // queue(this_type&& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL);
+ //
+ // explicit queue(container_type&& x);
+ //
+ // void push(value_type&& x);
+
+ queue<TestObject, vector<TestObject> > toQ_0;
+ queue<TestObject, vector<TestObject> > toQ_A(eastl::move(toQ_0), toQ_0.get_container().get_allocator()); // It would be better if we also tested an alternative allocator.
+ EATEST_VERIFY(toQ_A.size() == 0);
+ toQ_A.push(TestObject(1000));
+ EATEST_VERIFY(toQ_A.size() == 1);
+
+ queue<TestObject, vector<TestObject> > toQ_B(eastl::move(toQ_A), toQ_A.get_container().get_allocator()); // It would be better if we also tested an alternative allocator.
+ EATEST_VERIFY((toQ_B.size() == 1) && toQ_A.empty());
+
+ eastl::vector<TestObject> toVectorM(toVector);
+ queue<TestObject, vector<TestObject> > toQ_C(eastl::move(toVectorM));
+ EATEST_VERIFY((toQ_C.size() == toVector.size()) && toVectorM.empty());
+
+ // template <class... Args>
+ // void emplace_back(Args&&... args);
+
+ queue<TestObject, vector<TestObject> > toQ_D;
+ toQ_D.emplace(0, 1, 2);
+ EATEST_VERIFY(toQ_D.size() == 1) && (toQ_D.back() == TestObject(0, 1, 2));
+ }
+
+
+ { // Test std namespace elements contained in queue
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ eastl::queue< std::pair<int, int> > stlQueue;
+ stlQueue.push(std::make_pair(1, 1));
+ EATEST_VERIFY(stlQueue.size() == 1);
+ #endif
+ }
+
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::priority_queue<int, vector<int> >;
+template class eastl::priority_queue<Align64, deque<Align64> >;
+template class eastl::priority_queue<TestObject, vector<TestObject> >;
+template class eastl::priority_queue<float, vector<float>, less<float> >;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestPriorityQueue
+//
+static int TestPriorityQueue()
+{
+ int nErrorCount = 0;
+
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+
+ TestObject::Reset();
+
+ {
+ less<TestObject> toLess;
+
+ vector<TestObject> toVector;
+ for(int i = 0; i < 100; i++)
+ toVector.push_back(TestObject(i));
+ random_shuffle(toVector.begin(), toVector.end(), rng);
+
+ list<TestObject> toList;
+ for(eastl_size_t j = 0; j < 100; j++)
+ toList.push_back(toVector[j]);
+
+
+ // priority_queue(const Compare& compare = Compare(), const Sequence& x = Sequence());
+ // template <typename InputIterator>
+ // priority_queue(InputIterator first, InputIterator last, const Compare& compare = Compare(), const Sequence& x = Sequence());
+ priority_queue<TestObject, vector<TestObject> > toPQ;
+ priority_queue<TestObject, vector<TestObject> > toPQV(toLess, toVector);
+ priority_queue<TestObject, vector<TestObject> > toPQL(toList.begin(), toList.end());
+
+ EATEST_VERIFY(toPQ.empty());
+ EATEST_VERIFY(toPQ.size() == 0);
+
+ EATEST_VERIFY(!toPQV.empty());
+ EATEST_VERIFY( toPQV.size() == toVector.size());
+
+ EATEST_VERIFY(!toPQL.empty());
+ EATEST_VERIFY( toPQL.size() == toList.size());
+
+
+ // global operators
+ EATEST_VERIFY( (toPQ != toPQL));
+ EATEST_VERIFY( (toPQV == toPQL));
+ EATEST_VERIFY(!(toPQV != toPQL));
+ EATEST_VERIFY( (toPQV <= toPQL));
+ EATEST_VERIFY( (toPQV >= toPQL));
+ EATEST_VERIFY(!(toPQV < toPQL));
+ EATEST_VERIFY(!(toPQV > toPQL));
+
+
+ // container_type& get_container();
+ // const container_type& get_container() const;
+ vector<TestObject>& ref = toPQL.get_container();
+ EATEST_VERIFY(ref.size() == toPQL.size());
+ EATEST_VERIFY(is_heap(ref.begin(), ref.end()));
+
+ // bool validate() const;
+ EATEST_VERIFY(toPQL.validate());
+ // To consider: Verify that validate detects an invalid heap.
+ // Testing this might be an issue if the validation function actively complains in some way.
+
+
+ // const_reference top() const;
+ // void pop();
+ const TestObject& to1 = toPQL.top();
+ EATEST_VERIFY(to1 == TestObject(99));
+
+ toPQL.pop();
+ EATEST_VERIFY(!toPQL.empty());
+ EATEST_VERIFY( toPQL.size() == toList.size() - 1);
+ EATEST_VERIFY(to1 == TestObject(98));
+ EATEST_VERIFY(is_heap(ref.begin(), ref.end()));
+
+
+ // void push(const value_type& value);
+ toPQL.push(TestObject(1000));
+ EATEST_VERIFY(toPQL.size() == toList.size());
+ const TestObject& to2 = toPQL.top();
+ EATEST_VERIFY(to2 == TestObject(1000));
+ toPQL.pop();
+ const TestObject& to3 = toPQL.top();
+ EATEST_VERIFY(to3 == TestObject(98));
+ EATEST_VERIFY(is_heap(ref.begin(), ref.end()));
+
+
+ // void change(size_type n);
+ TestObject& to4 = ref[50];
+ to4 = TestObject(2000);
+ toPQL.change(50);
+ const TestObject& to5 = toPQL.top();
+ EATEST_VERIFY(to5 == TestObject(2000));
+ EATEST_VERIFY(is_heap(ref.begin(), ref.end()));
+
+
+ // void remove(size_type n);
+ TestObject to6 = ref[20];
+ toPQL.remove(20);
+ EATEST_VERIFY( toPQL.size() == toList.size() - 2);
+ TestObject& to7 = ref[20];
+ EATEST_VERIFY(!(to6 == to7));
+ EATEST_VERIFY(is_heap(ref.begin(), ref.end()));
+
+
+ // priority_queue(std::initializer_list<value_type> ilist, const compare_type& compare = compare_type());
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ priority_queue<int, vector<int> > intPQ = { 3, 4, 5 };
+ EATEST_VERIFY(intPQ.size() == 3);
+ EATEST_VERIFY(intPQ.top() == 5);
+ intPQ.pop();
+ EATEST_VERIFY(intPQ.top() == 4);
+ intPQ.pop();
+ EATEST_VERIFY(intPQ.top() == 3);
+ #endif
+ }
+
+ {
+ vector<TestObject> toVector;
+ for(int i = 0; i < 100; i++)
+ toVector.push_back(TestObject(i));
+
+ // template <class Allocator>
+ // priority_queue(this_type&& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL);
+ //
+ // explicit priority_queue(const compare_type& compare, container_type&& x);
+ //
+ // template <class InputIterator>
+ // priority_queue(InputIterator first, InputIterator last, const compare_type& compare, container_type&& x);
+ //
+ // void push(value_type&& x);
+
+ priority_queue<TestObject, vector<TestObject> > toPQ_0;
+ priority_queue<TestObject, vector<TestObject> > toPQ_A(toPQ_0.get_container().begin(), toPQ_0.get_container().begin(), eastl::less<TestObject>(), toPQ_0.get_container());
+ EATEST_VERIFY(toPQ_A.size() == 0);
+ toPQ_A.push(TestObject(1000));
+ EATEST_VERIFY(toPQ_A.size() == 1);
+
+ priority_queue<TestObject, vector<TestObject> > toPQ_B(eastl::move(toPQ_A), toPQ_A.get_container().get_allocator()); // It would be better if we also tested an alternative allocator.
+ EATEST_VERIFY((toPQ_B.size() == 1) && toPQ_A.empty());
+
+ eastl::vector<TestObject> toVectorM(toVector);
+ priority_queue<TestObject, vector<TestObject> > toPQ_C(eastl::less<TestObject>(), eastl::move(toVectorM));
+ EATEST_VERIFY((toPQ_C.size() == toVector.size()) && toVectorM.empty());
+
+ // template <class... Args>
+ // void emplace(Args&&... args);
+ priority_queue<TestObject, vector<TestObject> > toPQ_D;
+ toPQ_D.emplace(0, 1, 2);
+ EATEST_VERIFY(toPQ_D.size() == 1) && (toPQ_D.top() == TestObject(0, 1, 2));
+ }
+
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::stack<int, vector<int> >;
+template class eastl::stack<Align64, list<Align64> >;
+template class eastl::stack<TestObject, vector<TestObject> >;
+//template class eastl::stack<IntNode, intrusive_list<IntNode> >; // This test has been disabled as of the addition of initializer_list support to eastl::stack. initializer_lists have const nodes, which is incompatible with intrusive_list. You can use eastl::stack<IntNode, intrusive_list<IntNode> > as long as you don't use initializer_list with it. The problem with this line of code is that it forces compilation of the entire class.
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestStack
+//
+static int TestStack()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ {
+ // stack(const Sequence& x = Sequence());
+ stack<TestObject, list<TestObject> > toListStack;
+ stack<TestObject, list<TestObject> > toListStack2;
+
+
+ // bool empty() const;
+ // size_type size() const;
+ EATEST_VERIFY(toListStack.empty());
+ EATEST_VERIFY(toListStack.size() == 0);
+
+
+ // global operators
+ EATEST_VERIFY( (toListStack == toListStack2));
+ EATEST_VERIFY(!(toListStack != toListStack2));
+ EATEST_VERIFY( (toListStack <= toListStack2));
+ EATEST_VERIFY( (toListStack >= toListStack2));
+ EATEST_VERIFY(!(toListStack < toListStack2));
+ EATEST_VERIFY(!(toListStack > toListStack2));
+
+ // void push(const value_type& value);
+ // reference top();
+ // const_reference top() const;
+ toListStack.push(TestObject(0));
+ EATEST_VERIFY(toListStack.top() == TestObject(0));
+
+ toListStack.push(TestObject(1));
+ EATEST_VERIFY(toListStack.top() == TestObject(1));
+
+ toListStack.push(TestObject(2));
+ EATEST_VERIFY( toListStack.top() == TestObject(2));
+ EATEST_VERIFY(!toListStack.empty());
+ EATEST_VERIFY( toListStack.size() == 3);
+
+ // void pop();
+ toListStack.pop();
+ EATEST_VERIFY(toListStack.top() == TestObject(1));
+
+ toListStack.pop();
+ EATEST_VERIFY(toListStack.top() == TestObject(0));
+
+ toListStack.pop();
+ EATEST_VERIFY(toListStack.empty());
+ EATEST_VERIFY(toListStack.size() == 0);
+
+
+ // container_type& get_container();
+ // const container_type& get_container() const;
+ list<TestObject>& ref = toListStack.get_container();
+ EATEST_VERIFY(ref.size() == toListStack.size());
+
+
+ // stack(std::initializer_list<value_type> ilist);
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ stack<int> intStack = { 3, 4, 5 };
+ EATEST_VERIFY(intStack.size() == 3);
+ EATEST_VERIFY(intStack.top() == 5);
+ intStack.pop();
+ EATEST_VERIFY(intStack.top() == 4);
+ intStack.pop();
+ EATEST_VERIFY(intStack.top() == 3);
+ #endif
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ // stack(const Sequence& x = Sequence());
+ stack<TestObject, list<TestObject> > toListStack;
+ stack<TestObject, list<TestObject> > toListStack2;
+
+ // bool empty() const;
+ // size_type size() const;
+ EATEST_VERIFY(toListStack.empty());
+ EATEST_VERIFY(toListStack.size() == 0);
+
+
+ // global operators
+ EATEST_VERIFY( ((toListStack <=> toListStack2) == 0));
+ EATEST_VERIFY(!((toListStack <=> toListStack2) != 0));
+ EATEST_VERIFY( ((toListStack <=> toListStack2) <= 0));
+ EATEST_VERIFY( ((toListStack <=> toListStack2) >= 0));
+ EATEST_VERIFY(!((toListStack <=> toListStack2) < 0));
+ EATEST_VERIFY(!((toListStack <=> toListStack2) > 0));
+
+ toListStack.push(TestObject(0));
+ toListStack.push(TestObject(1));
+ toListStack2.push(TestObject(0));
+
+ EATEST_VERIFY(!((toListStack <=> toListStack2) == 0));
+ EATEST_VERIFY( ((toListStack <=> toListStack2) != 0));
+ EATEST_VERIFY( ((toListStack <=> toListStack2) >= 0));
+ EATEST_VERIFY(!((toListStack <=> toListStack2) <= 0));
+ EATEST_VERIFY( ((toListStack <=> toListStack2) > 0));
+ EATEST_VERIFY(!((toListStack <=> toListStack2) < 0));
+
+ // Verify toListStack2 > toListStack by element size
+ toListStack2.push(TestObject(3));
+ EATEST_VERIFY(!((toListStack <=> toListStack2) == 0));
+ EATEST_VERIFY( ((toListStack <=> toListStack2) != 0));
+ EATEST_VERIFY( ((toListStack <=> toListStack2) <= 0));
+ EATEST_VERIFY(!((toListStack <=> toListStack2) >= 0));
+ EATEST_VERIFY( ((toListStack <=> toListStack2) < 0));
+ EATEST_VERIFY(!((toListStack <=> toListStack2) > 0));
+
+ stack<TestObject, list<TestObject> > toListStack3;
+ stack<TestObject, list<TestObject> > toListStack4;
+
+ for (int i = 0; i < 10; i++)
+ {
+ toListStack3.push(TestObject(i));
+ if (i < 5)
+ toListStack4.push(TestObject(i));
+ }
+
+ // Verify toListStack4 is a strict subset of toListStack3
+ EATEST_VERIFY(!((toListStack3 <=> toListStack4) == 0));
+ EATEST_VERIFY( ((toListStack3 <=> toListStack4) != 0));
+ EATEST_VERIFY( ((toListStack3 <=> toListStack4) >= 0));
+ EATEST_VERIFY(!((toListStack3 <=> toListStack4) <= 0));
+ EATEST_VERIFY( ((toListStack3 <=> toListStack4) > 0));
+ EATEST_VERIFY(!((toListStack3 <=> toListStack4) < 0));
+
+ // Verify that even thoughn toListQueue4 has a smaller size, it's lexicographically larger
+ toListStack4.push(TestObject(11));
+ EATEST_VERIFY(!((toListStack3 <=> toListStack4) == 0));
+ EATEST_VERIFY( ((toListStack3 <=> toListStack4) != 0));
+ EATEST_VERIFY( ((toListStack3 <=> toListStack4) <= 0));
+ EATEST_VERIFY(!((toListStack3 <=> toListStack4) >= 0));
+ EATEST_VERIFY( ((toListStack3 <=> toListStack4) < 0));
+ EATEST_VERIFY(!((toListStack3 <=> toListStack4) > 0));
+ }
+
+ {
+ stack<TestObject, list<TestObject> > toListStack1;
+ stack<TestObject, list<TestObject> > toListStack2;
+ stack<TestObject, list<TestObject> > toListStack3;
+
+ for (int i = 0; i < 10; i++)
+ {
+ toListStack1.push(TestObject(i));
+ toListStack2.push(TestObject(9-i));
+ if (i < 5)
+ toListStack3.push(TestObject(i));
+ }
+
+ struct weak_ordering_stack
+ {
+ stack<TestObject, list<TestObject> > stack;
+ inline std::weak_ordering operator<=>(const weak_ordering_stack& b) const { return stack <=> b.stack; }
+ };
+
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_stack{toListStack1}, weak_ordering_stack{toListStack2}) == std::weak_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_stack{toListStack3}, weak_ordering_stack{toListStack1}) == std::weak_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_stack{toListStack2}, weak_ordering_stack{toListStack1}) == std::weak_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_stack{toListStack2}, weak_ordering_stack{toListStack3}) == std::weak_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_stack{toListStack1}, weak_ordering_stack{toListStack1}) == std::weak_ordering::equivalent);
+ }
+#endif
+
+
+ {
+ vector<TestObject> toVector;
+ for(int i = 0; i < 100; i++)
+ toVector.push_back(TestObject(i));
+
+ // template <class Allocator>
+ // stack(this_type&& x, const Allocator& allocator, typename eastl::enable_if<eastl::uses_allocator<container_type, Allocator>::value>::type* = NULL);
+ //
+ // explicit stack(container_type&& x);
+ //
+ // void push(value_type&& x);
+ stack<TestObject, vector<TestObject> > toS_0;
+ stack<TestObject, vector<TestObject> > toS_A(eastl::move(toS_0), toS_0.get_container().get_allocator()); // It would be better if we also tested an alternative allocator.
+ EATEST_VERIFY(toS_A.size() == 0);
+ toS_A.push(TestObject(1000));
+ EATEST_VERIFY(toS_A.size() == 1);
+
+ stack<TestObject, vector<TestObject> > toS_B(eastl::move(toS_A), toS_A.get_container().get_allocator()); // It would be better if we also tested an alternative allocator.
+ EATEST_VERIFY((toS_B.size() == 1) && toS_A.empty());
+
+ eastl::vector<TestObject> toVectorM(toVector);
+ stack<TestObject, vector<TestObject> > toS_C(eastl::move(toVectorM));
+ EATEST_VERIFY((toS_C.size() == toVector.size()) && toVectorM.empty());
+
+ {
+ // template <class... Args>
+ // void emplace_back(Args&&... args);
+ stack<TestObject, vector<TestObject>> toS_D;
+ toS_D.emplace_back(0, 1, 2);
+ EATEST_VERIFY(toS_D.size() == 1) && (toS_D.top() == TestObject(0, 1, 2));
+ }
+
+ {
+ // template <class... Args>
+ // decltype(auto) emplace(Args&&... args);
+ stack<TestObject, vector<TestObject>> toS_D;
+ auto it = toS_D.emplace(0, 1, 2);
+ EATEST_VERIFY(toS_D.size() == 1) && (toS_D.top() == TestObject(0, 1, 2));
+ EATEST_VERIFY(it == TestObject(0, 1, 2));
+ }
+ }
+
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ return nErrorCount;
+}
+
+
+
+
+
+struct Size0
+{
+ // Empty
+};
+
+struct Size4
+{
+ uint32_t m32;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestCompressedPair
+//
+static int TestCompressedPair()
+{
+ int nErrorCount = 0;
+
+ compressed_pair<Size0, Size0> cp00;
+ compressed_pair<Size0, Size4> cp04;
+ compressed_pair<Size4, Size0> cp40;
+ compressed_pair<Size4, Size4> cp44;
+
+ EATEST_VERIFY(sizeof(cp00) <= 4);
+ EATEST_VERIFY(sizeof(cp04) <= 4);
+ EATEST_VERIFY(sizeof(cp40) <= 4);
+ EATEST_VERIFY(sizeof(cp44) <= 8);
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+template <typename T>
+struct CallTraitsContainer
+{
+ typedef typename eastl::call_traits<T>::param_type param_type;
+ typedef typename eastl::call_traits<T>::reference reference;
+ typedef typename eastl::call_traits<T>::const_reference const_reference;
+ typedef typename eastl::call_traits<T>::value_type result_type;
+ typedef T value_type;
+
+public:
+ value_type mValue;
+
+
+ CallTraitsContainer() { }
+ CallTraitsContainer(param_type p) : mValue(p) { }
+
+ CallTraitsContainer<T>& operator=(const CallTraitsContainer<T>&) { } // Defined simply to prevent possible compiler warnings.
+
+ result_type value() { return mValue; }
+
+ reference get() { return mValue; }
+ const_reference const_get() const { return mValue; }
+
+ void call(param_type p){ }
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestCallTraits
+//
+static int TestCallTraits()
+{
+ int nErrorCount = 0;
+
+ CallTraitsContainer<int> ctcInt;
+ CallTraitsContainer<int*> ctcIntPtr;
+ CallTraitsContainer<int&> ctcVoid(nErrorCount);
+ CallTraitsContainer<int[3]> ctcIntArray;
+
+ char buffer[128];
+ sprintf(buffer, "%p %p %p %p", &ctcInt, &ctcIntPtr, &ctcVoid, &ctcIntArray);
+
+ return nErrorCount;
+}
+
+
+static int AccumulateMultiply(int x, int y)
+{
+ return (x * y);
+}
+
+static eastl::string AccumulateString(eastl::string s, int x)
+{
+ s += '0' + static_cast<char>(x);
+ return s;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestNumeric
+//
+static int TestNumeric()
+{
+ int nErrorCount = 0;
+
+ //template <typename InputIterator, typename T>
+ //T accumulate(InputIterator first, InputIterator last, T init);
+ eastl::vector<int> v(5, 0);
+ eastl::generate(v.begin(), v.end(), GenerateIncrementalIntegers<int>(1));
+
+ int sum = eastl::accumulate(v.begin(), v.end(), 100);
+ EATEST_VERIFY(sum == (100 + 1 + 2 + 3 + 4 + 5));
+
+
+ // template <typename InputIterator, typename T, typename BinaryOperation>
+ //T accumulate(InputIterator first, InputIterator last, T init, BinaryOperation binary_op);
+
+ eastl::generate(v.begin(), v.end(), GenerateIncrementalIntegers<int>(1));
+ int product = eastl::accumulate(v.begin(), v.end(), 100, AccumulateMultiply);
+ EATEST_VERIFY(product == (100 * 1 * 2 * 3 * 4 * 5));
+
+ eastl::generate(v.begin(), v.end(), GenerateIncrementalIntegers<int>(1));
+ eastl::string s = eastl::accumulate(v.begin(), v.end(), eastl::string("0"), AccumulateString);
+ EATEST_VERIFY(s == "012345");
+
+
+ //template <typename InputIterator1, typename InputIterator2, typename T>
+ //T inner_product(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, T init);
+ // To do.
+
+ //template <typename InputIterator1, typename InputIterator2, typename T, typename BinaryOperation1, typename BinaryOperation2>
+ //T inner_product(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, T init, BinaryOperation1 binary_op1, BinaryOperation2 binary_op2)
+ // To do.
+
+ //template <typename InputIterator, typename OutputIterator>
+ //OutputIterator partial_sum(InputIterator first, InputIterator last, OutputIterator result);
+ // To do.
+
+ //template <typename InputIterator, typename OutputIterator, typename BinaryOperation>
+ //OutputIterator partial_sum(InputIterator first, InputIterator last, OutputIterator result, BinaryOperation binary_op);
+ // To do.
+
+ return nErrorCount;
+}
+
+#if defined(EA_COMPILER_CPP20_ENABLED)
+template <typename T>
+static constexpr int SignedIntMidpoint()
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::midpoint(T(0), T(0)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(0), T(2)) == T(1));
+ EATEST_VERIFY(eastl::midpoint(T(0), T(4)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(0), T(8)) == T(4));
+ EATEST_VERIFY(eastl::midpoint(T(2), T(0)) == T(1));
+ EATEST_VERIFY(eastl::midpoint(T(4), T(0)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(8), T(0)) == T(4));
+
+ EATEST_VERIFY(eastl::midpoint(T(1), T(1)) == T(1));
+ EATEST_VERIFY(eastl::midpoint(T(1), T(3)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(3), T(1)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(2), T(6)) == T(4));
+ EATEST_VERIFY(eastl::midpoint(T(6), T(2)) == T(4));
+
+ EATEST_VERIFY(eastl::midpoint(T(-1), T(-1)) == T(-1));
+ EATEST_VERIFY(eastl::midpoint(T(-1), T(-3)) == T(-2));
+ EATEST_VERIFY(eastl::midpoint(T(-3), T(-1)) == T(-2));
+ EATEST_VERIFY(eastl::midpoint(T(-2), T(-6)) == T(-4));
+ EATEST_VERIFY(eastl::midpoint(T(-6), T(-2)) == T(-4));
+
+ EATEST_VERIFY(eastl::midpoint(T(-0), T(0)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(0), T(-0)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(-0), T(-0)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(-1), T(1)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(-10), T(10)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(-3), T(7)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(-7), T(3)) == T(-2));
+ EATEST_VERIFY(eastl::midpoint(T(-2), T(6)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(-6), T(2)) == T(-2));
+ EATEST_VERIFY(eastl::midpoint(T(2), T(-6)) == T(-2));
+ EATEST_VERIFY(eastl::midpoint(T(6), T(-2)) == T(2));
+
+ // If an odd sum, midpoint should round towards the LHS operand.
+ EATEST_VERIFY(eastl::midpoint(T(0), T(5)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(5), T(0)) == T(3));
+ EATEST_VERIFY(eastl::midpoint(T(1), T(4)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(4), T(1)) == T(3));
+ EATEST_VERIFY(eastl::midpoint(T(7), T(10)) == T(8));
+ EATEST_VERIFY(eastl::midpoint(T(10), T(7)) == T(9));
+ EATEST_VERIFY(eastl::midpoint(T(-1), T(2)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(2), T(-1)) == T(1));
+ EATEST_VERIFY(eastl::midpoint(T(-5), T(4)) == T(-1));
+ EATEST_VERIFY(eastl::midpoint(T(4), T(-5)) == T(0));
+
+ // Test absolute limits
+ constexpr T MIN = eastl::numeric_limits<T>::min();
+ constexpr T MAX = eastl::numeric_limits<T>::max();
+
+ EATEST_VERIFY(eastl::midpoint(MIN, MIN) == MIN);
+ EATEST_VERIFY(eastl::midpoint(MAX, MAX) == MAX);
+ EATEST_VERIFY(eastl::midpoint(MIN, MAX) == T(-1));
+ EATEST_VERIFY(eastl::midpoint(MAX, MIN) == T(0));
+ EATEST_VERIFY(eastl::midpoint(MIN, T(0)) == MIN / 2);
+ EATEST_VERIFY(eastl::midpoint(T(0), MIN) == MIN / 2);
+ EATEST_VERIFY(eastl::midpoint(MAX, T(0)) == (MAX / 2) + 1);
+ EATEST_VERIFY(eastl::midpoint(T(0), MAX) == (MAX / 2));
+
+ EATEST_VERIFY(eastl::midpoint(MIN, T(10)) == (MIN / 2) + 5);
+ EATEST_VERIFY(eastl::midpoint(T(10), MIN) == (MIN / 2) + 5);
+ EATEST_VERIFY(eastl::midpoint(MAX, T(10)) == (MAX / 2) + 5 + 1);
+ EATEST_VERIFY(eastl::midpoint(T(10), MAX) == (MAX / 2) + 5);
+ EATEST_VERIFY(eastl::midpoint(MIN, T(-10)) == (MIN / 2) - 5);
+ EATEST_VERIFY(eastl::midpoint(T(-10), MIN) == (MIN / 2) - 5);
+ EATEST_VERIFY(eastl::midpoint(MAX, T(-10)) == (MAX / 2) - 5 + 1);
+ EATEST_VERIFY(eastl::midpoint(T(-10), MAX) == (MAX / 2) - 5);
+
+ return nErrorCount;
+}
+
+template <typename T>
+static constexpr int UnsignedIntMidpoint()
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::midpoint(T(0), T(0)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(0), T(2)) == T(1));
+ EATEST_VERIFY(eastl::midpoint(T(0), T(4)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(0), T(8)) == T(4));
+ EATEST_VERIFY(eastl::midpoint(T(2), T(0)) == T(1));
+ EATEST_VERIFY(eastl::midpoint(T(4), T(0)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(8), T(0)) == T(4));
+
+ EATEST_VERIFY(eastl::midpoint(T(1), T(1)) == T(1));
+ EATEST_VERIFY(eastl::midpoint(T(1), T(3)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(3), T(1)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(2), T(6)) == T(4));
+ EATEST_VERIFY(eastl::midpoint(T(6), T(2)) == T(4));
+
+ // If an odd sum, midpoint should round towards the LHS operand.
+ EATEST_VERIFY(eastl::midpoint(T(0), T(5)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(5), T(0)) == T(3));
+ EATEST_VERIFY(eastl::midpoint(T(1), T(4)) == T(2));
+ EATEST_VERIFY(eastl::midpoint(T(4), T(1)) == T(3));
+ EATEST_VERIFY(eastl::midpoint(T(7), T(10)) == T(8));
+ EATEST_VERIFY(eastl::midpoint(T(10), T(7)) == T(9));
+
+ // Test absolute limits
+ constexpr T MIN = eastl::numeric_limits<T>::min();
+ constexpr T MAX = eastl::numeric_limits<T>::max();
+
+ EATEST_VERIFY(eastl::midpoint(MIN, MIN) == MIN);
+ EATEST_VERIFY(eastl::midpoint(MAX, MAX) == MAX);
+ EATEST_VERIFY(eastl::midpoint(MIN, MAX) == MAX / 2);
+ EATEST_VERIFY(eastl::midpoint(MAX, MIN) == (MAX / 2) + 1);
+ EATEST_VERIFY(eastl::midpoint(MIN, T(0)) == T(0));
+ EATEST_VERIFY(eastl::midpoint(T(0), MIN) == T(0));
+
+ EATEST_VERIFY(eastl::midpoint(MIN, T(10)) == (MIN / 2) + 5);
+ EATEST_VERIFY(eastl::midpoint(T(10), MIN) == (MIN / 2) + 5);
+ EATEST_VERIFY(eastl::midpoint(MAX, T(10)) == (MAX / 2) + 5 + 1);
+ EATEST_VERIFY(eastl::midpoint(T(10), MAX) == (MAX / 2) + 5);
+
+ return nErrorCount;
+}
+
+template <typename T>
+static constexpr int FloatMidpoint()
+{
+ // for use with floats, double, long doubles.
+ int nErrorCount = 0;
+ EATEST_VERIFY(eastl::midpoint(T(0.0), T(0.0)) == T(0.0));
+ EATEST_VERIFY(eastl::midpoint(T(0.0), T(2.0)) == T(1.0));
+ EATEST_VERIFY(eastl::midpoint(T(0.0), T(4.0)) == T(2.0));
+ EATEST_VERIFY(eastl::midpoint(T(2.0), T(0.0)) == T(1.0));
+ EATEST_VERIFY(eastl::midpoint(T(4.0), T(0.0)) == T(2.0));
+
+ EATEST_VERIFY(eastl::midpoint(T(0.5), T(0.5)) == T(0.5));
+ EATEST_VERIFY(eastl::midpoint(T(0.0), T(0.5)) == T(0.25));
+ EATEST_VERIFY(eastl::midpoint(T(0.5), T(0.0)) == T(0.25));
+ EATEST_VERIFY(eastl::midpoint(T(0.5), T(1.0)) == T(0.75));
+ EATEST_VERIFY(eastl::midpoint(T(1.0), T(0.5)) == T(0.75));
+
+ EATEST_VERIFY(eastl::midpoint(T(-0.0), T(0.0)) == T(0.0));
+ EATEST_VERIFY(eastl::midpoint(T(0.0), T(-0.0)) == T(0.0));
+ EATEST_VERIFY(eastl::midpoint(T(-0.0), T(-0.0)) == T(0.0));
+ EATEST_VERIFY(eastl::midpoint(T(-1.0), T(2.0)) == T(0.5));
+ EATEST_VERIFY(eastl::midpoint(T(-2.0), T(1)) == T(-0.5));
+ EATEST_VERIFY(eastl::midpoint(T(-3.0), T(6.0)) == T(1.5));
+ EATEST_VERIFY(eastl::midpoint(T(-6.0), T(3.0)) == T(-1.5));
+
+ // Test absolute limits
+ const T MIN = eastl::numeric_limits<T>::min();
+ const T MAX = eastl::numeric_limits<T>::max();
+
+ EATEST_VERIFY(eastl::midpoint(MIN, MIN) == MIN);
+ EATEST_VERIFY(eastl::midpoint(MAX, MAX) == MAX);
+ EATEST_VERIFY(eastl::midpoint(MIN, MAX) == MAX / 2);
+ EATEST_VERIFY(eastl::midpoint(MAX, MIN) == MAX / 2);
+ EATEST_VERIFY(eastl::midpoint(-MAX, MIN) == -MAX / 2);
+
+ EATEST_VERIFY(eastl::midpoint(MIN, T(9.0)) == T(4.5));
+ EATEST_VERIFY(eastl::midpoint(MIN, T(-9.0)) == T(-4.5));
+ EATEST_VERIFY(eastl::midpoint(T(9.0), MIN) == T(4.5));
+ EATEST_VERIFY(eastl::midpoint(T(-9.0), MIN) == T(-4.5));
+ EATEST_VERIFY(eastl::midpoint(MAX, T(9.0)) == MAX / 2 + T(4.5));
+ EATEST_VERIFY(eastl::midpoint(MAX, T(-9.0)) == MAX / 2 - T(4.5));
+ EATEST_VERIFY(eastl::midpoint(T(9.0), MAX) == MAX / 2 + T(4.5));
+ EATEST_VERIFY(eastl::midpoint(T(-9.0), MAX) == MAX / 2 - T(4.5));
+
+ return nErrorCount;
+}
+
+template <typename T>
+static constexpr int PointerMidpoint()
+{
+ int nErrorCount = 0;
+
+ const T ARR[100] = {};
+
+ EATEST_VERIFY(eastl::midpoint(ARR, ARR) == ARR);
+ EATEST_VERIFY(eastl::midpoint(ARR, ARR + 100) == ARR + 50);
+ EATEST_VERIFY(eastl::midpoint(ARR + 100, ARR) == ARR + 50);
+ EATEST_VERIFY(eastl::midpoint(ARR, ARR + 25) == ARR + 12);
+ EATEST_VERIFY(eastl::midpoint(ARR + 25, ARR) == ARR + 13);
+ EATEST_VERIFY(eastl::midpoint(ARR, ARR + 13) == ARR + 6);
+ EATEST_VERIFY(eastl::midpoint(ARR + 13, ARR) == ARR + 7);
+ EATEST_VERIFY(eastl::midpoint(ARR + 50, ARR + 100) == ARR + 75);
+ EATEST_VERIFY(eastl::midpoint(ARR + 100, ARR + 50) == ARR + 75);
+
+ return nErrorCount;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMidpoint
+//
+static int TestMidpoint()
+{
+ int nErrorCount = 0;
+
+ // template <typename T>
+ // constexpr eastl::enable_if_t<eastl::is_arithmetic_v<T> && !eastl::is_same_v<eastl::remove_cv_t<T>, bool>, T>
+ // midpoint(const T lhs, const T rhs) EA_NOEXCEPT
+ nErrorCount += SignedIntMidpoint<int>();
+ nErrorCount += SignedIntMidpoint<char>();
+ nErrorCount += SignedIntMidpoint<short>();
+ nErrorCount += SignedIntMidpoint<long>();
+ nErrorCount += SignedIntMidpoint<long long>();
+
+ nErrorCount += UnsignedIntMidpoint<unsigned int>();
+ nErrorCount += UnsignedIntMidpoint<unsigned char>();
+ nErrorCount += UnsignedIntMidpoint<unsigned short>();
+ nErrorCount += UnsignedIntMidpoint<unsigned long>();
+ nErrorCount += UnsignedIntMidpoint<unsigned long long>();
+
+ nErrorCount += FloatMidpoint<float>();
+ nErrorCount += FloatMidpoint<double>();
+ nErrorCount += FloatMidpoint<long double>();
+
+ // template <typename T>
+ // constexpr eastl::enable_if_t<eastl::is_object_v<T>, const T*> midpoint(const T* lhs, const T* rhs)
+ nErrorCount += PointerMidpoint<int>();
+ nErrorCount += PointerMidpoint<char>();
+ nErrorCount += PointerMidpoint<short>();
+ nErrorCount += PointerMidpoint<float>();
+ nErrorCount += PointerMidpoint<double>();
+ nErrorCount += PointerMidpoint<long double>();
+
+ return nErrorCount;
+}
+
+
+template <typename T>
+static constexpr int FloatLerp()
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::lerp(T(0.0), T(0.0), T(0.0)) == T(0.0));
+ EATEST_VERIFY(eastl::lerp(T(1.0), T(0.0), T(0.0)) == T(1.0));
+ EATEST_VERIFY(eastl::lerp(T(-1.0), T(0.0), T(0.0)) == T(-1.0));
+ EATEST_VERIFY(eastl::lerp(T(0.0), T(1.0), T(0.0)) == T(0.0));
+ EATEST_VERIFY(eastl::lerp(T(0.0), T(-1.0), T(0.0)) == T(0.0));
+ EATEST_VERIFY(eastl::lerp(T(-1.0), T(1.0), T(1.0)) == T(1.0));
+ EATEST_VERIFY(eastl::lerp(T(1.0), T(-1.0), T(1.0)) == T(-1.0));
+ EATEST_VERIFY(eastl::lerp(T(-1.0), T(1.0), T(0.5)) == T(0.0));
+ EATEST_VERIFY(eastl::lerp(T(1.0), T(-1.0), T(0.5)) == T(0.0));
+ EATEST_VERIFY(eastl::lerp(T(5.0), T(5.0), T(0.5)) == T(5.0));
+ EATEST_VERIFY(eastl::lerp(T(-5.0), T(-5.0), T(0.5)) == T(-5.0));
+ EATEST_VERIFY(eastl::lerp(T(1.0), T(2.0), T(1.0)) == T(2.0));
+ EATEST_VERIFY(eastl::lerp(T(2.0), T(1.0), T(1.0)) == T(1.0));
+ EATEST_VERIFY(eastl::lerp(T(1.0), T(2.0), T(1.0)) == T(2.0));
+ EATEST_VERIFY(eastl::lerp(T(1.0), T(2.0), T(2.0)) == T(3.0));
+ EATEST_VERIFY(eastl::lerp(T(2.0), T(1.0), T(2.0)) == T(0.0));
+ EATEST_VERIFY(eastl::lerp(T(1.0), T(-2.0), T(2.0)) == T(-5.0));
+ EATEST_VERIFY(eastl::lerp(T(-1.0), T(2.0), T(2.0)) == T(5.0));
+ EATEST_VERIFY(eastl::lerp(T(-1.5), T(1.5), T(0.75)) == T(0.75));
+ EATEST_VERIFY(eastl::lerp(T(0.125), T(1.75), T(0.25)) == T(0.53125));
+ EATEST_VERIFY(eastl::lerp(T(-0.125), T(-1.75), T(0.5)) == T(-0.9375));
+ EATEST_VERIFY(eastl::lerp(T(-0.125), T(1.5), T(2.5)) == T(3.9375));
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestLerp
+//
+static int TestLerp()
+{
+ int nErrorCount = 0;
+
+ // template <class T>
+ // constexpr T lerp(const T a, const T b, const T t) EA_NOEXCEPT
+ nErrorCount += FloatLerp<float>();
+ nErrorCount += FloatLerp<double>();
+ nErrorCount += FloatLerp<long double>();
+
+ return nErrorCount;
+}
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestAdaptors
+//
+static int TestAdaptors()
+{
+ int nErrorCount = 0;
+
+ // reverse lvalue container
+ {
+ int int_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::vector<int> original(begin(int_data), end(int_data));
+
+ eastl::vector<int> reversed;
+ for(auto& e : eastl::reverse(original))
+ reversed.push_back(e);
+
+ eastl::reverse(begin(original), end(original));
+ EATEST_VERIFY(reversed == original);
+ }
+
+ // reverse const lvalue container
+ {
+ int int_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ const eastl::vector<int> original(begin(int_data), end(int_data));
+
+ eastl::vector<int> reversed;
+ for(auto& e : eastl::reverse(original))
+ reversed.push_back(e);
+
+ eastl::vector<int> reversed_original(original);
+ eastl::reverse(begin(reversed_original), end(reversed_original));
+ EATEST_VERIFY(reversed == reversed_original);
+ }
+
+ // reverse rvalue container
+ {
+ int int_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::vector<int> original(begin(int_data), end(int_data));
+
+ eastl::vector<int> reversed;
+ for (auto& e : eastl::reverse(eastl::vector<int>(original)))
+ reversed.push_back(e);
+
+ eastl::reverse(begin(original), end(original));
+ EATEST_VERIFY(reversed == original);
+ }
+
+ return nErrorCount;
+}
+
+#if defined(EA_COMPILER_CPP20_ENABLED)
+template <typename T>
+int TestHasSingleBit()
+{
+ int nErrorCount = 0;
+
+ VERIFY(eastl::has_single_bit(T(0)) == false);
+ VERIFY(eastl::has_single_bit(T(1)) == true);
+ VERIFY(eastl::has_single_bit(T(2)) == true);
+ VERIFY(eastl::has_single_bit(T(3)) == false);
+
+ VERIFY(eastl::has_single_bit(eastl::numeric_limits<T>::min()) == false);
+ VERIFY(eastl::has_single_bit(eastl::numeric_limits<T>::max()) == false);
+
+ for (int i = 4; i < eastl::numeric_limits<T>::digits; i++)
+ {
+ T power_of_two = static_cast<T>(T(1U) << i);
+ VERIFY(eastl::has_single_bit(power_of_two));
+ VERIFY(eastl::has_single_bit(static_cast<T>(power_of_two - 1)) == false);
+ }
+
+ return nErrorCount;
+}
+
+template <typename T>
+static int TestBitCeil()
+{
+ int nErrorCount = 0;
+
+ VERIFY(eastl::bit_ceil(T(0)) == T(1));
+ VERIFY(eastl::bit_ceil(T(1)) == T(1));
+ VERIFY(eastl::bit_ceil(T(2)) == T(2));
+ VERIFY(eastl::bit_ceil(T(3)) == T(4));
+
+ EA_CONSTEXPR auto DIGITS = eastl::numeric_limits<T>::digits;
+ EA_CONSTEXPR auto MIN = eastl::numeric_limits<T>::min();
+ EA_CONSTEXPR auto MAX = static_cast<T>(T(1) << (DIGITS - 1));
+
+ VERIFY(eastl::bit_ceil(MAX) == MAX);
+ VERIFY(eastl::bit_ceil(static_cast<T>(MAX - 1)) == MAX);
+ VERIFY(eastl::bit_ceil(MIN) == T(1));
+
+ for (int i = 4; i < eastl::numeric_limits<T>::digits; i++)
+ {
+ T power_of_two = static_cast<T>(T(1U) << i);
+ VERIFY(eastl::bit_ceil(power_of_two) == power_of_two);
+ VERIFY(eastl::bit_ceil(static_cast<T>(power_of_two - 1)) == power_of_two);
+ }
+
+ return nErrorCount;
+}
+
+template <typename T>
+static int TestBitFloor()
+{
+ int nErrorCount = 0;
+ VERIFY(eastl::bit_floor(T(0)) == T(0));
+ VERIFY(eastl::bit_floor(T(1)) == T(1));
+ VERIFY(eastl::bit_floor(T(2)) == T(2));
+ VERIFY(eastl::bit_floor(T(3)) == T(2));
+
+ EA_CONSTEXPR auto DIGITS = eastl::numeric_limits<T>::digits;
+ EA_CONSTEXPR auto MIN = eastl::numeric_limits<T>::min();
+ EA_CONSTEXPR auto MAX = eastl::numeric_limits<T>::max();
+
+ VERIFY(eastl::bit_floor(MAX) == T(1) << (DIGITS - 1));
+ VERIFY(eastl::bit_floor(MIN) == T(0));
+
+ for (int i = 4; i < eastl::numeric_limits<T>::digits; i++)
+ {
+ T power_of_two = static_cast<T>(T(1U) << i);
+ VERIFY(eastl::bit_floor(power_of_two) == power_of_two);
+ VERIFY(eastl::bit_floor(static_cast<T>(power_of_two + 1)) == power_of_two);
+ }
+ return nErrorCount;
+}
+
+template <typename T>
+static int TestBitWidth()
+{
+ int nErrorCount = 0;
+
+ VERIFY(eastl::bit_width(T(0)) == T(0));
+ VERIFY(eastl::bit_width(T(1)) == T(1));
+ VERIFY(eastl::bit_width(T(2)) == T(2));
+ VERIFY(eastl::bit_width(T(3)) == T(2));
+
+ EA_CONSTEXPR auto DIGITS = eastl::numeric_limits<T>::digits;
+ EA_CONSTEXPR auto MIN = eastl::numeric_limits<T>::min();
+ EA_CONSTEXPR auto MAX = eastl::numeric_limits<T>::max();
+
+ VERIFY(eastl::bit_width(MIN) == 0);
+ VERIFY(eastl::bit_width(MAX) == DIGITS);
+
+ for (int i = 4; i < eastl::numeric_limits<T>::digits; i++)
+ {
+ T power_of_two = static_cast<T>(T(1U) << i);
+ VERIFY(eastl::bit_width(power_of_two) == static_cast<T>(i + 1));
+ }
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestPowerofTwo
+//
+static int TestPowerOfTwo()
+{
+ int nErrorCount = 0;
+ nErrorCount += TestHasSingleBit<unsigned int>();
+ nErrorCount += TestHasSingleBit<unsigned char>();
+ nErrorCount += TestHasSingleBit<unsigned short>();
+ nErrorCount += TestHasSingleBit<unsigned long>();
+ nErrorCount += TestHasSingleBit<unsigned long long>();
+
+ nErrorCount += TestBitCeil<unsigned int>();
+ nErrorCount += TestBitCeil<unsigned char>();
+ nErrorCount += TestBitCeil<unsigned short>();
+ nErrorCount += TestBitCeil<unsigned long>();
+ nErrorCount += TestBitCeil<unsigned long long>();
+
+ nErrorCount += TestBitFloor<unsigned int>();
+ nErrorCount += TestBitFloor<unsigned char>();
+ nErrorCount += TestBitFloor<unsigned short>();
+ nErrorCount += TestBitFloor<unsigned long>();
+ nErrorCount += TestBitFloor<unsigned long long>();
+
+ nErrorCount += TestBitWidth<unsigned int>();
+ nErrorCount += TestBitWidth<unsigned char>();
+ nErrorCount += TestBitWidth<unsigned short>();
+ nErrorCount += TestBitWidth<unsigned long>();
+ nErrorCount += TestBitWidth<unsigned long long>();
+
+ return nErrorCount;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// TestExtra
+//
+int TestExtra()
+{
+ int nErrorCount = 0;
+
+ nErrorCount += TestForwardDeclarations();
+ nErrorCount += TestQueue();
+ nErrorCount += TestPriorityQueue();
+ nErrorCount += TestStack();
+ nErrorCount += TestCompressedPair();
+ nErrorCount += TestCallTraits();
+ nErrorCount += TestNumeric();
+ nErrorCount += TestAdaptors();
+#if defined(EA_COMPILER_CPP20_ENABLED)
+ nErrorCount += TestMidpoint();
+ nErrorCount += TestLerp();
+ nErrorCount += TestPowerOfTwo();
+#endif
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestFinally.cpp b/EASTL/test/source/TestFinally.cpp
new file mode 100644
index 0000000..6e6e595
--- /dev/null
+++ b/EASTL/test/source/TestFinally.cpp
@@ -0,0 +1,107 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/finally.h>
+
+
+int TestFinally()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ {
+ #if defined(EA_COMPILER_CPP17_ENABLED)
+ {
+ // requires CTAD (class template argument deduction)
+ int a = 0;
+ {
+ VERIFY(a == 0);
+ eastl::finally _([&] { a = 42; });
+ VERIFY(a == 0);
+ }
+ VERIFY(a == 42);
+ }
+ #endif
+
+ {
+ int a = 0;
+ {
+ VERIFY(a == 0);
+ auto _ = eastl::make_finally([&] { a = 42; });
+ VERIFY(a == 0);
+ }
+ VERIFY(a == 42);
+ }
+
+ {
+ int a = 0;
+ {
+ VERIFY(a == 0);
+ auto f = eastl::make_finally([&] { a = 42; });
+ VERIFY(a == 0);
+ f.dismiss();
+ VERIFY(a == 0);
+ }
+ VERIFY(a == 0);
+ }
+
+ {
+ int a = 0;
+ {
+ VERIFY(a == 0);
+ auto f = eastl::make_finally([&] { a = 42; });
+ VERIFY(a == 0);
+ f.execute();
+ VERIFY(a == 42);
+ }
+ VERIFY(a == 42);
+ }
+
+ {
+ int a = 0;
+ {
+ VERIFY(a == 0);
+ auto f = eastl::make_finally([&] { a = 42; });
+ VERIFY(a == 0);
+ f.execute();
+ VERIFY(a == 42);
+
+ // verify the finally object doesn't re-run the callback on scope-exit.
+ a = -1;
+ }
+ VERIFY(a == -1);
+ }
+
+ {
+ struct local_flag { bool b = false; };
+
+ local_flag lf;
+ VERIFY(lf.b == false);
+
+ { auto _ = eastl::make_finally([&] { lf.b = true; }); }
+
+ VERIFY(lf.b);
+ }
+
+ // This currently does not compile by design.
+ //
+ // {
+ // int a = 0;
+ // auto lbda = [&a] { a = 1234; };
+ // {
+ // VERIFY(a == 0);
+ // auto _ = eastl::make_finally(lbda); // compiler error
+ // VERIFY(a == 0);
+ // }
+ // VERIFY(a == 1234);
+ // }
+ }
+
+ return nErrorCount;
+}
+
+
diff --git a/EASTL/test/source/TestFixedFunction.cpp b/EASTL/test/source/TestFixedFunction.cpp
new file mode 100644
index 0000000..272b545
--- /dev/null
+++ b/EASTL/test/source/TestFixedFunction.cpp
@@ -0,0 +1,614 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include <EABase/eabase.h>
+#include <EAAssert/eaassert.h>
+
+// Included prior to EASTLTest.h to guard against the following bug resurfacing:
+// https://github.com/electronicarts/EASTL/issues/275
+#include <EASTL/fixed_function.h>
+
+#include "EASTLTest.h"
+#include <EASTL/numeric.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <functional>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionDtor
+//
+int TestFixedFunctionDtor()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ {
+ TestObject to;
+ TestObject::Reset();
+ {
+ eastl::fixed_function<sizeof(TestObject), void(void)> ff = [to] {};
+ ff();
+ }
+ VERIFY(TestObject::IsClear());
+ }
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionStdBind
+//
+int TestFixedFunctionStdBind()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+ int val = 0;
+
+ {
+ TestObject to;
+ auto lambda = [to, &val] { ++val; };
+ TestObject::Reset();
+ {
+ eastl::fixed_function<64, void(void)> ff = std::bind(lambda);
+ ff();
+ }
+ VERIFY(TestObject::IsClear());
+ VERIFY(val == 1);
+ }
+ {
+ TestObject to;
+ auto lambda = [to, &val] { ++val; };
+ TestObject::Reset();
+ {
+ eastl::fixed_function<64, void(void)> ff = nullptr;
+ ff = std::bind(lambda);
+ ff();
+ }
+ VERIFY(TestObject::IsClear());
+ VERIFY(val == 2);
+ }
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionReferenceWrapper
+//
+int TestFixedFunctionReferenceWrapper()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+ int val = 0;
+
+ {
+ TestObject to;
+ auto lambda = [to, &val] { ++val; };
+ TestObject::Reset();
+ {
+ eastl::fixed_function<sizeof(eastl::reference_wrapper<decltype(lambda)>), void(void)> ff = eastl::reference_wrapper<decltype(lambda)>(lambda);
+ ff();
+ }
+ VERIFY(TestObject::IsClear());
+ VERIFY(val == 1);
+ }
+ {
+ TestObject to;
+ auto lambda = [to, &val] { ++val; };
+ TestObject::Reset();
+ {
+ eastl::fixed_function<sizeof(eastl::reference_wrapper<decltype(lambda)>), void(void)> ff = nullptr;
+ ff = eastl::reference_wrapper<decltype(lambda)>(lambda);
+ ff();
+ }
+ VERIFY(TestObject::IsClear());
+ VERIFY(val == 2);
+ }
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionFunctionPointer
+//
+
+static void TestVoidRet(int* p)
+{
+ *p += 1;
+}
+
+static int TestIntRet(int* p)
+{
+ int ret = *p;
+ *p += 1;
+ return ret;
+}
+
+int TestFixedFunctionFunctionPointer()
+{
+ using namespace eastl;
+
+ typedef int (*FuncPtrInt)(int*);
+ typedef void (*FuncPtrVoid)(int*);
+
+ int nErrorCount = 0;
+ int val = 0;
+
+ {
+ eastl::fixed_function<sizeof(FuncPtrVoid), void(int*)> ff = &TestVoidRet;
+ ff(&val);
+ VERIFY(val == 1);
+ }
+ {
+ eastl::fixed_function<sizeof(FuncPtrVoid), void(int*)> ff;
+ ff = &TestVoidRet;
+ ff(&val);
+ VERIFY(val == 2);
+ }
+ {
+ eastl::fixed_function<sizeof(FuncPtrInt), int(int*)> ff = &TestIntRet;
+ int ret = ff(&val);
+ VERIFY(ret == 2);
+ VERIFY(val == 3);
+ }
+ {
+ eastl::fixed_function<sizeof(FuncPtrInt), int(int*)> ff;
+ ff = &TestIntRet;
+ int ret = ff(&val);
+ VERIFY(ret == 3);
+ VERIFY(val == 4);
+ }
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionPointerToMemberFunction
+//
+
+int TestFixedFunctionPointerToMemberFunction()
+{
+ using namespace eastl;
+
+ struct TestVoidRet
+ {
+ TestVoidRet() : x(0) {}
+ ~TestVoidRet() = default;
+
+ void IncX() const
+ {
+ ++x;
+ }
+
+ void IncX()
+ {
+ ++x;
+ }
+
+ mutable int x = 0;
+ };
+
+ struct TestIntRet
+ {
+ TestIntRet() : x(0) {}
+
+ int IncX() const
+ {
+ return x++;
+ }
+
+ int IncX()
+ {
+ return x++;
+ }
+
+ mutable int x = 0;
+ };
+
+ int nErrorCount = 0;
+ TestVoidRet voidRet;
+ TestIntRet intRet;
+ const TestVoidRet cvoidRet;
+ const TestIntRet cintRet;
+
+ typedef void (TestVoidRet::*PTMFSize)(void);
+
+ {
+ eastl::fixed_function<sizeof(PTMFSize), void(const TestVoidRet&)> ff = static_cast<void(TestVoidRet::*)() const>(&TestVoidRet::IncX);
+ ff(cvoidRet);
+ VERIFY(cvoidRet.x == 1);
+ }
+ {
+ eastl::fixed_function<sizeof(PTMFSize), void(const TestVoidRet&)> ff = static_cast<void(TestVoidRet::*)() const>(&TestVoidRet::IncX);
+ ff(voidRet);
+ VERIFY(voidRet.x == 1);
+ }
+ {
+ eastl::fixed_function<sizeof(PTMFSize), void(TestVoidRet&)> ff = static_cast<void(TestVoidRet::*)()>(&TestVoidRet::IncX);
+ ff(voidRet);
+ VERIFY(voidRet.x == 2);
+ }
+
+ {
+ eastl::fixed_function<sizeof(PTMFSize), int(const TestIntRet&)> ff = static_cast<int(TestIntRet::*)() const>(&TestIntRet::IncX);
+ int ret = ff(cintRet);
+ VERIFY(ret == 0);
+ VERIFY(cintRet.x == 1);
+ }
+ {
+ eastl::fixed_function<sizeof(PTMFSize), int(const TestIntRet&)> ff = static_cast<int(TestIntRet::*)() const>(&TestIntRet::IncX);
+ int ret = ff(intRet);
+ VERIFY(ret == 0);
+ VERIFY(intRet.x == 1);
+ }
+ {
+ eastl::fixed_function<sizeof(PTMFSize), int(TestIntRet&)> ff = static_cast<int(TestIntRet::*)()>(&TestIntRet::IncX);
+ int ret = ff(intRet);
+ VERIFY(ret == 1);
+ VERIFY(intRet.x == 2);
+ }
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionPointerToMemberData
+//
+
+int TestFixedFunctionPointerToMemberData()
+{
+ using namespace eastl;
+
+ struct Test
+ {
+ Test() : x(1) {}
+ int x = 1;
+ };
+
+ int nErrorCount = 0;
+
+ Test t;
+ const Test ct;
+
+ {
+ eastl::fixed_function<sizeof(void*), int(const Test&)> ff = &Test::x;
+ int ret = ff(t);
+ VERIFY(ret == 1);
+ }
+ {
+ eastl::fixed_function<sizeof(void*), int(const Test&)> ff = &Test::x;
+ int ret = ff(ct);
+ VERIFY(ret == 1);
+ }
+ {
+ eastl::fixed_function<sizeof(void*), int(const Test&)> ff;
+ ff = &Test::x;
+ int ret = ff(t);
+ VERIFY(ret == 1);
+ }
+ {
+ eastl::fixed_function<sizeof(void*), int(const Test&)> ff;
+ ff = &Test::x;
+ int ret = ff(ct);
+ VERIFY(ret == 1);
+ }
+
+ return nErrorCount;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionExistingClosure
+//
+int TestFixedFunctionExistingClosure()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ {
+ TestObject to;
+ {
+ using ff_t = eastl::fixed_function<sizeof(TestObject), void(void)>;
+ {
+ ff_t ff1 = [to] {};
+ ff_t ff3 = [to] {};
+ TestObject::Reset();
+ {
+ ff_t ff2 = ff1;
+ ff2 = ff3; // copy over function that holds existing closure state
+ }
+ VERIFY(TestObject::IsClear());
+ }
+ {
+ ff_t ff1 = [to] {};
+ TestObject::Reset();
+ ff_t ff3 = [to] {};
+ {
+ ff_t ff2 = ff1;
+ ff2 = eastl::move(ff3); // copy over function that holds existing closure state
+ }
+ VERIFY(TestObject::IsClear());
+ }
+ {
+ ff_t ff1 = [to] {};
+ TestObject::Reset();
+ {
+ ff_t ff2 = ff1;
+ ff2 = nullptr;
+ }
+ VERIFY(TestObject::IsClear());
+ }
+ {
+ TestObject::Reset();
+ ff_t ff1 = [to] {};
+ {
+ ff_t ff2 = eastl::move(ff1);
+ ff2 = nullptr;
+ }
+ VERIFY(TestObject::IsClear());
+ }
+ }
+ }
+
+ return nErrorCount;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionCaptureless
+//
+// Tests calling a captureless (eg. function pointer) callable with variable
+// eastl::fixed_function size types.
+//
+template<class FixedFunctionT>
+int TestFixedFunctionCaptureless()
+{
+ int nErrorCount = 0;
+
+ FixedFunctionT fn;
+
+ EATEST_VERIFY(!fn);
+
+ fn = [](int in) { return in; };
+
+ EATEST_VERIFY(!!fn);
+
+ EATEST_VERIFY(fn(42) == 42);
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFixedFunctionBasic
+//
+int TestFixedFunctionBasic()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ {
+ struct Functor { void operator()() { return; } };
+ fixed_function<24, void(void)> fn;
+ fixed_function<24, void(void)> fn2 = nullptr;
+ EATEST_VERIFY(!fn);
+ EATEST_VERIFY(!fn2);
+ EATEST_VERIFY(fn == nullptr);
+ EATEST_VERIFY(fn2 == nullptr);
+ EATEST_VERIFY(nullptr == fn);
+ EATEST_VERIFY(nullptr == fn2);
+ fn = Functor();
+ fn2 = Functor();
+ EATEST_VERIFY(!!fn);
+ EATEST_VERIFY(!!fn2);
+ EATEST_VERIFY(fn != nullptr);
+ EATEST_VERIFY(fn2 != nullptr);
+ EATEST_VERIFY(nullptr != fn);
+ EATEST_VERIFY(nullptr != fn2);
+ fn = nullptr;
+ fn2 = fn;
+ EATEST_VERIFY(!fn);
+ EATEST_VERIFY(!fn2);
+ EATEST_VERIFY(fn == nullptr);
+ EATEST_VERIFY(fn2 == nullptr);
+ EATEST_VERIFY(nullptr == fn);
+ EATEST_VERIFY(nullptr == fn2);
+ }
+
+ {
+ using eastl::swap;
+ struct Functor { int operator()() { return 5; } };
+ fixed_function<24, int(void)> fn = Functor();
+ fixed_function<24, int(void)> fn2;
+ EATEST_VERIFY(fn() == 5);
+ EATEST_VERIFY(!fn2);
+ fn.swap(fn2);
+ EATEST_VERIFY(!fn);
+ EATEST_VERIFY(fn2() == 5);
+ swap(fn, fn2);
+ EATEST_VERIFY(fn() == 5);
+ EATEST_VERIFY(!fn2);
+ }
+
+ {
+ struct Functor { int operator()() { return 42; } };
+ fixed_function<0, int(void)> fn = Functor();
+ EATEST_VERIFY(fn() == 42);
+ }
+
+ {
+ struct Functor { int operator()(int in) { return in; } };
+ fixed_function<0, int(int)> fn = Functor();
+ EATEST_VERIFY(fn(24) == 24);
+ }
+
+ {
+ eastl::fixed_function<0, void(void)> fn;
+
+ EATEST_VERIFY(!fn);
+ fn = [] {};
+ EATEST_VERIFY(!!fn);
+ }
+
+ {
+ eastl::fixed_function<0, int(int)> fn = [](int param) { return param; };
+ EATEST_VERIFY(fn(42) == 42);
+ }
+
+ {
+ eastl::fixed_function<0, int(int)> fn = ReturnVal;
+ EATEST_VERIFY(fn(42) == 42);
+ }
+
+ {
+ eastl::fixed_function<0, int()> fn0 = ReturnZero;
+ eastl::fixed_function<0, int()> fn1 = ReturnOne;
+
+ EATEST_VERIFY(fn0() == 0 && fn1() == 1);
+ swap(fn0, fn1);
+ EATEST_VERIFY(fn0() == 1 && fn1() == 0);
+ }
+
+ {
+ eastl::fixed_function<0, int()> fn0 = ReturnZero;
+ eastl::fixed_function<0, int()> fn1 = ReturnOne;
+
+ EATEST_VERIFY(fn0() == 0 && fn1() == 1);
+ fn0 = fn1;
+ EATEST_VERIFY(fn0() == 1 && fn1() == 1);
+ }
+
+ {
+ eastl::fixed_function<0, int()> fn0 = ReturnZero;
+ eastl::fixed_function<0, int()> fn1 = ReturnOne;
+
+ EATEST_VERIFY(fn0() == 0 && fn1() == 1);
+ fn0 = eastl::move(fn1);
+ EATEST_VERIFY(fn0() == 1 && fn1 == nullptr);
+ }
+
+ {
+ eastl::fixed_function<0, int(int)> f1(nullptr);
+ EATEST_VERIFY(!f1);
+
+ eastl::fixed_function<0, int(int)> f2 = nullptr;
+ EATEST_VERIFY(!f2);
+ }
+
+ {
+ // test using a large lambda capture
+ uint64_t a = 1, b = 2, c = 3, d = 4, e = 5, f = 6;
+ auto large_add = [=] { return a + b + c + d + e + f; };
+
+ {
+ eastl::fixed_function<48, uint64_t(void)> fn = large_add;
+ auto result = fn();
+ EATEST_VERIFY(result == 21);
+ }
+
+ {
+ eastl::fixed_function<sizeof(large_add), uint64_t(void)> fn = large_add;
+ auto result = fn();
+ EATEST_VERIFY(result == 21);
+ }
+ }
+
+ {
+ using ff_0 = eastl::fixed_function<0, int(int)>;
+ using ff_1 = eastl::fixed_function<1, int(int)>;
+ using ff_4 = eastl::fixed_function<4, int(int)>;
+ using ff_8 = eastl::fixed_function<8, int(int)>;
+ using ff_64 = eastl::fixed_function<64, int(int)>;
+ using ff_128 = eastl::fixed_function<128, int(int)>;
+ using ff_4096 = eastl::fixed_function<4096, int(int)>;
+
+ static_assert(sizeof(ff_0) >= sizeof(void*), "error");
+ static_assert(sizeof(ff_1) >= sizeof(void*), "error");
+ static_assert(sizeof(ff_4) >= sizeof(void*), "error");
+ static_assert(sizeof(ff_8) >= 8, "error");
+ static_assert(sizeof(ff_64) >= 64, "error");
+ static_assert(sizeof(ff_128) >= 128, "error");
+ static_assert(sizeof(ff_4096) >= 4096, "error");
+
+ nErrorCount += TestFixedFunctionCaptureless<ff_0>();
+ nErrorCount += TestFixedFunctionCaptureless<ff_1>();
+ nErrorCount += TestFixedFunctionCaptureless<ff_4>();
+ nErrorCount += TestFixedFunctionCaptureless<ff_8>();
+ nErrorCount += TestFixedFunctionCaptureless<ff_64>();
+ nErrorCount += TestFixedFunctionCaptureless<ff_128>();
+ nErrorCount += TestFixedFunctionCaptureless<ff_4096>();
+ }
+
+ // Verify conversions to fixed_function<N> for sizes greater or equal to the source size.
+ {
+ uint32_t v0 = 130480, v1 = 936780302;
+ const uint32_t result = v0 + v1;
+
+ eastl::fixed_function<8, uint32_t(void)> ff8 = [v0, v1]
+ { return v0 + v1; };
+
+ {
+ eastl::fixed_function<16, uint32_t(void)> ff16(ff8);
+ VERIFY(result == ff16());
+ }
+
+ {
+ eastl::fixed_function<16, uint32_t(void)> ff16 = ff8;
+ VERIFY(result == ff16());
+ }
+
+ {
+ eastl::fixed_function<16, uint32_t(void)> ff16;
+ ff16 = ff8;
+ VERIFY(result == ff16());
+ }
+
+ {
+ auto ff8Copy = ff8;
+ eastl::fixed_function<16, uint32_t(void)> ff16(eastl::move(ff8Copy));
+ VERIFY(result == ff16());
+ }
+
+ {
+ auto ff8Copy = ff8;
+ eastl::fixed_function<16, uint32_t(void)> ff16 = eastl::move(ff8Copy);
+ VERIFY(result == ff16());
+ }
+
+ {
+ auto ff8Copy = ff8;
+ eastl::fixed_function<16, uint32_t(void)> ff16;
+ ff16 = eastl::move(ff8Copy);
+ VERIFY(result == ff16());
+ }
+ }
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFunctional
+//
+int TestFixedFunction()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ nErrorCount += TestFixedFunctionBasic();
+ nErrorCount += TestFixedFunctionDtor();
+ nErrorCount += TestFixedFunctionExistingClosure();
+ nErrorCount += TestFixedFunctionReferenceWrapper();
+ nErrorCount += TestFixedFunctionFunctionPointer();
+ nErrorCount += TestFixedFunctionPointerToMemberFunction();
+ nErrorCount += TestFixedFunctionPointerToMemberData();
+ nErrorCount += TestFixedFunctionStdBind();
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestFixedHash.cpp b/EASTL/test/source/TestFixedHash.cpp
new file mode 100644
index 0000000..d7e20d0
--- /dev/null
+++ b/EASTL/test/source/TestFixedHash.cpp
@@ -0,0 +1,744 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include <EABase/eabase.h>
+#include "EASTLTest.h"
+#include "TestMap.h"
+#include "TestSet.h"
+#include <EASTL/fixed_hash_set.h>
+#include <EASTL/fixed_hash_map.h>
+#include <EASTL/fixed_vector.h>
+
+
+
+
+using namespace eastl;
+
+
+struct A
+{
+ int mX;
+ A(int x = 999) : mX(x) {}
+};
+
+inline bool operator==(const A& a1, const A& a2)
+ { return a1.mX == a2.mX; }
+
+
+
+namespace eastl
+{
+ template <>
+ struct hash<A>
+ {
+ size_t operator()(const A& a) const
+ { return static_cast<size_t>(a.mX); }
+ };
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// For test of user-reported crash.
+//
+struct MemoryEntry
+{
+ size_t mSize;
+ void* mGroup;
+};
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// For test of bug reported by Dave Wall, May 14, 2008.
+//
+struct InstanceRenderData
+{
+ static const uint32_t kDataCount = 10; // Bug only occurs with this value.
+
+ uint32_t mPad[kDataCount];
+
+ InstanceRenderData()
+ {
+ memset(mPad, 0, sizeof(mPad));
+ }
+
+ bool operator==(const InstanceRenderData &rhs) const
+ {
+ for(uint32_t index = 0; index < kDataCount; index++)
+ {
+ if(mPad[index] != rhs.mPad[index])
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+};
+
+namespace eastl
+{
+ template <>
+ struct hash<const InstanceRenderData>
+ {
+ size_t operator()(InstanceRenderData val) const
+ {
+ return val.mPad[0];
+ }
+ };
+
+ template <>
+ struct hash<InstanceRenderData>
+ {
+ size_t operator()(InstanceRenderData val) const
+ {
+ return val.mPad[0];
+ }
+ };
+}
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::fixed_hash_set<int, 1, 2>;
+template class eastl::fixed_hash_map<int, int, 1, 2>;
+template class eastl::fixed_hash_multiset<int, 1, 2>;
+template class eastl::fixed_hash_multimap<int, int, 1, 2>;
+
+template class eastl::fixed_hash_set<A, 1, 2>;
+template class eastl::fixed_hash_map<A, A, 1, 2>;
+template class eastl::fixed_hash_multiset<A, 1, 2>;
+template class eastl::fixed_hash_multimap<A, A, 1, 2>;
+
+template class eastl::fixed_hash_set<int, 1, 2, true, eastl::hash<int>, eastl::equal_to<int>, true>;
+template class eastl::fixed_hash_map<int, int, 1, 2, true, eastl::hash<int>, eastl::equal_to<int>, true>;
+template class eastl::fixed_hash_multiset<int, 1, 2, true, eastl::hash<int>, eastl::equal_to<int>, true>;
+template class eastl::fixed_hash_multimap<int, int, 1, 2, true, eastl::hash<int>, eastl::equal_to<int>, true>;
+
+template class eastl::fixed_hash_set<A, 1, 2, true, eastl::hash<A>, eastl::equal_to<A>, true>;
+template class eastl::fixed_hash_map<A, A, 1, 2, true, eastl::hash<A>, eastl::equal_to<A>, true>;
+template class eastl::fixed_hash_multiset<A, 1, 2, true, eastl::hash<A>, eastl::equal_to<A>, true>;
+template class eastl::fixed_hash_multimap<A, A, 1, 2, true, eastl::hash<A>, eastl::equal_to<A>, true>;
+
+// Custom allocator
+template class eastl::fixed_hash_set<int, 1, 2, true, eastl::hash<int>, eastl::equal_to<int>, false, MallocAllocator>;
+template class eastl::fixed_hash_map<int, int, 1, 2, true, eastl::hash<int>, eastl::equal_to<int>, false, MallocAllocator>;
+template class eastl::fixed_hash_multiset<int, 1, 2, true, eastl::hash<int>, eastl::equal_to<int>, false, MallocAllocator>;
+template class eastl::fixed_hash_multimap<int, int, 1, 2, true, eastl::hash<int>, eastl::equal_to<int>, false, MallocAllocator>;
+
+template class eastl::fixed_hash_set<A, 1, 2, true, eastl::hash<A>, eastl::equal_to<A>, false, MallocAllocator>;
+template class eastl::fixed_hash_map<A, A, 1, 2, true, eastl::hash<A>, eastl::equal_to<A>, false, MallocAllocator>;
+template class eastl::fixed_hash_multiset<A, 1, 2, true, eastl::hash<A>, eastl::equal_to<A>, false, MallocAllocator>;
+template class eastl::fixed_hash_multimap<A, A, 1, 2, true, eastl::hash<A>, eastl::equal_to<A>, false, MallocAllocator>;
+
+
+
+template<typename FixedHashMap, int ELEMENT_MAX, int ITERATION_MAX>
+int TestFixedHashMapClearBuckets()
+{
+ int nErrorCount = 0;
+
+ FixedHashMap fixedHashMap;
+ const auto nPreClearBucketCount = fixedHashMap.bucket_count();
+
+ for (int j = 0; j < ITERATION_MAX; j++)
+ {
+ // add elements and ensure container is valid
+ for (int i = 0; i < int(nPreClearBucketCount); i++)
+ fixedHashMap.emplace(i, i);
+ VERIFY(fixedHashMap.validate());
+
+ // ensure contents are expected values
+ for (int i = 0; i < int(nPreClearBucketCount); i++)
+ {
+ auto iter = fixedHashMap.find(i);
+
+ VERIFY(iter != fixedHashMap.end());
+ VERIFY(iter->second == i);
+ }
+
+ // validate container after its cleared its nodes and buckets
+ fixedHashMap.clear(true);
+ VERIFY(fixedHashMap.validate());
+ VERIFY(fixedHashMap.size() == 0);
+ VERIFY(fixedHashMap.bucket_count() == nPreClearBucketCount);
+ }
+
+ return nErrorCount;
+}
+
+
+EA_DISABLE_VC_WARNING(6262)
+int TestFixedHash()
+{
+ int nErrorCount = 0;
+
+ { // fixed_hash_map
+ {
+ // Test version *without* pool overflow.
+ typedef eastl::fixed_hash_map<int, int, 100, 100, false> FixedHashMapFalse;
+ FixedHashMapFalse fixedHashMap;
+
+ fixedHashMap[0] = 0;
+ fixedHashMap.insert(FixedHashMapFalse::value_type(0, 0));
+
+ VERIFY(fixedHashMap.max_size() == 100);
+ VERIFY(fixedHashMap.size() == 1);
+
+ fixedHashMap.clear();
+ VERIFY(fixedHashMap.size() == 0);
+
+ for(int i = 0; i < 100; i++)
+ fixedHashMap.insert(FixedHashMapFalse::value_type(i, i));
+ VERIFY(fixedHashMap.size() == 100);
+
+ // Verify that we allocated enough space for exactly N items.
+ // It's possible that due to alignments, there might be room for N + 1.
+ FixedHashMapFalse::allocator_type& allocator = fixedHashMap.get_allocator();
+ void* pResult = allocator.allocate(sizeof(FixedHashMapFalse::node_type));
+ if(pResult)
+ {
+ pResult = allocator.allocate(sizeof(FixedHashMapFalse::node_type));
+ VERIFY(pResult == NULL);
+ }
+
+ fixedHashMap.clear(true);
+ VERIFY(fixedHashMap.validate());
+ VERIFY(fixedHashMap.size() == 0);
+ VERIFY(fixedHashMap.bucket_count() == fixedHashMap.rehash_policy().GetPrevBucketCount(100));
+ }
+
+ {
+ // Test version *with* pool overflow.
+ typedef eastl::fixed_hash_map<int, int, 100, 100, true> FixedHashMapTrue;
+ FixedHashMapTrue fixedHashMap;
+
+ fixedHashMap[0] = 0;
+ fixedHashMap.insert(FixedHashMapTrue::value_type(0, 0));
+
+ VERIFY(fixedHashMap.max_size() == 100);
+ VERIFY(fixedHashMap.size() == 1);
+
+ fixedHashMap.clear();
+ VERIFY(fixedHashMap.size() == 0);
+
+ for(int i = 0; i < 100; i++)
+ fixedHashMap.insert(FixedHashMapTrue::value_type(i, i));
+ VERIFY(fixedHashMap.size() == 100);
+
+ FixedHashMapTrue::allocator_type& allocator = fixedHashMap.get_allocator();
+ void* pResult = allocator.allocate(sizeof(FixedHashMapTrue::node_type));
+ VERIFY(pResult != NULL);
+ allocator.deallocate(pResult, sizeof(FixedHashMapTrue::node_type));
+
+ fixedHashMap.clear(true);
+ VERIFY(fixedHashMap.validate());
+ VERIFY(fixedHashMap.size() == 0);
+ VERIFY(fixedHashMap.bucket_count() == fixedHashMap.rehash_policy().GetPrevBucketCount(100));
+
+ // get_overflow_allocator / set_overflow_allocator
+ // This is a weak test which should be improved.
+ EASTLAllocatorType a = fixedHashMap.get_allocator().get_overflow_allocator();
+ fixedHashMap.get_allocator().set_overflow_allocator(a);
+ }
+
+ // Test that fixed_hash_map (with and without overflow enabled) is usable after the node and bucket array has
+ // been cleared.
+ {
+ constexpr const int ITERATION_MAX = 5;
+ constexpr const int ELEMENT_MAX = 100;
+ constexpr const int ELEMENT_OVERFLOW_MAX = ELEMENT_MAX * 2;
+
+ TestFixedHashMapClearBuckets<eastl::fixed_hash_map<int, int, ELEMENT_MAX, ELEMENT_MAX, false>, ELEMENT_MAX, ITERATION_MAX>();
+ TestFixedHashMapClearBuckets<eastl::fixed_hash_map<int, int, ELEMENT_MAX, ELEMENT_MAX, true>, ELEMENT_OVERFLOW_MAX, ITERATION_MAX>();
+ TestFixedHashMapClearBuckets<eastl::fixed_hash_multimap<int, int, ELEMENT_MAX, ELEMENT_MAX, false>, ELEMENT_MAX, ITERATION_MAX>();
+ TestFixedHashMapClearBuckets<eastl::fixed_hash_multimap<int, int, ELEMENT_MAX, ELEMENT_MAX, true>, ELEMENT_OVERFLOW_MAX, ITERATION_MAX>();
+ }
+
+ {
+ // Test fixed_hash_map *with* overflow and ensure the underlying hashtable rehashes.
+ typedef eastl::fixed_hash_map<unsigned int, unsigned int, 512, 513, true, eastl::hash<unsigned int>, eastl::equal_to<unsigned int>, false, MallocAllocator> FixedHashMap;
+
+ FixedHashMap fixedHashMap;
+ auto old_bucket_count = fixedHashMap.bucket_count();
+ auto old_load_factor = fixedHashMap.load_factor();
+
+ for (int i = 0; i < 1000; i++)
+ fixedHashMap.insert(i);
+
+ auto new_bucket_count = fixedHashMap.bucket_count();
+ auto new_load_factor = fixedHashMap.load_factor();
+
+ VERIFY(new_bucket_count != old_bucket_count);
+ VERIFY(new_bucket_count > old_bucket_count);
+ VERIFY(new_load_factor != old_load_factor);
+ VERIFY(fixedHashMap.get_overflow_allocator().mAllocCountAll != 0);
+ }
+
+ {
+ // Test version with overflow and alignment requirements.
+ typedef fixed_hash_map<Align64, int, 1, 2, true> FixedHashMapWithAlignment;
+ typedef fixed_hash_multimap<Align64, int, 1, 2, true> FixedHashMultiMapWithAlignment;
+ typedef fixed_hash_set<Align64, 1, 2, true> FixedHashSetWithAlignment;
+ typedef fixed_hash_multiset<Align64, 1, 2, true> FixedHashMultiSetWithAlignment;
+
+ FixedHashMapWithAlignment fhm;
+ FixedHashMultiMapWithAlignment fhmm;
+ FixedHashSetWithAlignment fhs;
+ FixedHashMultiSetWithAlignment fhms;
+
+ Align64 a; a.mX = 1;
+ Align64 b; b.mX = 2;
+ Align64 c; c.mX = 3;
+ Align64 d; d.mX = 4;
+ Align64 e; e.mX = 5;
+
+ fhm.insert(a);
+ fhm.insert(b);
+ fhm.insert(c);
+ fhm.insert(d);
+ fhm.insert(e);
+ for (FixedHashMapWithAlignment::const_iterator it = fhm.begin(); it != fhm.end(); ++it)
+ {
+ const Align64* ptr = &((*it).first);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ fhmm.insert(a);
+ fhmm.insert(b);
+ fhmm.insert(c);
+ fhmm.insert(d);
+ fhmm.insert(e);
+ for (FixedHashMultiMapWithAlignment::const_iterator it = fhmm.begin(); it != fhmm.end(); ++it)
+ {
+ const Align64* ptr = &((*it).first);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ fhs.insert(a);
+ fhs.insert(b);
+ fhs.insert(c);
+ fhs.insert(d);
+ fhs.insert(e);
+ for (FixedHashSetWithAlignment::const_iterator it = fhs.begin(); it != fhs.end(); ++it)
+ {
+ const Align64* ptr = &(*it);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ fhms.insert(a);
+ fhms.insert(b);
+ fhms.insert(c);
+ fhms.insert(d);
+ fhms.insert(e);
+ for (FixedHashMultiSetWithAlignment::const_iterator it = fhms.begin(); it != fhms.end(); ++it)
+ {
+ const Align64* ptr = &(*it);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ }
+
+ {
+ typedef eastl::fixed_hash_map<int, A, 100, 100> FixedHashMap;
+ FixedHashMap fixedHashMap;
+
+ fixedHashMap[0] = A();
+ fixedHashMap.insert(FixedHashMap::value_type(0, A()));
+
+ VERIFY(fixedHashMap.size() == 1);
+ }
+
+ {
+ typedef eastl::fixed_hash_map<A, int, 100, 100> FixedHashMap;
+ FixedHashMap fixedHashMap;
+
+ fixedHashMap[A()] = 0;
+ fixedHashMap.insert(FixedHashMap::value_type(A(), 0));
+
+ VERIFY(fixedHashMap.size() == 1);
+ }
+
+ // explicitly instantiate some templated member functions
+ {
+ typedef eastl::fixed_hash_map<int, int, 100, 100, true> FixedHashMapTrue;
+ FixedHashMapTrue::value_type testValues[] = { eastl::make_pair(0, 0), eastl::make_pair(1,1) };
+ FixedHashMapTrue fixedHashMap(testValues, testValues + EAArrayCount(testValues));
+ VERIFY(fixedHashMap.size() == 2);
+ }
+ }
+
+
+ { // fixed_hash_multimap
+ {
+ typedef eastl::fixed_hash_multimap<int, int, 100, 100> FixedHashMultiMap;
+ FixedHashMultiMap fixedHashMultiMap;
+
+ fixedHashMultiMap.insert(FixedHashMultiMap::value_type(0, 0));
+ fixedHashMultiMap.insert(FixedHashMultiMap::value_type(0, 0));
+
+ VERIFY(fixedHashMultiMap.max_size() == 100);
+ VERIFY(fixedHashMultiMap.size() == 2);
+ }
+
+ // explicitly instantiate some templated member functions
+ {
+ typedef eastl::fixed_hash_multimap<int, int, 100, 100, true> FixedHashMultiMap;
+ FixedHashMultiMap::value_type testValues[] = { eastl::make_pair(0, 0), eastl::make_pair(1,1) };
+ FixedHashMultiMap fixedHashMultiMap(testValues, testValues + EAArrayCount(testValues));
+ VERIFY(fixedHashMultiMap.size() == 2);
+ }
+ }
+
+
+ { // fixed_hash_set
+ {
+ typedef eastl::fixed_hash_set<int, 100, 100> FixedHashSet;
+ FixedHashSet fixedHashSet;
+
+ fixedHashSet.insert(0);
+ fixedHashSet.insert(0);
+ VERIFY(fixedHashSet.size() == 1);
+
+ fixedHashSet.clear();
+ VERIFY(fixedHashSet.size() == 0);
+
+ for(int i = 0; i < 100; i++)
+ fixedHashSet.insert(i);
+
+ VERIFY(fixedHashSet.max_size() == 100);
+ VERIFY(fixedHashSet.size() == 100);
+
+ fixedHashSet.clear(true);
+ VERIFY(fixedHashSet.validate());
+ VERIFY(fixedHashSet.size() == 0);
+ VERIFY(fixedHashSet.bucket_count() == 1);
+ }
+
+ {
+ typedef eastl::fixed_hash_set<A, 100, 100> FixedHashSet;
+ FixedHashSet fixedHashSet;
+
+ fixedHashSet.insert(A());
+ fixedHashSet.insert(A());
+
+ VERIFY(fixedHashSet.max_size() == 100);
+ VERIFY(fixedHashSet.size() == 1);
+ }
+
+ // explicitly instantiate some templated member functions
+ {
+ typedef eastl::fixed_hash_set<A, 100, 100> FixedHashSet;
+ FixedHashSet::value_type testValues[] = { 0, 1 };
+ FixedHashSet fixedHashSet(testValues, testValues + EAArrayCount(testValues));
+ VERIFY(fixedHashSet.size() == 2);
+ }
+ }
+
+
+ { // fixed_hash_multiset
+ {
+ typedef eastl::fixed_hash_multiset<int, 100, 100> FixedHashMultiSet;
+ FixedHashMultiSet fixedHashMultiSet;
+
+ fixedHashMultiSet.insert(0);
+ fixedHashMultiSet.insert(0);
+
+ VERIFY(fixedHashMultiSet.size() == 2);
+ }
+
+
+ // explicitly instantiate some templated member functions
+ {
+ typedef eastl::fixed_hash_multiset<A, 100, 100> FixedHashMultiSet;
+ FixedHashMultiSet::value_type testValues[] = { 0, 1 };
+ FixedHashMultiSet fixedHashMultiSet(testValues, testValues + EAArrayCount(testValues));
+ VERIFY(fixedHashMultiSet.size() == 2);
+ }
+ }
+
+
+ { // Tests of various bucketCount values.
+ {
+ typedef eastl::fixed_hash_set<int, 1, 2> FixedHashSet;
+ FixedHashSet fixedHashSet;
+
+ fixedHashSet.insert(0);
+
+ VERIFY(fixedHashSet.size() == 1);
+ }
+
+ {
+ typedef eastl::fixed_hash_set<int, 2, 2> FixedHashSet;
+ FixedHashSet fixedHashSet;
+
+ fixedHashSet.insert(0);
+ fixedHashSet.insert(1);
+
+ VERIFY(fixedHashSet.size() == 2);
+ }
+
+ {
+ typedef eastl::fixed_hash_set<int, 11, 11> FixedHashSet; // 11 is one of the hashtable prime numbers.
+ FixedHashSet fixedHashSet;
+
+ for(int i = 0; i < 11; i++)
+ fixedHashSet.insert(i);
+
+ VERIFY(fixedHashSet.size() == 11);
+ }
+
+
+ {
+ typedef eastl::fixed_hash_set<int, 11, 11> FixedHashSet; // 11 is one of the hashtable prime numbers.
+ FixedHashSet fixedHashSet;
+
+ VERIFY(fixedHashSet.validate());
+ VERIFY(fixedHashSet.size() == 0);
+
+ // Clear a newly constructed, already empty container.
+ fixedHashSet.clear(true);
+ VERIFY(fixedHashSet.validate());
+ VERIFY(fixedHashSet.size() == 0);
+ VERIFY(fixedHashSet.bucket_count() == 1);
+
+ for(int i = 0; i < 11; i++)
+ fixedHashSet.insert(i);
+ VERIFY(fixedHashSet.size() == 11);
+ VERIFY(fixedHashSet.bucket_count() > 1);
+
+ fixedHashSet.clear(true);
+ VERIFY(fixedHashSet.validate());
+ VERIFY(fixedHashSet.size() == 0);
+ VERIFY(fixedHashSet.bucket_count() == 1);
+
+ for(int i = 0; i < 11; i++)
+ fixedHashSet.insert(i);
+ VERIFY(fixedHashSet.size() == 11);
+ }
+ }
+
+ { // Test of user-reported crash.
+
+ // MemoryAddressToGroupMap is a container used by one team to associate debug
+ // information with memory allocations. A crash due to corruption of the
+ // fixed size node pool was reported on consoles (no crash on PC platform).
+ const eastl_size_t kMemoryAddressMapNodeCount = 500000;
+
+ typedef eastl::fixed_hash_map<
+ const void*, // Key
+ MemoryEntry, // Value
+ kMemoryAddressMapNodeCount, // Node Count
+ kMemoryAddressMapNodeCount + 1, // Bucket Count
+ true, // Enable Overflow
+ eastl::hash<const void*>, // Hash
+ eastl::equal_to<const void*>, // Predicate
+ false, // Cache Hash Code
+ eastl::allocator // Allocator
+ > MemoryAddressToGroupMap;
+
+ MemoryAddressToGroupMap* pMap = new MemoryAddressToGroupMap;
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+
+ // We simulate the usage of MemoryAddressToGroupMap via simulated alloc/free actions.
+ for(eastl_size_t i = 0; i < kMemoryAddressMapNodeCount * 2; i++)
+ {
+ void* const p = (void*)(uintptr_t)rng.RandLimit(kMemoryAddressMapNodeCount);
+
+ if(pMap->find(p) == pMap->end())
+ (*pMap)[p] = MemoryEntry();
+ else
+ pMap->erase(p);
+ }
+
+ delete pMap;
+ }
+
+
+ { // Test of bug reported by Dave Wall, May 14, 2008.
+ const size_t kNumBuckets = 10; // Bug only occurred with kNumBuckets == 10 or 11.
+
+ typedef eastl::fixed_hash_map<const InstanceRenderData, uint32_t, kNumBuckets, kNumBuckets + 1, false> Map;
+
+ Map map;
+ InstanceRenderData renderData;
+
+ uint32_t count = (uint32_t)kNumBuckets;
+
+ while(count--)
+ {
+ renderData.mPad[0] = count;
+ map.insert(Map::value_type(renderData, count));
+ }
+
+ }
+
+ {
+ // Test construction of a container with an overflow allocator constructor argument.
+ MallocAllocator overflowAllocator;
+ void* p = overflowAllocator.allocate(1);
+
+ typedef eastl::fixed_hash_map<int, int, 64, 100, true, eastl::hash<int>, eastl::equal_to<int>, false, MallocAllocator> Container;
+ Container c(overflowAllocator);
+
+ for(int i = 0; i < 65; i++)
+ c.insert(Container::value_type(i, i));
+
+ VERIFY(c.get_overflow_allocator().mAllocCount == 2); // 1 for above, and 1 for overflowing from 64 to 65.
+ overflowAllocator.deallocate(p, 1);
+ }
+
+
+ {
+ // C++11 emplace and related functionality
+ nErrorCount += TestMapCpp11<eastl::fixed_hash_map<int, TestObject, 2, 7, true> >(); // Exercize a low-capacity fixed-size container.
+ nErrorCount += TestMapCpp11<eastl::fixed_hash_map<int, TestObject, 32, 7, true> >();
+
+ nErrorCount += TestMapCpp11NonCopyable<eastl::fixed_hash_map<int, NonCopyable, 2, 7, true>>();
+
+ nErrorCount += TestSetCpp11<eastl::fixed_hash_set<TestObject, 2, 7, true> >();
+ nErrorCount += TestSetCpp11<eastl::fixed_hash_set<TestObject, 32, 7, true> >();
+
+ nErrorCount += TestMultimapCpp11<eastl::fixed_hash_multimap<int, TestObject, 2, 7, true> >();
+ nErrorCount += TestMultimapCpp11<eastl::fixed_hash_multimap<int, TestObject, 32, 7, true> >();
+
+ nErrorCount += TestMultisetCpp11<eastl::fixed_hash_multiset<TestObject, 2, 7, true> >();
+ nErrorCount += TestMultisetCpp11<eastl::fixed_hash_multiset<TestObject, 32, 7, true> >();
+ }
+
+ {
+ // C++17 try_emplace and related functionality
+ nErrorCount += TestMapCpp17<eastl::fixed_hash_map<int, TestObject, 2, 7, true>>();
+ nErrorCount += TestMapCpp17<eastl::fixed_hash_map<int, TestObject, 32, 7, true> >();
+ }
+
+ {
+ // void reserve(size_type nElementCount);
+
+ // test with overflow enabled.
+ nErrorCount += HashContainerReserveTest<fixed_hash_set<int, 16>>()();
+ nErrorCount += HashContainerReserveTest<fixed_hash_multiset<int, 16>>()();
+ nErrorCount += HashContainerReserveTest<fixed_hash_map<int, int, 16>>()();
+ nErrorCount += HashContainerReserveTest<fixed_hash_multimap<int, int, 16>>()();
+
+ // API prevents testing fixed size hash container reservation without overflow enabled.
+ //
+ // nErrorCount += HashContainerReserveTest<fixed_hash_set<int, 400, 401, false>>()();
+ // nErrorCount += HashContainerReserveTest<fixed_hash_multiset<int, 400, 401, false>>()();
+ // nErrorCount += HashContainerReserveTest<fixed_hash_map<int, int, 400, 401, false>>()();
+ // nErrorCount += HashContainerReserveTest<fixed_hash_multimap<int, int, 9000, 9001, false>>()();
+ }
+
+ {
+ // initializer_list support.
+ // fixed_hash_set(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR)
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ // void insert(std::initializer_list<value_type> ilist);
+ fixed_hash_set<int, 11> intHashSet = { 12, 13, 14 };
+ EATEST_VERIFY(intHashSet.size() == 3);
+ EATEST_VERIFY(intHashSet.find(12) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(13) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(14) != intHashSet.end());
+
+ intHashSet = { 22, 23, 24 };
+ EATEST_VERIFY(intHashSet.size() == 3);
+ EATEST_VERIFY(intHashSet.find(22) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(23) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(24) != intHashSet.end());
+
+ intHashSet.insert({ 42, 43, 44 });
+ EATEST_VERIFY(intHashSet.size() == 6);
+ EATEST_VERIFY(intHashSet.find(42) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(43) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(44) != intHashSet.end());
+
+ // hash_map(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR)
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ // void insert(std::initializer_list<value_type> ilist);
+ fixed_hash_map<int, double, 11> intHashMap = { {12,12.0}, {13,13.0}, {14,14.0} };
+ EATEST_VERIFY(intHashMap.size() == 3);
+ EATEST_VERIFY(intHashMap.find(12) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(13) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(14) != intHashMap.end());
+
+ intHashMap = { {22,22.0}, {23,23.0}, {24,24.0} };
+ EATEST_VERIFY(intHashMap.size() == 3);
+ EATEST_VERIFY(intHashMap.find(22) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(23) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(24) != intHashMap.end());
+
+ intHashMap.insert({ {42,42.0}, {43,43.0}, {44,44.0} });
+ EATEST_VERIFY(intHashMap.size() == 6);
+ EATEST_VERIFY(intHashMap.find(42) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(43) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(44) != intHashMap.end());
+ }
+
+ {
+ constexpr int ELEM_MAX = 10;
+ typedef eastl::fixed_hash_map<int, int, ELEM_MAX, ELEM_MAX, false> FixedHashMapFalse;
+ FixedHashMapFalse fixedHashMap;
+ VERIFY(fixedHashMap.size() == 0);
+
+ for (int i = 0; i < ELEM_MAX; i++)
+ fixedHashMap.insert(FixedHashMapFalse::value_type(i, i));
+
+ VERIFY(fixedHashMap.validate());
+ VERIFY(fixedHashMap.size() == ELEM_MAX);
+
+ // Verify insert requests of nodes already in the container don't attempt to allocate memory.
+ // Because the fixed_hash_map is full any attempt to allocate memory will generate an OOM error.
+ {
+ auto result = fixedHashMap.insert(FixedHashMapFalse::value_type(0, 0));
+ VERIFY(result.second == false);
+ }
+
+ {
+ auto result = fixedHashMap.insert(fixedHashMap.begin(), FixedHashMapFalse::value_type(0, 0));
+ VERIFY(result->first == 0);
+ VERIFY(result->second == 0);
+ }
+
+ {
+ FixedHashMapFalse::value_type value(0, 0);
+ auto result = fixedHashMap.insert(eastl::move(value));
+ VERIFY(result.second == false);
+ }
+ {
+ FixedHashMapFalse::value_type value(0, 0);
+ auto result = fixedHashMap.insert(fixedHashMap.begin(), eastl::move(value));
+ VERIFY(result->first == 0);
+ VERIFY(result->second == 0);
+ }
+
+ {
+ FixedHashMapFalse::value_type value(0, 0);
+ auto result = fixedHashMap.insert(value);
+ VERIFY(result.second == false);
+ }
+
+ {
+ auto result = fixedHashMap.insert(eastl::make_pair(0, 0));
+ VERIFY(result.second == false);
+ }
+
+ {
+ // OOM, fixed allocator memory is exhausted so it can't create a node for insertation testing
+ // auto result = fixedHashMap.emplace(0, 0);
+ // VERIFY(result.second == false);
+ }
+ }
+
+ return nErrorCount;
+}
+EA_RESTORE_VC_WARNING()
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestFixedList.cpp b/EASTL/test/source/TestFixedList.cpp
new file mode 100644
index 0000000..9212559
--- /dev/null
+++ b/EASTL/test/source/TestFixedList.cpp
@@ -0,0 +1,563 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/fixed_list.h>
+
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::fixed_list<int, 1, true, EASTLAllocatorType>;
+template class eastl::fixed_list<int, 1, false, EASTLAllocatorType>;
+
+
+/*
+// This does not compile, since the fixed_list allocator is templated on sizeof(T),
+// not just T. Thus, the full type is required at the time of instantiation, but it
+// is not available.
+// See EATech Core JIRA issue ETCR-1608 for more information.
+struct StructWithContainerOfStructs
+{
+ eastl::fixed_list<StructWithContainerOfStructs,4> children;
+};
+*/
+
+
+namespace FixedListTest
+{
+ struct Item
+ {
+ char mName[5];
+ };
+}
+
+
+EA_DISABLE_VC_WARNING(6262)
+int TestFixedList()
+{
+ int nErrorCount = 0;
+
+ {
+ // Test version *without* pool overflow.
+ typedef fixed_list<int, 64, false> FixedListInt64False;
+
+ FixedListInt64False listInt64;
+ VERIFY(listInt64.empty());
+ VERIFY(listInt64.size() == 0);
+ VERIFY(listInt64.max_size() == 64);
+
+ listInt64.push_back(1);
+ VERIFY(!listInt64.empty());
+ VERIFY(listInt64.size() == 1);
+
+ listInt64.resize(3, 2);
+ VERIFY(!listInt64.empty());
+ VERIFY(listInt64.size() == 3);
+
+ FixedListInt64False::iterator i = listInt64.begin();
+ VERIFY(*i == 1); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(i == listInt64.end());
+
+ listInt64.resize(0);
+ VERIFY(listInt64.empty());
+ VERIFY(listInt64.size() == 0);
+
+ while(listInt64.size() < 64)
+ listInt64.push_back(0);
+
+ // Verify that we allocated enough space for exactly N items.
+ // It's possible that due to alignments, there might be room for N + 1.
+ FixedListInt64False::allocator_type& allocator = listInt64.get_allocator();
+ void* pResult = allocator.allocate(sizeof(FixedListInt64False::node_type));
+ if(pResult)
+ {
+ pResult = allocator.allocate(sizeof(FixedListInt64False::node_type));
+ VERIFY(pResult == NULL);
+ }
+ }
+
+
+ {
+ // Test version *with* pool overflow.
+ typedef fixed_list<int, 64, true> FixedListInt64True;
+
+ FixedListInt64True listInt64;
+ VERIFY(listInt64.empty());
+ VERIFY(listInt64.size() == 0);
+
+ listInt64.push_back(1);
+ VERIFY(!listInt64.empty());
+ VERIFY(listInt64.size() == 1);
+
+ listInt64.resize(3, 2);
+ VERIFY(!listInt64.empty());
+ VERIFY(listInt64.size() == 3);
+
+ FixedListInt64True::iterator i = listInt64.begin();
+ VERIFY(*i == 1); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(i == listInt64.end());
+
+ listInt64.resize(0);
+ VERIFY(listInt64.empty());
+ VERIFY(listInt64.size() == 0);
+
+ while(listInt64.size() < 64 + 16)
+ listInt64.push_back(0);
+
+ FixedListInt64True::allocator_type& allocator = listInt64.get_allocator();
+ void* pResult = allocator.allocate(sizeof(FixedListInt64True::node_type));
+ VERIFY(pResult != NULL);
+ allocator.deallocate(pResult, sizeof(FixedListInt64True::node_type));
+
+ // get_overflow_allocator / set_overflow_allocator
+ // This is a weak test which should be improved.
+ EASTLAllocatorType a = listInt64.get_allocator().get_overflow_allocator();
+ listInt64.get_allocator().set_overflow_allocator(a);
+ }
+
+
+ {
+ // Test version *with* pool overflow with a custom overlow allocator specification.
+ typedef fixed_list<int, 64, true, MallocAllocator> FixedListInt64TrueMalloc;
+
+ FixedListInt64TrueMalloc listInt64;
+ VERIFY(listInt64.empty());
+ VERIFY(listInt64.size() == 0);
+
+ listInt64.push_back(1);
+ VERIFY(!listInt64.empty());
+ VERIFY(listInt64.size() == 1);
+
+ listInt64.resize(3, 2);
+ VERIFY(!listInt64.empty());
+ VERIFY(listInt64.size() == 3);
+
+ FixedListInt64TrueMalloc::iterator i = listInt64.begin();
+ VERIFY(*i == 1); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(i == listInt64.end());
+
+ listInt64.resize(0);
+ VERIFY(listInt64.empty());
+ VERIFY(listInt64.size() == 0);
+
+ while(listInt64.size() < 64 + 16)
+ listInt64.push_back(0);
+
+ FixedListInt64TrueMalloc::allocator_type& allocator = listInt64.get_allocator();
+ void* pResult = allocator.allocate(sizeof(FixedListInt64TrueMalloc::node_type));
+ VERIFY(pResult != NULL);
+ allocator.deallocate(pResult, sizeof(FixedListInt64TrueMalloc::node_type));
+ }
+
+ {
+ // Test fixed list with overflow and alignment requirements.
+ typedef fixed_list<Align64, 1, true, CustomAllocator> FixedListWithAlignment;
+
+ FixedListWithAlignment fl;
+
+ Align64 a;
+
+ fl.push_back(a);
+ fl.push_back(a);
+ fl.push_back(a);
+ fl.push_back(a);
+ fl.push_back(a);
+ for (FixedListWithAlignment::const_iterator it = fl.begin(); it != fl.end(); ++it)
+ {
+ const Align64* ptr = &(*it);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ }
+
+ {
+ // swap
+
+ fixed_list<int, 64>* pListInt64A = new fixed_list<int, 64>;
+ fixed_list<int, 64>* pListInt64B = new fixed_list<int, 64>;
+
+ pListInt64A->push_back(0);
+ pListInt64B->push_back(0);
+
+ swap(*pListInt64A, *pListInt64B);
+
+ delete pListInt64A;
+ delete pListInt64B;
+ }
+
+
+ {
+ // operator=
+
+ fixed_list<int, 64>* pListInt64A = new fixed_list<int, 64>;
+ fixed_list<int, 64>* pListInt64B = new fixed_list<int, 64>;
+
+ pListInt64A->push_back(0);
+ pListInt64B->push_back(0);
+
+ *pListInt64A = *pListInt64B;
+
+ delete pListInt64A;
+ delete pListInt64B;
+ }
+
+
+ {
+ // bool empty() const
+ // bool has_overflowed() const
+ // size_type size() const;
+ // size_type max_size() const
+
+ // Test a list that has overflow disabled.
+ fixed_list<int, 5, false> listInt5;
+
+ VERIFY(listInt5.max_size() == 5);
+ VERIFY(listInt5.size() == 0);
+ VERIFY(listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_back(37);
+ listInt5.push_back(37);
+ listInt5.push_back(37);
+
+ VERIFY(listInt5.size() == 3);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_back(37);
+ listInt5.push_back(37);
+
+ VERIFY(listInt5.size() == 5);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.pop_back();
+
+ VERIFY(listInt5.size() == 4);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+ }
+
+
+ {
+ // bool empty() const
+ // bool has_overflowed() const
+ // size_type size() const;
+ // size_type max_size() const
+
+ // Test a list that has overflow enabled.
+ fixed_list<int, 5, true> listInt5;
+
+ VERIFY(listInt5.max_size() == 5);
+ VERIFY(listInt5.size() == 0);
+ VERIFY(listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_back(37);
+ listInt5.push_back(37);
+ listInt5.push_back(37);
+
+ VERIFY(listInt5.size() == 3);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_back(37);
+ listInt5.push_back(37);
+
+ VERIFY(listInt5.size() == 5);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_back(37);
+
+ VERIFY(listInt5.size() == 6);
+ VERIFY(!listInt5.empty());
+ VERIFY(listInt5.has_overflowed());
+
+ listInt5.pop_back();
+
+ VERIFY(listInt5.size() == 5);
+ VERIFY(!listInt5.empty());
+ //VERIFY(listInt5.has_overflowed()); Disabled because currently has_overflowed can't detect this situation in non-debug builds.
+ }
+
+ {
+ //template <typename Compare>
+ //void merge(this_type& x, Compare compare);
+ //void unique();
+ //template <typename BinaryPredicate>
+ //void unique(BinaryPredicate);
+ //void sort();
+ //template<typename Compare>
+ //void sort(Compare compare);
+
+ const int A[] = {1, 2, 3, 4, 5, 6};
+ const int B[] = {12, 15, 13, 14, 11};
+ const int C[] = {11, 12, 13, 14, 15};
+ const int D[] = {1, 11, 2, 12, 3, 13, 4, 14, 5, 15, 6};
+ const int N = sizeof(A) / sizeof(A[0]);
+ const int M = sizeof(B) / sizeof(B[0]);
+ const int Q = sizeof(D) / sizeof(D[0]);
+
+ fixed_list<int, 32, true> list0401(A, A + N);
+ fixed_list<int, 32, true> list0402(B, B + M);
+ fixed_list<int, 32, true> list0403(C, C + M);
+ fixed_list<int, 32, true> list0404(D, D + Q);
+ fixed_list<int, 32, true> list0405(A, A + N);
+
+ list0402.sort(eastl::less<int>());
+ VERIFY(list0402 == list0403);
+
+ list0401.merge(list0402, eastl::less<int>());
+ list0404.sort();
+
+ //merge and isn't yet working for fixed_list.
+ //VERIFY(list0401 == list0404);
+
+ VERIFY(list0401.validate());
+ VERIFY(list0402.validate());
+ VERIFY(list0403.validate());
+ VERIFY(list0404.validate());
+ VERIFY(list0405.validate());
+ }
+
+
+ {
+ // void sort()
+ // void sort(Compare compare)
+
+ const int kSize = 10;
+ const int A[kSize] = { 1, 9, 2, 3, 5, 7, 4, 6, 8, 0 };
+
+ fixed_list<int, 32, true> listEmpty;
+ VERIFY(VerifySequence(listEmpty.begin(), listEmpty.end(), int(), "fixed_list::sort", -1));
+ listEmpty.sort();
+ VERIFY(VerifySequence(listEmpty.begin(), listEmpty.end(), int(), "fixed_list::sort", -1));
+
+ fixed_list<int, 32, true> list1(A, A + 1);
+ VERIFY(VerifySequence(list1.begin(), list1.end(), int(), "fixed_list::sort", 1, -1));
+ list1.sort();
+ VERIFY(VerifySequence(list1.begin(), list1.end(), int(), "fixed_list::sort", 1, -1));
+
+ fixed_list<int, 32, true> list4(A, A + 4);
+ VERIFY(VerifySequence(list4.begin(), list4.end(), int(), "fixed_list::sort", 1, 9, 2, 3, -1));
+ list4.sort();
+ VERIFY(VerifySequence(list4.begin(), list4.end(), int(), "fixed_list::sort", 1, 2, 3, 9, -1));
+
+ fixed_list<int, 32, true> listA(A, A + kSize);
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::sort", 1, 9, 2, 3, 5, 7, 4, 6, 8, 0, -1));
+ listA.sort();
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::sort", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1));
+
+ listA.assign(A, A + kSize);
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::sort", 1, 9, 2, 3, 5, 7, 4, 6, 8, 0, -1));
+ listA.sort(eastl::less<int>());
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::sort", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1));
+ }
+
+
+ {
+ // void merge(this_type& x);
+ // void merge(this_type& x, Compare compare);
+
+ const int kSize = 8;
+ const int A[kSize] = { 1, 2, 3, 4, 4, 5, 9, 9 };
+ const int B[kSize] = { 1, 2, 3, 4, 4, 5, 9, 9 };
+
+ fixed_list<int, 32, true> listA(A, A + kSize);
+ fixed_list<int, 32, true> listB(B, B + kSize);
+
+ listA.merge(listB);
+
+ //merge and isn't yet working for fixed_list.
+ //VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::merge", 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 9, 9, 9, 9, -1));
+ //VERIFY(VerifySequence(listB.begin(), listB.end(), int(), "fixed_list::merge", -1));
+ }
+
+
+ {
+ // void splice(iterator position, this_type& x);
+ // void splice(iterator position, this_type& x, iterator i);
+ // void splice(iterator position, this_type& x, iterator first, iterator last);
+
+ const int kSize = 8;
+ const int A[kSize] = { 1, 2, 3, 4, 4, 5, 9, 9 };
+ const int B[kSize] = { 1, 2, 3, 4, 4, 5, 9, 9 };
+
+ fixed_list<int, 32, true> listA(A, A + kSize);
+ fixed_list<int, 32, true> listB(B, B + kSize);
+ fixed_list<int, 32, true>::iterator it;
+
+ // void splice(iterator position, this_type& x);
+ it = listA.begin(); eastl::advance(it, 2);
+ listA.splice(it, listB); // move listB into listA at position it.
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::splice", 1, 2, 1, 2, 3, 4, 4, 5, 9, 9, 3, 4, 4, 5, 9, 9, -1));
+ VERIFY(VerifySequence(listB.begin(), listB.end(), int(), "fixed_list::splice", -1));
+
+ // void splice(iterator position, this_type& x, iterator i);
+ it = listA.begin(); eastl::advance(it, 6);
+ listB.splice(listB.begin(), listA, it); // move listA's it (6th element) into the front of listB.
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::splice", 1, 2, 1, 2, 3, 4, 5, 9, 9, 3, 4, 4, 5, 9, 9, -1));
+ VERIFY(VerifySequence(listB.begin(), listB.end(), int(), "fixed_list::splice", 4, -1));
+
+ // void splice(iterator position, this_type& x, iterator first, iterator last);
+ listA.splice(listA.end(), listB, listB.begin(), listB.end()); // move listB into listA at the end of listA.
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::splice", 1, 2, 1, 2, 3, 4, 5, 9, 9, 3, 4, 4, 5, 9, 9, 4, -1));
+ VERIFY(VerifySequence(listB.begin(), listB.end(), int(), "fixed_list::splice", -1));
+ }
+
+
+ {
+ // void unique();
+ // void unique(BinaryPredicate);
+
+ const int kSize = 8;
+ const int A[kSize] = { 1, 2, 3, 4, 4, 5, 9, 9 };
+ const int B[kSize] = { 1, 2, 3, 4, 4, 5, 9, 9 };
+
+ fixed_list<int, 32, true> listA(A, A + kSize);
+ listA.unique();
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::unique", 1, 2, 3, 4, 5, 9, -1));
+
+ fixed_list<int, 32, true> listB(B, B + kSize);
+ listB.unique(eastl::equal_to<int>());
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "fixed_list::unique", 1, 2, 3, 4, 5, 9, -1));
+ }
+
+
+ {
+ // fixed_list(this_type&& x);
+ // fixed_list(this_type&&, const allocator_type&);
+ // this_type& operator=(this_type&& x);
+ fixed_list<TestObject, 16> list3TO33(3, TestObject(33));
+ fixed_list<TestObject, 16> toListA(eastl::move(list3TO33));
+ EATEST_VERIFY((toListA.size() == 3) && (toListA.front().mX == 33) /* && (list3TO33.size() == 0) fixed_list usually can't honor the move request. */);
+
+ // The following is not as strong a test of this ctor as it could be. A stronger test would be to use IntanceAllocator with different instances.
+ fixed_list<TestObject, 16, true, MallocAllocator> list4TO44(4, TestObject(44));
+ fixed_list<TestObject, 16, true, MallocAllocator> toListB(eastl::move(list4TO44), MallocAllocator());
+ EATEST_VERIFY((toListB.size() == 4) && (toListB.front().mX == 44) /* && (list4TO44.size() == 0) fixed_list usually can't honor the move request. */);
+
+ fixed_list<TestObject, 16, true, MallocAllocator> list5TO55(5, TestObject(55));
+ toListB = eastl::move(list5TO55);
+ EATEST_VERIFY((toListB.size() == 5) && (toListB.front().mX == 55) /* && (list5TO55.size() == 0) fixed_list usually can't honor the move request. */);
+ }
+
+
+ {
+ // template <class... Args>
+ // void emplace_front(Args&&... args);
+
+ // template <class... Args>
+ // void emplace_back(Args&&... args);
+
+ // template <class... Args>
+ // iterator emplace(const_iterator position, Args&&... args);
+
+ TestObject::Reset();
+
+ fixed_list<TestObject, 16> toListA;
+
+ toListA.emplace_front(1, 2, 3); // This uses the TestObject(int x0, int x1, int x2, bool bThrowOnCopy) constructor.
+ EATEST_VERIFY((toListA.size() == 1) && (toListA.front().mX == (1+2+3)) && (TestObject::sTOCtorCount == 1));
+
+ toListA.emplace_back(2, 3, 4);
+ EATEST_VERIFY((toListA.size() == 2) && (toListA.back().mX == (2+3+4)) && (TestObject::sTOCtorCount == 2));
+
+ toListA.emplace(toListA.begin(), 3, 4, 5);
+ EATEST_VERIFY((toListA.size() == 3) && (toListA.front().mX == (3+4+5)) && (TestObject::sTOCtorCount == 3));
+
+
+ // This test is similar to the emplace pathway above.
+ TestObject::Reset();
+
+ // void push_front(T&& x);
+ // void push_back(T&& x);
+ // iterator insert(const_iterator position, T&& x);
+
+ fixed_list<TestObject, 16> toListC;
+
+ toListC.push_front(TestObject(1, 2, 3));
+ EATEST_VERIFY((toListC.size() == 1) && (toListC.front().mX == (1+2+3)) && (TestObject::sTOMoveCtorCount == 1));
+
+ toListC.push_back(TestObject(2, 3, 4));
+ EATEST_VERIFY((toListC.size() == 2) && (toListC.back().mX == (2+3+4)) && (TestObject::sTOMoveCtorCount == 2));
+
+ toListC.insert(toListC.begin(), TestObject(3, 4, 5));
+ EATEST_VERIFY((toListC.size() == 3) && (toListC.front().mX == (3+4+5)) && (TestObject::sTOMoveCtorCount == 3));
+ }
+
+
+ {
+ // list(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ // void assign(std::initializer_list<value_type> ilist);
+ // iterator insert(iterator position, std::initializer_list<value_type> ilist);
+ list<int> intList = { 0, 1, 2 };
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "list std::initializer_list", 0, 1, 2, -1));
+
+ intList = { 13, 14, 15 };
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "list std::initializer_list", 13, 14, 15, -1));
+
+ intList.assign({ 16, 17, 18 });
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "list std::initializer_list", 16, 17, 18, -1));
+
+ intList.insert(intList.begin(), { 14, 15 });
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "list std::initializer_list", 14, 15, 16, 17, 18, -1));
+ }
+
+
+ { // Regression of user test
+ struct Dummy
+ {
+ typedef eastl::fixed_list<FixedListTest::Item, 10, false> TCollection;
+
+ TCollection mCollection1;
+ TCollection mCollection2;
+ };
+
+ Dummy d;
+ VERIFY(d.mCollection1.size() == d.mCollection2.size());
+ }
+
+
+ {
+ // Test construction of a container with an overflow allocator constructor argument.
+ MallocAllocator overflowAllocator;
+ void* p = overflowAllocator.allocate(1);
+ fixed_list<int, 64, true, MallocAllocator> c(overflowAllocator);
+ c.resize(65);
+ VERIFY(c.get_overflow_allocator().mAllocCount == 2); // 1 for above, and 1 for overflowing from 64 to 65.
+ overflowAllocator.deallocate(p, 1);
+ }
+
+
+ // We can't do this, due to how Reset is used above:
+ // EATEST_VERIFY(TestObject::IsClear());
+ EATEST_VERIFY(TestObject::sMagicErrorCount == 0);
+ TestObject::Reset();
+
+
+ return nErrorCount;
+}
+EA_RESTORE_VC_WARNING()
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestFixedMap.cpp b/EASTL/test/source/TestFixedMap.cpp
new file mode 100644
index 0000000..6df97f0
--- /dev/null
+++ b/EASTL/test/source/TestFixedMap.cpp
@@ -0,0 +1,185 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include "TestMap.h"
+#include <EASTL/fixed_map.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <map>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::fixed_map <int, float, 1>;
+template class eastl::fixed_multimap<float, int, 1>;
+template class eastl::fixed_map <int, TestObject, 1>;
+template class eastl::fixed_multimap<TestObject, int, 1>;
+
+template class eastl::fixed_map <int, float, 1, true, eastl::less<int>, MallocAllocator>;
+template class eastl::fixed_multimap<float, int, 1, true, eastl::less<float>, MallocAllocator>;
+template class eastl::fixed_map <int, TestObject, 1, true, eastl::less<int>, MallocAllocator>;
+template class eastl::fixed_multimap<TestObject, int, 1, true, eastl::less<TestObject>, MallocAllocator>;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// typedefs
+//
+ const eastl_size_t kContainerSize = 1000;
+
+typedef eastl::fixed_map<int, int, kContainerSize> VM1;
+typedef eastl::fixed_map<TestObject, TestObject, kContainerSize> VM4;
+typedef eastl::fixed_multimap<int, int, kContainerSize> VMM1;
+typedef eastl::fixed_multimap<TestObject, TestObject, kContainerSize> VMM4;
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ typedef std::map<int, int> VM3;
+ typedef std::map<TestObject, TestObject> VM6;
+ typedef std::multimap<int, int> VMM3;
+ typedef std::multimap<TestObject, TestObject> VMM6;
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+EA_DISABLE_VC_WARNING(6262)
+int TestFixedMap()
+{
+ int nErrorCount = 0;
+
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ { // Test construction
+ nErrorCount += TestMapConstruction<VM1, VM3, false>();
+ nErrorCount += TestMapConstruction<VM4, VM6, false>();
+
+ nErrorCount += TestMapConstruction<VMM1, VMM3, true>();
+ nErrorCount += TestMapConstruction<VMM4, VMM6, true>();
+ }
+
+
+ { // Test mutating functionality.
+ nErrorCount += TestMapMutation<VM1, VM3, false>();
+ nErrorCount += TestMapMutation<VM4, VM6, false>();
+
+ nErrorCount += TestMapMutation<VMM1, VMM3, true>();
+ nErrorCount += TestMapMutation<VMM4, VMM6, true>();
+ }
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+ { // Test searching functionality.
+ nErrorCount += TestMapSearch<VM1, false>();
+ nErrorCount += TestMapSearch<VM4, false>();
+
+ nErrorCount += TestMapSearch<VMM1, true>();
+ nErrorCount += TestMapSearch<VMM4, true>();
+ }
+
+
+ {
+ // C++11 emplace and related functionality
+ nErrorCount += TestMapCpp11<eastl::fixed_map<int, TestObject, 32> >();
+
+ nErrorCount += TestMultimapCpp11<eastl::fixed_multimap<int, TestObject, 32> >();
+
+ nErrorCount += TestMapCpp11NonCopyable<eastl::fixed_map<int, NonCopyable, 32>>();
+ }
+
+ {
+ // C++17 try_emplace and related functionality
+ nErrorCount += TestMapCpp17<eastl::fixed_map<int, TestObject, 32>>();
+ }
+
+
+ { // Test functionality specific to fixed size containers.
+
+ VM1 vm1;
+ VMM1 vmm1;
+
+ VERIFY(vm1.max_size() == kContainerSize);
+ VERIFY(vmm1.max_size() == kContainerSize);
+ }
+
+
+ { // Regression of bug report by Eric Turmel, May 20, 2008
+ typedef eastl::fixed_map<int, TestObject, 37, false> FixedMap;
+ VERIFY(FixedMap::kMaxSize == 37);
+
+ FixedMap fixedMap;
+ FixedMap::fixed_allocator_type& a = fixedMap.get_allocator();
+
+ for(int i = 0; i < FixedMap::kMaxSize; i++)
+ {
+ VERIFY(a.can_allocate());
+
+ fixedMap.insert(FixedMap::value_type(i, TestObject(i)));
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ // Disabled because mPool is (mistakenly) inaccessible.
+ // VERIFY((a.mPool.mnCurrentSize == a.mPool.mnPeakSize) && (a.mPool.mnCurrentSize == i));
+ #endif
+ }
+
+ VERIFY(!a.can_allocate());
+ }
+
+ {
+ // Test fixed set with overflow and alignment requirements.
+ typedef fixed_map<Align64, int, 1, true> FixedMapWithAlignment;
+ typedef fixed_multimap<Align64, int, 1, true> FixedMultiMapWithAlignment;
+
+ FixedMapWithAlignment fm;
+ FixedMultiMapWithAlignment fmm;
+
+ Align64 a; a.mX = 1;
+ Align64 b; b.mX = 2;
+ Align64 c; c.mX = 3;
+ Align64 d; d.mX = 4;
+ Align64 e; e.mX = 5;
+
+ fm.insert(a);
+ fm.insert(b);
+ fm.insert(c);
+ fm.insert(d);
+ fm.insert(e);
+ for (FixedMapWithAlignment::const_iterator it = fm.begin(); it != fm.end(); ++it)
+ {
+ const Align64* ptr = &((*it).first);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+
+ fmm.insert(a);
+ fmm.insert(b);
+ fmm.insert(c);
+ fmm.insert(d);
+ fmm.insert(e);
+ for (FixedMultiMapWithAlignment::const_iterator it = fmm.begin(); it != fmm.end(); ++it)
+ {
+ const Align64* ptr = &((*it).first);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ }
+
+
+ return nErrorCount;
+}
+EA_RESTORE_VC_WARNING()
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestFixedSList.cpp b/EASTL/test/source/TestFixedSList.cpp
new file mode 100644
index 0000000..6620c79
--- /dev/null
+++ b/EASTL/test/source/TestFixedSList.cpp
@@ -0,0 +1,313 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/fixed_slist.h>
+#include <EABase/eabase.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+
+#include <stdio.h>
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::fixed_slist<int, 1, true, EASTLAllocatorType>;
+template class eastl::fixed_slist<int, 1, false, EASTLAllocatorType>;
+
+
+/*
+// This does not compile, since the fixed_slist allocator is templated on sizeof(T),
+// not just T. Thus, the full type is required at the time of instantiation, but it
+// is not available.
+// See EATech Core JIRA issue ETCR-1608 for more information.
+struct StructWithContainerOfStructs
+{
+ eastl::fixed_slist<StructWithContainerOfStructs,4> children;
+};
+*/
+
+
+int TestFixedSList()
+{
+ int nErrorCount = 0;
+
+ {
+ fixed_slist<int, 64> list0101;
+ VERIFY(list0101.empty());
+ VERIFY(list0101.size() == 0);
+ VERIFY(list0101.max_size() == 64);
+
+ list0101.push_front(1);
+ VERIFY(!list0101.empty());
+ VERIFY(list0101.size() == 1);
+
+ list0101.resize(3, 2);
+ VERIFY(!list0101.empty());
+ VERIFY(list0101.size() == 3);
+
+ fixed_slist<int, 64>::iterator i = list0101.begin();
+ VERIFY(*i == 1); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(i == list0101.end());
+
+ list0101.resize(0);
+ VERIFY(list0101.empty());
+ VERIFY(list0101.size() == 0);
+ }
+
+ {
+ fixed_slist<int, 64, true, MallocAllocator> list0101;
+ VERIFY(list0101.empty());
+ VERIFY(list0101.size() == 0);
+ VERIFY(list0101.max_size() == 64);
+
+ list0101.push_front(1);
+ VERIFY(!list0101.empty());
+ VERIFY(list0101.size() == 1);
+
+ list0101.resize(3, 2);
+ VERIFY(!list0101.empty());
+ VERIFY(list0101.size() == 3);
+
+ fixed_slist<int, 64>::iterator i = list0101.begin();
+ VERIFY(*i == 1); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(*i == 2); ++i;
+ VERIFY(i == list0101.end());
+
+ while(list0101.size() < 64 + 16)
+ list0101.push_front(0);
+
+ list0101.resize(0);
+ VERIFY(list0101.empty());
+ VERIFY(list0101.size() == 0);
+ }
+
+ {
+ // Test fixed slist with overflow and alignment requirements.
+ typedef fixed_slist<Align64, 1, true, CustomAllocator> FixedSListWithAlignment;
+
+ FixedSListWithAlignment fsl;
+
+ Align64 a;
+
+ fsl.push_front(a);
+ fsl.push_front(a);
+ fsl.push_front(a);
+ fsl.push_front(a);
+ fsl.push_front(a);
+ for (FixedSListWithAlignment::const_iterator it = fsl.begin(); it != fsl.end(); ++it)
+ {
+ const Align64* ptr = &(*it);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ }
+
+ {
+ // bool empty() const
+ // bool has_overflowed() const
+ // size_type size() const;
+ // size_type max_size() const
+
+ // Test a list that has overflow disabled.
+ fixed_slist<int, 5, false> listInt5;
+
+ VERIFY(listInt5.max_size() == 5);
+ VERIFY(listInt5.size() == 0);
+ VERIFY(listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_front(37);
+ listInt5.push_front(37);
+ listInt5.push_front(37);
+
+ VERIFY(listInt5.size() == 3);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_front(37);
+ listInt5.push_front(37);
+
+ VERIFY(listInt5.size() == 5);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.pop_front();
+
+ VERIFY(listInt5.size() == 4);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+ }
+
+
+ {
+ // bool empty() const
+ // bool has_overflowed() const
+ // size_type size() const;
+ // size_type max_size() const
+
+ // Test a list that has overflow enabled.
+ fixed_slist<int, 5, true> listInt5;
+
+ VERIFY(listInt5.max_size() == 5);
+ VERIFY(listInt5.size() == 0);
+ VERIFY(listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_front(37);
+ listInt5.push_front(37);
+ listInt5.push_front(37);
+
+ VERIFY(listInt5.size() == 3);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_front(37);
+ listInt5.push_front(37);
+
+ VERIFY(listInt5.size() == 5);
+ VERIFY(!listInt5.empty());
+ VERIFY(!listInt5.has_overflowed());
+
+ listInt5.push_front(37);
+
+ VERIFY(listInt5.size() == 6);
+ VERIFY(!listInt5.empty());
+ VERIFY(listInt5.has_overflowed());
+
+ listInt5.pop_front();
+
+ VERIFY(listInt5.size() == 5);
+ VERIFY(!listInt5.empty());
+ //VERIFY(listInt5.has_overflowed()); Disabled because currently has_overflowed can't detect this situation in non-debug builds.
+ }
+
+
+ {
+ // fixed_slist(this_type&& x);
+ // fixed_slist(this_type&&, const allocator_type&);
+ // this_type& operator=(this_type&& x);
+
+ fixed_slist<TestObject, 16> slist3TO33(3, TestObject(33));
+ fixed_slist<TestObject, 16> toListA(eastl::move(slist3TO33));
+ EATEST_VERIFY((toListA.size() == 3) && (toListA.front().mX == 33) /* && (slist3TO33.size() == 0) fixed_list usually can't honor the move request. */);
+
+ // The following is not as strong a test of this ctor as it could be. A stronger test would be to use IntanceAllocator with different instances.
+ fixed_slist<TestObject, 16, true, MallocAllocator> slist4TO44(4, TestObject(44));
+ fixed_slist<TestObject, 16, true, MallocAllocator> toListB(eastl::move(slist4TO44), MallocAllocator());
+ EATEST_VERIFY((toListB.size() == 4) && (toListB.front().mX == 44) /* && (slist4TO44.size() == 0) fixed_list usually can't honor the move request. */);
+
+ fixed_slist<TestObject, 16, true, MallocAllocator> slist5TO55(5, TestObject(55));
+ toListB = eastl::move(slist5TO55);
+ EATEST_VERIFY((toListB.size() == 5) && (toListB.front().mX == 55) /* && (slist5TO55.size() == 0) fixed_list usually can't honor the move request. */);
+ }
+
+
+ {
+ // template <class... Args>
+ // void emplace_front(Args&&... args);
+
+ // template <class... Args>
+ // iterator emplace_after(const_iterator position, Args&&... args);
+
+ TestObject::Reset();
+
+ fixed_slist<TestObject, 16> toListA;
+
+ toListA.emplace_front(1, 2, 3); // This uses the TestObject(int x0, int x1, int x2, bool bThrowOnCopy) constructor.
+ EATEST_VERIFY((toListA.size() == 1) && (toListA.front().mX == (1+2+3)) && (TestObject::sTOCtorCount == 1));
+
+ toListA.emplace_after(toListA.before_begin(), 3, 4, 5);
+ EATEST_VERIFY((toListA.size() == 2) && (toListA.front().mX == (3+4+5)) && (TestObject::sTOCtorCount == 2));
+
+
+ // This test is similar to the emplace pathway above.
+ TestObject::Reset();
+
+ // void push_front(T&& x);
+ // iterator insert(const_iterator position, T&& x);
+
+ fixed_slist<TestObject, 16> toListC;
+
+ toListC.push_front(TestObject(1, 2, 3));
+ EATEST_VERIFY((toListC.size() == 1) && (toListC.front().mX == (1+2+3)) && (TestObject::sTOMoveCtorCount == 1));
+
+ toListC.insert_after(toListC.before_begin(), TestObject(3, 4, 5));
+ EATEST_VERIFY((toListC.size() == 2) && (toListC.front().mX == (3+4+5)) && (TestObject::sTOMoveCtorCount == 2));
+ }
+
+
+ {
+ // slist(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_SLIST_DEFAULT_ALLOCATOR);
+ // this_type& operator=(std::initializer_list<value_type>);
+ // void assign(std::initializer_list<value_type> ilist);
+ // iterator insert_after(iterator position, std::initializer_list<value_type> ilist);
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ fixed_slist<int, 8> intList = { 0, 1, 2 };
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "fixed_slist std::initializer_list", 0, 1, 2, -1));
+
+ intList = { 13, 14, 15 };
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "fixed_slist std::initializer_list", 13, 14, 15, -1));
+
+ intList.assign({ 16, 17, 18 });
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "fixed_slist std::initializer_list", 16, 17, 18, -1));
+
+ fixed_slist<int, 8>::iterator it = intList.insert_after(intList.before_begin(), { 14, 15 });
+ EATEST_VERIFY(VerifySequence(intList.begin(), intList.end(), int(), "fixed_slist std::initializer_list", 14, 15, 16, 17, 18, -1));
+ EATEST_VERIFY(*it == 15); // Note that slist::insert_after returns the last inserted element, not the first as with list::insert.
+ #endif
+ }
+
+
+ {
+ // Test construction of a container with an overflow allocator constructor argument.
+ //
+ // GCC 4.4 has a hard time compiling this code correctly in optimized builds as it
+ // omits the increment of the mAllocCount field when calling overflowAllocator.allocate.
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION == 4004)
+ MallocAllocator overflowAllocator;
+ fixed_slist<int, 64, true, MallocAllocator> c(overflowAllocator);
+ c.resize(65);
+ VERIFY(c.get_overflow_allocator().mAllocCount == 1); // 1 for overflowing from 64 to 65.
+ #else
+ MallocAllocator overflowAllocator;
+ void* p = overflowAllocator.allocate(1);
+ fixed_slist<int, 64, true, MallocAllocator> c(overflowAllocator);
+ c.resize(65);
+ VERIFY(c.get_overflow_allocator().mAllocCount == 2); // 1 for above, and 1 for overflowing from 64 to 65.
+ overflowAllocator.deallocate(p, 1);
+ #endif
+ }
+
+
+ // We can't do this, due to how Reset is used above:
+ // EATEST_VERIFY(TestObject::IsClear());
+ EATEST_VERIFY(TestObject::sMagicErrorCount == 0);
+ TestObject::Reset();
+
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestFixedSet.cpp b/EASTL/test/source/TestFixedSet.cpp
new file mode 100644
index 0000000..8bfbe90
--- /dev/null
+++ b/EASTL/test/source/TestFixedSet.cpp
@@ -0,0 +1,207 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include "TestSet.h"
+#include <EASTL/fixed_set.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <set>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::fixed_set <int, 1>;
+template class eastl::fixed_multiset<float, 1>;
+template class eastl::fixed_set <Align64, 1>;
+template class eastl::fixed_multiset<TestObject, 1>;
+
+
+template class eastl::fixed_set <int, 1, true, eastl::less<int>, MallocAllocator>;
+template class eastl::fixed_multiset<float, 1, true, eastl::less<float>, MallocAllocator>;
+template class eastl::fixed_set <Align64, 1, true, eastl::less<Align64>, MallocAllocator>;
+template class eastl::fixed_multiset<TestObject, 1, true, eastl::less<TestObject>, MallocAllocator>;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// typedefs
+//
+const eastl_size_t kContainerSize = 1000;
+
+typedef eastl::fixed_set<int, kContainerSize> VS1;
+typedef eastl::fixed_set<TestObject, kContainerSize> VS4;
+typedef eastl::fixed_multiset<int, kContainerSize> VMS1;
+typedef eastl::fixed_multiset<TestObject, kContainerSize> VMS4;
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ typedef std::set<int> VS3;
+ typedef std::set<TestObject> VS6;
+ typedef std::multiset<int> VMS3;
+ typedef std::multiset<TestObject> VMS6;
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+EA_DISABLE_VC_WARNING(6262)
+int TestFixedSet()
+{
+ int nErrorCount = 0;
+
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ { // Test construction
+ nErrorCount += TestSetConstruction<VS1, VS3, false>();
+ nErrorCount += TestSetConstruction<VS4, VS6, false>();
+
+ nErrorCount += TestSetConstruction<VMS1, VMS3, true>();
+ nErrorCount += TestSetConstruction<VMS4, VMS6, true>();
+ }
+
+
+ { // Test mutating functionality.
+ nErrorCount += TestSetMutation<VS1, VS3, false>();
+ nErrorCount += TestSetMutation<VS4, VS6, false>();
+
+ nErrorCount += TestSetMutation<VMS1, VMS3, true>();
+ nErrorCount += TestSetMutation<VMS4, VMS6, true>();
+ }
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+ { // Test searching functionality.
+ nErrorCount += TestSetSearch<VS1, false>();
+ nErrorCount += TestSetSearch<VS4, false>();
+
+ nErrorCount += TestSetSearch<VMS1, true>();
+ nErrorCount += TestSetSearch<VMS4, true>();
+ }
+
+
+ {
+ // C++11 emplace and related functionality
+ nErrorCount += TestSetCpp11<eastl::fixed_set<TestObject, 32> >();
+
+ nErrorCount += TestMultisetCpp11<eastl::fixed_multiset<TestObject, 32> >();
+ }
+
+
+ { // Test functionality specific to fixed size containers.
+
+ VS1 vs1;
+ VMS1 vms1;
+
+ VERIFY(vs1.max_size() == kContainerSize);
+ VERIFY(vms1.max_size() == kContainerSize);
+ }
+
+
+ {
+ // Test version *without* pool overflow.
+ typedef eastl::fixed_set<int, 100, false> FixedSetFalse;
+ FixedSetFalse fixedSet;
+
+ fixedSet.insert(FixedSetFalse::value_type(0));
+ VERIFY(fixedSet.size() == 1);
+
+ fixedSet.clear();
+ VERIFY(fixedSet.size() == 0);
+
+ for(int i = 0; fixedSet.size() < 100; i++)
+ fixedSet.insert(FixedSetFalse::value_type(i));
+ VERIFY(fixedSet.size() == 100);
+
+ // Verify that we allocated enough space for exactly N items.
+ // It's possible that due to alignments, there might be room for N + 1.
+ FixedSetFalse::allocator_type& allocator = fixedSet.get_allocator();
+ void* pResult = allocator.allocate(sizeof(FixedSetFalse::node_type));
+ if(pResult)
+ {
+ pResult = allocator.allocate(sizeof(FixedSetFalse::node_type));
+ VERIFY(pResult == NULL);
+ }
+ }
+
+
+ {
+ // Test version *with* pool overflow.
+ typedef eastl::fixed_set<int, 100, true> FixedSetTrue;
+ FixedSetTrue fixedSet;
+
+ fixedSet.insert(FixedSetTrue::value_type(0));
+ VERIFY(fixedSet.size() == 1);
+
+ fixedSet.clear();
+ VERIFY(fixedSet.size() == 0);
+
+ for(int i = 0; fixedSet.size() < 100; i++)
+ fixedSet.insert(FixedSetTrue::value_type(i));
+ VERIFY(fixedSet.size() == 100);
+
+ FixedSetTrue::allocator_type& allocator = fixedSet.get_allocator();
+ void* pResult = allocator.allocate(sizeof(FixedSetTrue::node_type));
+ VERIFY(pResult != NULL);
+ allocator.deallocate(pResult, sizeof(FixedSetTrue::node_type));
+
+ // get_overflow_allocator / set_overflow_allocator
+ // This is a weak test which should be improved.
+ EASTLAllocatorType a = fixedSet.get_allocator().get_overflow_allocator();
+ fixedSet.get_allocator().set_overflow_allocator(a);
+ }
+
+ {
+ // Test fixed set with overflow and alignment requirements.
+ typedef fixed_set<Align64, 1, true> FixedSetWithAlignment;
+ typedef fixed_multiset<Align64, 1, true> FixedMultiSetWithAlignment;
+
+ FixedSetWithAlignment fs;
+ FixedMultiSetWithAlignment fms;
+
+ Align64 a; a.mX = 1;
+ Align64 b; b.mX = 2;
+ Align64 c; c.mX = 3;
+ Align64 d; d.mX = 4;
+ Align64 e; e.mX = 5;
+
+ fs.insert(a);
+ fs.insert(b);
+ fs.insert(c);
+ fs.insert(d);
+ fs.insert(e);
+ for (FixedSetWithAlignment::const_iterator it = fs.begin(); it != fs.end(); ++it)
+ {
+ const Align64* ptr = &(*it);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ fms.insert(a);
+ fms.insert(b);
+ fms.insert(c);
+ fms.insert(d);
+ fms.insert(e);
+ for (FixedMultiSetWithAlignment::const_iterator it = fms.begin(); it != fms.end(); ++it)
+ {
+ const Align64* ptr = &(*it);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ }
+ return nErrorCount;
+}
+EA_RESTORE_VC_WARNING()
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestFixedString.cpp b/EASTL/test/source/TestFixedString.cpp
new file mode 100644
index 0000000..8528dc7
--- /dev/null
+++ b/EASTL/test/source/TestFixedString.cpp
@@ -0,0 +1,500 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include <EABase/eabase.h>
+EA_DISABLE_GCC_WARNING(-Warray-bounds)
+
+#include "EASTLTest.h"
+#include <EASTL/fixed_string.h>
+#include <EASTL/fixed_substring.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+
+#include <string.h>
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+
+
+using namespace eastl;
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::fixed_string<char8_t, 1, true>;
+template class eastl::fixed_string<char16_t, 1, true>;
+template class eastl::fixed_string<char32_t, 1, true>;
+
+template class eastl::fixed_string<char8_t, 128, false>;
+template class eastl::fixed_string<char16_t, 128, false>;
+template class eastl::fixed_string<char32_t, 128, false>;
+
+template class eastl::fixed_string<char8_t, 128, true, MallocAllocator>;
+template class eastl::fixed_string<char16_t, 128, true, MallocAllocator>;
+template class eastl::fixed_string<char32_t, 128, true, MallocAllocator>;
+
+template class eastl::fixed_string<char8_t, 128, false, MallocAllocator>;
+template class eastl::fixed_string<char16_t, 128, false, MallocAllocator>;
+template class eastl::fixed_string<char32_t, 128, false, MallocAllocator>;
+
+template class eastl::fixed_substring<char8_t>;
+template class eastl::fixed_substring<char16_t>;
+
+
+
+
+/*
+// This does not compile, since the fixed_string allocator (among other things) is
+// templated on sizeof(T), not just T. Thus, the full type is required at the time
+// of instantiation, but it is not available.
+// See EATech Core JIRA issue ETCR-1608 for more information.
+struct StructWithContainerOfStructs
+{
+ eastl::fixed_string<StructWithContainerOfStructs,4> children;
+};
+*/
+
+
+int TestFixedSubstring()
+{
+ int nErrorCount = 0;
+
+ {
+ const char* pSource1 = "hello world";
+ const char* pSource2 = "hola mundo";
+
+ basic_string<char> str(pSource1);
+ fixed_substring<char> sub(str, 2, 5);
+
+ EATEST_VERIFY(sub.size() == 5);
+ EATEST_VERIFY(sub[0] == 'l');
+ EATEST_VERIFY(sub == "llo w");
+
+ sub.assign(pSource2);
+ EATEST_VERIFY(sub.size() == 10);
+ EATEST_VERIFY(sub[0] == pSource2[0]);
+ EATEST_VERIFY(sub == pSource2);
+
+ fixed_substring<char> sub2(sub);
+ EATEST_VERIFY(sub2.size() == 10);
+ EATEST_VERIFY(sub2[0] == pSource2[0]);
+ EATEST_VERIFY(sub2 == pSource2);
+
+ sub.assign(sub2, 1, 3);
+ EATEST_VERIFY(sub.size() == 3);
+ EATEST_VERIFY(sub == "ola");
+
+ sub.assign(pSource2, 3);
+ EATEST_VERIFY(sub.size() == 3);
+ EATEST_VERIFY(sub == "hol");
+
+ sub.assign(pSource2, pSource2 + 4);
+ EATEST_VERIFY(sub.size() == 4);
+ EATEST_VERIFY(sub == "hola");
+
+ sub = pSource1;
+ EATEST_VERIFY(sub.size() == strlen(pSource1));
+ EATEST_VERIFY(sub == pSource1);
+ }
+
+
+ { // Test fixed_substring with a C character array
+ char pArray[256];
+ fixed_substring<char> str(pArray, 255);
+
+ str.resize(5);
+ EATEST_VERIFY(str.size() == 5);
+
+ str[0] = 'a';
+ EATEST_VERIFY(str[0] == 'a');
+
+ str.sprintf("Hello %s", "world");
+ EATEST_VERIFY(str == "Hello world");
+
+ str += " Hola mundo";
+ EATEST_VERIFY(str == "Hello world Hola mundo");
+
+ str.pop_back();
+ EATEST_VERIFY(str == "Hello world Hola mund");
+
+ str.replace(6, 5, "abcdefghijlk");
+ EATEST_VERIFY(str == "Hello abcdefghijlk Hola mund");
+
+ str.clear();
+ EATEST_VERIFY(str.empty());
+ EATEST_VERIFY(str == "");
+ }
+
+
+ {
+ // Check that copies/moves don't become independent strings.
+ // They should all point to the same sub-string.
+ string str = "hello world";
+ fixed_substring<char> sub(str, 2, 5);
+
+ EATEST_VERIFY(sub.size() == 5);
+ EATEST_VERIFY(sub[0] == 'l');
+ EATEST_VERIFY(sub == "llo w");
+
+ vector<fixed_substring<char>> v;
+ for (eastl_size_t i = 0; i < 1000; ++i) {
+ v.push_back(sub);
+ }
+
+ sub[0] = 'g';
+ EATEST_VERIFY(str == "heglo world");
+ EATEST_VERIFY(sub == "glo w");
+
+ for (const auto& s : v){
+ EATEST_VERIFY(s == "glo w");
+ }
+
+ // copy construct
+ fixed_substring<char> sub2 = sub;
+
+ // copy assign
+ fixed_substring<char> sub3;
+ sub3 = sub;
+
+ // move construct
+ fixed_substring<char> sub4 = eastl::move(sub);
+
+ // move assign
+ fixed_substring<char> sub_again(str, 2, 5);
+ fixed_substring<char> sub5;
+ sub5 = eastl::move(sub_again);
+
+ EATEST_VERIFY(sub2 == "glo w");
+ EATEST_VERIFY(sub3 == "glo w");
+ EATEST_VERIFY(sub4 == "glo w");
+ EATEST_VERIFY(sub5 == "glo w");
+
+ str[5] = 'g';
+ EATEST_VERIFY(sub2 == "glogw");
+ EATEST_VERIFY(sub3 == "glogw");
+ EATEST_VERIFY(sub4 == "glogw");
+ EATEST_VERIFY(sub5 == "glogw");
+
+ }
+
+ return nErrorCount;
+}
+
+
+int TestFixedString()
+{
+ int nErrorCount = 0;
+
+ {
+ fixed_string<char, 64>::CtorSprintf cs;
+
+ fixed_string<char, 64> s8(cs, "hello world %d.", 1);
+ EATEST_VERIFY(s8 == "hello world 1.");
+ EATEST_VERIFY(s8.capacity() == 63); // 63 because the 64 includes the terminating 0, but capacity() subtracts the terminating 0 usage.
+ EATEST_VERIFY(s8.max_size() == 63);
+
+ s8.append_sprintf(" More hello %d.", 2);
+ EATEST_VERIFY(s8 == "hello world 1. More hello 2.");
+ EATEST_VERIFY(s8.capacity() == 63);
+ }
+
+
+ {
+ fixed_string<wchar_t, 64>::CtorSprintf cs;
+
+ fixed_string<wchar_t, 64> sW(cs, L"hello world %d.", 1);
+ EATEST_VERIFY(sW == L"hello world 1.");
+ EATEST_VERIFY(sW.capacity() == 63); // 63 because the 64 includes the terminating 0, but capacity() subtracts the terminating 0 usage.
+
+ sW.append_sprintf(L" More hello %d.", 2);
+ EATEST_VERIFY(sW == L"hello world 1. More hello 2.");
+ EATEST_VERIFY(sW.capacity() == 63); // 63 because the 64 includes the terminating 0, but capacity() subtracts the terminating 0 usage.
+ }
+
+
+ {
+ typedef fixed_string<char8_t, 64, true> FixedString64;
+ typedef fixed_string<char8_t, 64, false> FixedString64NoOverflow;
+ FixedString64::CtorSprintf cs;
+ FixedString64::CtorDoNotInitialize cdni;
+
+ // fixed_string();
+ FixedString64 fs1;
+ EATEST_VERIFY(fs1.size() == 0);
+ EATEST_VERIFY(fs1.capacity() == 63);
+
+ FixedString64NoOverflow fsNo;
+ EATEST_VERIFY(fs1.can_overflow() == true);
+ EATEST_VERIFY(fsNo.can_overflow() == false);
+ EATEST_VERIFY(fs1.full() == false);
+ EATEST_VERIFY(fs1.has_overflowed() == false);
+
+ const char8_t* pCStr = fs1.c_str();
+ EATEST_VERIFY(*pCStr == 0);
+
+ // fixed_string(const this_type& x);
+ FixedString64 fs2(fs1);
+ EATEST_VERIFY(fs2.size() == 0);
+ EATEST_VERIFY(fs2.capacity() == 63);
+
+ fs1 = EA_CHAR8("abc");
+ FixedString64 fs3(fs1);
+ EATEST_VERIFY(fs3.size() == 3);
+ EATEST_VERIFY(fs3.capacity() == 63);
+ EATEST_VERIFY(fs3 == EA_CHAR8("abc"));
+
+ // fixed_string(const this_type& x, size_type position, size_type n = npos);
+ FixedString64 fs4(fs1, 1, 2);
+ EATEST_VERIFY(fs4.size() == 2);
+ EATEST_VERIFY(fs4.capacity() == 63);
+ EATEST_VERIFY(fs4 == EA_CHAR8("bc"));
+
+ // fixed_string(const value_type* p, size_type n);
+ FixedString64 fs5(EA_CHAR8("abcdef"), 6);
+ EATEST_VERIFY(fs5.size() == 6);
+ EATEST_VERIFY(fs5.capacity() == 63);
+ EATEST_VERIFY(fs5 == EA_CHAR8("abcdef"));
+
+ // fixed_string(const value_type* p);
+ FixedString64 fs6(EA_CHAR8("abcdef"));
+ EATEST_VERIFY(fs6.size() == 6);
+ EATEST_VERIFY(fs6.capacity() == 63);
+ EATEST_VERIFY(fs6 == EA_CHAR8("abcdef"));
+
+ // fixed_string(size_type n, const value_type& value);
+ FixedString64 fs7(8, 'a');
+ EATEST_VERIFY(fs7.size() == 8);
+ EATEST_VERIFY(fs7.capacity() == 63);
+ EATEST_VERIFY(fs7 == EA_CHAR8("aaaaaaaa"));
+
+ // fixed_string(const value_type* pBegin, const value_type* pEnd);
+ FixedString64 fs8(&fs6[0], &fs6[5]);
+ EATEST_VERIFY(fs8.size() == 5);
+ EATEST_VERIFY(fs8.capacity() == 63);
+ EATEST_VERIFY(fs8 == EA_CHAR8("abcde"));
+
+ // fixed_string(CtorDoNotInitialize, size_type n);
+ FixedString64 fs9(cdni, 7);
+ EATEST_VERIFY(fs9.size() == 7);
+ EATEST_VERIFY(fs9.capacity() == 63);
+
+ // fixed_string(CtorSprintf, const value_type* pFormat, ...);
+ FixedString64 fs10(cs, EA_CHAR8("%d"), 37);
+ EATEST_VERIFY(fs10.size() == 2);
+ EATEST_VERIFY(fs10.capacity() == 63);
+ EATEST_VERIFY(fs10 == EA_CHAR8("37"));
+
+ // this_type& operator=(const const value_type* p);
+ // this_type& operator=(const this_type& x);
+ fs9 = EA_CHAR8("hello");
+ EATEST_VERIFY(fs9 == EA_CHAR8("hello"));
+
+ fs9 = fs10;
+ EATEST_VERIFY(fs9 == fs10);
+ EATEST_VERIFY(fs9 == EA_CHAR8("37"));
+
+ // void swap(this_type& x);
+ swap(fs7, fs9);
+ EATEST_VERIFY(fs7 == EA_CHAR8("37"));
+ EATEST_VERIFY(fs9 == EA_CHAR8("aaaaaaaa"));
+
+ // void set_capacity(size_type n);
+ fs9.set_capacity(100);
+ EATEST_VERIFY(fs9.size() == 8);
+ EATEST_VERIFY(fs9.capacity() == 100);
+ EATEST_VERIFY(fs9.full() == true);
+ EATEST_VERIFY(fs9.has_overflowed() == true);
+
+ fs9.set_capacity(100); // EATEST_VERIFY that this has no effect.
+ EATEST_VERIFY(fs9.size() == 8);
+ EATEST_VERIFY(fs9.capacity() == 100);
+ EATEST_VERIFY(fs9.full() == true);
+ EATEST_VERIFY(fs9.has_overflowed() == true);
+
+ fs9.resize(100);
+ fs9.set_capacity(100);
+ EATEST_VERIFY(fs9.size() == 100);
+ EATEST_VERIFY(fs9.capacity() == 100);
+ EATEST_VERIFY(fs9.full() == true);
+ EATEST_VERIFY(fs9.has_overflowed() == true);
+
+ fs9.set_capacity(1);
+ EATEST_VERIFY(fs9.size() == 1);
+ EATEST_VERIFY(fs9.capacity() < fs9.max_size()); // We don't test for capacity == 1, because with fixed_strings, the fixed-size capacity is the lowest it ever gets.
+ EATEST_VERIFY(fs9.full() == false);
+ EATEST_VERIFY(fs9.has_overflowed() == false);
+
+ fs9.set_capacity(0);
+ EATEST_VERIFY(fs9.size() == 0);
+ EATEST_VERIFY(fs9.capacity() < fs9.max_size()); // We don't test for capacity == 1, because with fixed_strings, the fixed-size capacity is the lowest it ever gets.
+ EATEST_VERIFY(fs9.full() == false);
+ EATEST_VERIFY(fs9.has_overflowed() == false);
+
+ // Exercise the freeing of memory in set_capacity.
+ fixed_string<char8_t, 64, true> fs88;
+ eastl_size_t capacity = fs88.capacity();
+ fs88.resize(capacity);
+ fs88.set_capacity(capacity * 2);
+ EATEST_VERIFY(fs88.capacity() >= (capacity * 2));
+
+ // void reset_lose_memory();
+ fs6.reset_lose_memory();
+ EATEST_VERIFY(fs6.size() == 0);
+ EATEST_VERIFY(fs5.capacity() == 63);
+
+ // size_type max_size() const;
+ EATEST_VERIFY(fs7.max_size() == 63);
+
+
+ // global operator +
+ {
+ // fixed_string operator+(const fixed_string& a, const fixed_string& b);
+ // fixed_string operator+(value_type* p, const fixed_string& b);
+ // fixed_string operator+(value_type c, const fixed_string& b);
+ // fixed_string operator+(const fixed_string& a, const value_type* p);
+ // fixed_string operator+(const fixed_string& a, value_type c);
+
+ typedef fixed_string<char, 8, true> FSTest; // Make it a small size so it's easily overflowed when we want.
+
+ FSTest a("abc");
+ FSTest b("def");
+ FSTest c(a + b);
+ EATEST_VERIFY(c == "abcdef");
+ c = a + "ghi";
+ EATEST_VERIFY(c == "abcghi");
+ c = "ghi" + a;
+ EATEST_VERIFY(c == "ghiabc");
+ c = a + 'g';
+ EATEST_VERIFY(c == "abcg");
+ c = 'g' + a;
+ EATEST_VERIFY(c == "gabc");
+
+ // fixed_string operator+(fixed_string&& a, fixed_string&& b);
+ // fixed_string operator+(fixed_string&& a, const fixed_string& b);
+ // fixed_string operator+(const value_type* p, fixed_string&& b);
+ // fixed_string operator+(fixed_string&& a, const value_type* p);
+ // fixed_string operator+(fixed_string&& a, value_type b);
+
+ c = eastl::move(a) + eastl::move(b);
+ EATEST_VERIFY(c == "abcdef");
+ c.clear();
+
+ FSTest a1("abc");
+ FSTest b1("def");
+ c = eastl::move(a1) + b1;
+ EATEST_VERIFY(c == "abcdef");
+ c.clear();
+
+ FSTest b2("def");
+ c = "abc" + eastl::move(b2);
+ EATEST_VERIFY(c == "abcdef");
+ c.clear();
+
+ FSTest a3("abc");
+ c = eastl::move(a3) + "def";
+ EATEST_VERIFY(c == "abcdef");
+ c.clear();
+
+ FSTest a4("abc");
+ c = eastl::move(a4) + 'd';
+ EATEST_VERIFY(c == "abcd");
+ c.clear();
+ }
+
+
+ // bool operator==(const fixed_string<& a, const fixed_string& b)
+ // bool operator!=(const fixed_string<& a, const fixed_string& b)
+ EATEST_VERIFY( fs7 != fs8);
+ EATEST_VERIFY(!(fs7 == fs8));
+ fs7 = fs8;
+ EATEST_VERIFY( fs7 == fs8);
+ EATEST_VERIFY(!(fs7 != fs8));
+ }
+
+
+ { // Test overflow allocator specification
+
+ typedef fixed_string<char8_t, 64, true, MallocAllocator> FixedString64Malloc;
+
+ FixedString64Malloc fs;
+
+ fs.push_back('a');
+ EATEST_VERIFY(fs.size() == 1);
+ EATEST_VERIFY(fs[0] == 'a');
+
+ fs.resize(95);
+ fs[94] = 'b';
+ EATEST_VERIFY(fs[0] == 'a');
+ EATEST_VERIFY(fs[94] == 'b');
+ EATEST_VERIFY(fs.size() == 95);
+
+ fs.clear();
+ EATEST_VERIFY(fs.empty());
+
+ fs.push_back('a');
+ EATEST_VERIFY(fs.size() == 1);
+ EATEST_VERIFY(fs[0] == 'a');
+
+ fs.resize(195);
+ fs[194] = 'b';
+ EATEST_VERIFY(fs[0] == 'a');
+ EATEST_VERIFY(fs[194] == 'b');
+ EATEST_VERIFY(fs.size() == 195);
+ }
+
+ {
+ // Test construction of a container with an overflow allocator constructor argument.
+ MallocAllocator overflowAllocator;
+ void* p = overflowAllocator.allocate(1);
+ fixed_string<char8_t, 64, true, MallocAllocator> c(overflowAllocator);
+ c.resize(65);
+ EATEST_VERIFY(c.get_overflow_allocator().mAllocCount == 2); // 1 for above, and 1 for overflowing from 64 to 65.
+ overflowAllocator.deallocate(p, 1);
+ }
+
+ {
+ // Regression for compile failure when EASTL_NO_RVALUE_REFERENCES is 0.
+ typedef eastl::fixed_string<char, 32, true, MallocAllocator> TestString;
+
+ TestString ts1;
+ TestString ts2(ts1 + "Test");
+
+ EATEST_VERIFY(ts1.empty() && ts2.size() == 4);
+ }
+
+ {
+ // Test equality tests of differently-sized fixed_strings.
+
+ /* Disabled because this isn't currently supported by fixed_string.
+ typedef fixed_string<char8_t, 64, true, MallocAllocator> FixedString64Malloc;
+ typedef fixed_string<char8_t, 32> FixedString32;
+
+ FixedString64Malloc s64M;
+ FixedString32 s32;
+
+ EATEST_VERIFY(s64M == s32);
+ */
+ }
+
+ nErrorCount += TestFixedSubstring();
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestFixedTupleVector.cpp b/EASTL/test/source/TestFixedTupleVector.cpp
new file mode 100644
index 0000000..dbeb3fc
--- /dev/null
+++ b/EASTL/test/source/TestFixedTupleVector.cpp
@@ -0,0 +1,1594 @@
+/////////////////////////////////////////////////////////////////////////////
+// TestFixedTupleVector.cpp
+//
+// Copyright (c) 2018, Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+
+#include <EASTL/bonus/fixed_tuple_vector.h>
+
+#include <EASTL/sort.h>
+
+using namespace eastl;
+
+template <size_t nodeCount, bool bEnableOverflow>
+int TestFixedTupleVectorVariant()
+{
+ int nErrorCount = 0;
+
+ // Test uninit'ed push-backs
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int> singleElementVec;
+ EATEST_VERIFY(singleElementVec.size() == 0);
+ EATEST_VERIFY(singleElementVec.capacity() == nodeCount);
+ EATEST_VERIFY(singleElementVec.empty() == true);
+ EATEST_VERIFY(singleElementVec.validate());
+ singleElementVec.push_back_uninitialized();
+ singleElementVec.push_back(5);
+ EATEST_VERIFY(singleElementVec.size() == 2);
+ EATEST_VERIFY(singleElementVec.template get<0>()[1] == 5);
+ EATEST_VERIFY(singleElementVec.template get<int>()[1] == 5);
+ EATEST_VERIFY(singleElementVec.empty() == false);
+ EATEST_VERIFY(singleElementVec.validate());
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, bool> complexVec;
+ complexVec.reserve(5);
+ {
+ // need to call an overload of push_back that specifically grabs lvalue candidates - providing constants tend to prefer rvalue path
+ int intArg = 3;
+ float floatArg = 2.0f;
+ bool boolArg = true;
+ complexVec.push_back(intArg, floatArg, boolArg);
+ }
+ complexVec.push_back(1, 4.0f, false);
+ complexVec.push_back(2, 1.0f, true);
+ {
+ tuple<int, float, bool> complexTup(4, 3.0f, false);
+ complexVec.push_back(complexTup);
+ }
+ complexVec.push_back();
+ EATEST_VERIFY((!complexVec.has_overflowed() && complexVec.capacity() == nodeCount) || complexVec.capacity() == 5);
+ EATEST_VERIFY(*(complexVec.template get<0>()) == 3);
+ EATEST_VERIFY(complexVec.template get<float>()[1] == 4.0f);
+ EATEST_VERIFY(complexVec.template get<2>()[2] == complexVec.template get<bool>()[2]);
+ EATEST_VERIFY(complexVec.validate());
+
+ tuple<int, float, bool> defaultComplexTup;
+ EATEST_VERIFY(complexVec.at(4) == defaultComplexTup);
+
+ tuple<int*, float*, bool*> complexPtrTuple = complexVec.data();
+ EATEST_VERIFY(get<0>(complexPtrTuple) != nullptr);
+ EATEST_VERIFY(get<2>(complexPtrTuple)[2] == complexVec.template get<2>()[2]);
+
+ tuple<int&, float&, bool&> complexRefTuple = complexVec.at(2);
+ tuple<int&, float&, bool&> complexRefTupleBracket = complexVec[2];
+ tuple<int&, float&, bool&> complexRefTupleFront = complexVec.front();
+ tuple<int&, float&, bool&> complexRefTupleBack = complexVec.back();
+ EATEST_VERIFY(get<2>(complexRefTuple) == complexVec.template get<2>()[2]);
+ EATEST_VERIFY(get<1>(complexRefTupleBracket) == 1.0f);
+ EATEST_VERIFY(get<1>(complexRefTupleFront) == 2.0f);
+ EATEST_VERIFY(get<1>(complexRefTupleBack) == 0.0f);
+
+ // verify the equivalent accessors for the const container exist/compile
+ {
+ const fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, bool>& constVec = complexVec;
+
+ EATEST_VERIFY(constVec.size() == 5);
+ EATEST_VERIFY(constVec.capacity() >= constVec.size());
+ EATEST_VERIFY(constVec.empty() == false);
+ EATEST_VERIFY(constVec.template get<1>() == constVec.template get<float>());
+
+ tuple<const int*, const float*, const bool*> constPtrTuple = constVec.data();
+ EATEST_VERIFY(get<0>(constPtrTuple) != nullptr);
+ EATEST_VERIFY(get<2>(constPtrTuple)[2] == constVec.template get<2>()[2]);
+
+ tuple<const int&, const float&, const bool&> constRefTuple = constVec.at(2);
+ tuple<const int&, const float&, const bool&> constRefTupleBracket = constVec[2];
+ tuple<const int&, const float&, const bool&> constRefTupleFront = constVec.front();
+ tuple<const int&, const float&, const bool&> constRefTupleBack = constVec.back();
+ EATEST_VERIFY(get<2>(constRefTuple) == constVec.template get<2>()[2]);
+ EATEST_VERIFY(get<1>(constRefTupleBracket) == 1.0f);
+ EATEST_VERIFY(get<1>(constRefTupleFront) == 2.0f);
+ EATEST_VERIFY(get<1>(constRefTupleBack) == 0.0f);
+
+ // check that return types of const-version of begin and cbegin (etc) match
+ static_assert(eastl::is_same<decltype(constVec.begin()), decltype(constVec.cbegin())>::value, "error");
+ static_assert(eastl::is_same<decltype(constVec.end()), decltype(constVec.cend())>::value, "error");
+ static_assert(eastl::is_same<decltype(constVec.rbegin()), decltype(constVec.crbegin())>::value, "error");
+ static_assert(eastl::is_same<decltype(constVec.rend()), decltype(constVec.crend())>::value, "error");
+
+ // check that return type of non-const version of begin and cbegin (etc) do _not_ match
+ static_assert(!eastl::is_same<decltype(complexVec.begin()), decltype(complexVec.cbegin())>::value, "error");
+ static_assert(!eastl::is_same<decltype(complexVec.end()), decltype(complexVec.cend())>::value, "error");
+ static_assert(!eastl::is_same<decltype(complexVec.rbegin()), decltype(complexVec.crbegin())>::value, "error");
+ static_assert(!eastl::is_same<decltype(complexVec.rend()), decltype(complexVec.crend())>::value, "error");
+ }
+ }
+
+ // test the memory layouts work for aligned structures
+ {
+ struct EA_ALIGN(16) AlignTestVec4
+ {
+ float a[4];
+ AlignTestVec4() :a{ 1.0f, 2.0f, 3.0f, 4.0f } {}
+ };
+
+ struct AlignTestByte3
+ {
+ char a[3];
+ AlignTestByte3() : a{1, 2, 3} {}
+ };
+
+ struct EA_ALIGN(8) AlignTestFourByte
+ {
+ int a[5];
+ AlignTestFourByte() : a{-1, -2, -3, -4, -5} {}
+ };
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, AlignTestVec4, AlignTestByte3, AlignTestFourByte> alignElementVec;
+ alignElementVec.push_back();
+ alignElementVec.push_back();
+ alignElementVec.push_back();
+ alignElementVec.push_back();
+ alignElementVec.push_back();
+
+ EATEST_VERIFY((uintptr_t)alignElementVec.template get<AlignTestVec4>() % 16 == 0);
+ EATEST_VERIFY((uintptr_t)alignElementVec.template get<AlignTestFourByte>() % 8 == 0);
+ }
+
+ // Test various modifications
+ {
+ TestObject::Reset();
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+ testVec.reserve(10);
+ for (int i = 0; i < 10; ++i)
+ {
+ testVec.push_back(i % 3 == 0, TestObject(i), (float)i);
+ }
+ testVec.pop_back();
+ EATEST_VERIFY(testVec.size() == 9);
+
+ // test resize that does destruction of objects
+ testVec.resize(5);
+ EATEST_VERIFY(testVec.size() == 5);
+ EATEST_VERIFY(TestObject::sTOCount == 5);
+ EATEST_VERIFY((!testVec.has_overflowed() && testVec.capacity() == nodeCount) || testVec.capacity() == 10);
+
+ // test resize that does default construction of objects
+ testVec.resize(10);
+ EATEST_VERIFY(testVec.size() == 10);
+ EATEST_VERIFY(TestObject::sTOCount == 10);
+
+ // test resize with args that does destruction of objects
+ testVec.resize(5, true, TestObject(5), 5.0f);
+ EATEST_VERIFY(testVec.size() == 5);
+ EATEST_VERIFY(TestObject::sTOCount == 5);
+
+ // test resize with args that does construction of objects
+ testVec.resize(10, true, TestObject(5), 5.0f);
+ EATEST_VERIFY(testVec.size() == 10);
+ EATEST_VERIFY(TestObject::sTOCount == 10);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 5; i < 10; ++i)
+ {
+ EATEST_VERIFY(testVec.template get<0>()[i] == true);
+ EATEST_VERIFY(testVec.template get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(testVec.template get<2>()[i] == 5.0f);
+ }
+
+ {
+ tuple<bool, TestObject, float> resizeTup(true, TestObject(10), 10.0f);
+ // test resize with tuple that does destruction of objects
+ testVec.resize(10, resizeTup);
+ EATEST_VERIFY(testVec.size() == 10);
+ EATEST_VERIFY(TestObject::sTOCount == 10 + 1);
+
+ // test resize with tuple that does construction of objects
+ testVec.resize(15, resizeTup);
+ EATEST_VERIFY(testVec.size() == 15);
+ EATEST_VERIFY(TestObject::sTOCount == 15 + 1);
+
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 5; i < 10; ++i)
+ {
+ EATEST_VERIFY(testVec.template get<0>()[i] == true);
+ EATEST_VERIFY(testVec.template get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(testVec.template get<2>()[i] == 5.0f);
+ }
+ for (unsigned int i = 10; i < 15; ++i)
+ {
+ EATEST_VERIFY(testVec.template get<0>()[i] == get<0>(resizeTup));
+ EATEST_VERIFY(testVec.template get<1>()[i] == get<1>(resizeTup));
+ EATEST_VERIFY(testVec.template get<2>()[i] == get<2>(resizeTup));
+ }
+ }
+
+ // test other modifiers
+ testVec.pop_back();
+ EATEST_VERIFY(testVec.size() == 14);
+ EATEST_VERIFY(TestObject::sTOCount == 14); // down 2 from last sTOCount check - resizeTup dtor and pop_back
+
+ if (testVec.can_overflow())
+ {
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == testVec.size());
+ }
+ EATEST_VERIFY(testVec.validate());
+
+ testVec.clear();
+ EATEST_VERIFY(testVec.empty());
+ EATEST_VERIFY(testVec.validate());
+ EATEST_VERIFY(TestObject::IsClear());
+
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 0);
+ }
+ EATEST_VERIFY(testVec.validate());
+ TestObject::Reset();
+ }
+
+ // Test insert
+ {
+ TestObject::Reset();
+
+ // test insert with n values and lvalue args
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+ bool boolArg = true;
+ TestObject toArg = TestObject(0);
+ float floatArg = 0.0f;
+ testVec.reserve(10);
+
+ // test insert on empty vector that doesn't cause growth
+ toArg = TestObject(3);
+ floatArg = 3.0f;
+ auto insertIter = testVec.insert(testVec.begin(), 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(insertIter == testVec.begin());
+
+ // test insert to end of vector that doesn't cause growth
+ toArg = TestObject(5);
+ floatArg = 5.0f;
+ insertIter = testVec.insert(testVec.end(), 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 6);
+ EATEST_VERIFY(insertIter == testVec.begin() + 3);
+
+ // test insert to middle of vector that doesn't cause growth
+ toArg = TestObject(4);
+ floatArg = 4.0f;
+ testVec.insert(testVec.begin() + 3, 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 9);
+ EATEST_VERIFY(testVec.capacity() == 10 || testVec.capacity() == nodeCount);
+
+ // test insert to end of vector that causes growth
+ toArg = TestObject(6);
+ floatArg = 6.0f;
+ testVec.insert(testVec.end(), 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 12);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 12 || testVec.capacity() == nodeCount);
+
+ // test insert to beginning of vector that causes growth
+ toArg = TestObject(1);
+ floatArg = 1.0f;
+ testVec.insert(testVec.begin(), 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 15);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 15 || testVec.capacity() == nodeCount);
+
+ // test insert to middle of vector that causes growth
+ toArg = TestObject(2);
+ floatArg = 2.0f;
+ testVec.insert(testVec.begin() + 3, 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 18);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 18 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.template get<1>()[i] == TestObject(i / 3 + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with lvalue args
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+ bool boolArg = true;
+ TestObject toArg = TestObject(0);
+ float floatArg = 0.0f;
+ testVec.reserve(3);
+
+ // test insert on empty vector that doesn't cause growth
+ toArg = TestObject(3);
+ floatArg = 3.0f;
+ testVec.insert(testVec.begin(), boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test insert to end of vector that doesn't cause growth
+ toArg = TestObject(5);
+ floatArg = 5.0f;
+ testVec.insert(testVec.end(), boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test insert to middle of vector that doesn't cause growth
+ toArg = TestObject(4);
+ floatArg = 4.0f;
+ testVec.insert(testVec.begin() + 1, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3 || testVec.capacity() == nodeCount);
+
+ // test insert to end of vector that causes growth
+ toArg = TestObject(6);
+ floatArg = 6.0f;
+ testVec.insert(testVec.end(), boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 4);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 4 || testVec.capacity() == nodeCount);
+
+ // test insert to beginning of vector that causes growth
+ toArg = TestObject(1);
+ floatArg = 1.0f;
+ testVec.insert(testVec.begin(), boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 5);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 5 || testVec.capacity() == nodeCount);
+
+ // test insert to middle of vector that causes growth
+ toArg = TestObject(2);
+ floatArg = 2.0f;
+ testVec.insert(testVec.begin() + 1, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 6);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 6 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.template get<1>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with n and tuple
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+ tuple<bool, TestObject, float> testTup;
+ testVec.reserve(10);
+
+ // test insert on empty vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(3), 3.0f);
+ testVec.insert(testVec.begin(), 3, testTup);
+ EATEST_VERIFY(testVec.size() == 3);
+
+ // test insert to end of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(5), 5.0f);
+ testVec.insert(testVec.end(), 3, testTup);
+ EATEST_VERIFY(testVec.size() == 6);
+
+ // test insert to middle of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(4), 4.0f);
+ testVec.insert(testVec.begin() + 3, 3, testTup);
+ EATEST_VERIFY(testVec.size() == 9);
+ EATEST_VERIFY(testVec.capacity() == 10 || testVec.capacity() == nodeCount);
+
+ // test insert to end of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(6), 6.0f);
+ testVec.insert(testVec.end(), 3, testTup);
+ EATEST_VERIFY(testVec.size() == 12);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 12 || testVec.capacity() == nodeCount);
+
+ // test insert to beginning of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(1), 1.0f);
+ testVec.insert(testVec.begin(), 3, testTup);
+ EATEST_VERIFY(testVec.size() == 15);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+
+ EATEST_VERIFY(testVec.capacity() == 15 || testVec.capacity() == nodeCount);
+ // test insert to middle of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(2), 2.0f);
+ testVec.insert(testVec.begin() + 3, 3, testTup);
+ EATEST_VERIFY(testVec.size() == 18);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 18 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.template get<1>()[i] == TestObject(i / 3 + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with tuple
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+ tuple<bool, TestObject, float> testTup;
+ testVec.reserve(3);
+
+ // test insert on empty vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(3), 3.0f);
+ testVec.insert(testVec.begin(), testTup);
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test insert to end of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(5), 5.0f);
+ testVec.insert(testVec.end(), testTup);
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test insert to middle of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(4), 4.0f);
+ testVec.insert(testVec.begin() + 1, testTup);
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3 || testVec.capacity() == nodeCount);
+
+ // test insert to end of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(6), 6.0f);
+ testVec.insert(testVec.end(), 1, testTup);
+ EATEST_VERIFY(testVec.size() == 4);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 4 || testVec.capacity() == nodeCount);
+
+ // test insert to beginning of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(1), 1.0f);
+ testVec.insert(testVec.begin(), 1, testTup);
+ EATEST_VERIFY(testVec.size() == 5);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 5 || testVec.capacity() == nodeCount);
+
+ // test insert to middle of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(2), 2.0f);
+ testVec.insert(testVec.begin() + 1, 1, testTup);
+ EATEST_VERIFY(testVec.size() == 6);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 6 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.template get<1>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with initList
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+ tuple<bool, TestObject, float> testTup;
+ testVec.reserve(10);
+
+ // test insert on empty vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(3), 3.0f);
+ testVec.insert(testVec.begin(), {
+ {true, TestObject(3), 3.0f},
+ testTup,
+ {true, TestObject(3), 3.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 3);
+
+ // test insert to end of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(5), 5.0f);
+ testVec.insert(testVec.end(), {
+ {true, TestObject(5), 5.0f},
+ testTup,
+ {true, TestObject(5), 5.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 6);
+
+ // test insert to middle of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(4), 4.0f);
+ testVec.insert(testVec.begin() + 3, {
+ {true, TestObject(4), 4.0f},
+ testTup,
+ {true, TestObject(4), 4.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 9);
+ EATEST_VERIFY(testVec.capacity() == 10 || testVec.capacity() == nodeCount);
+
+ // test insert to end of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(6), 6.0f);
+ testVec.insert(testVec.end(), {
+ {true, TestObject(6), 6.0f},
+ testTup,
+ {true, TestObject(6), 6.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 12);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 12 || testVec.capacity() == nodeCount);
+
+ // test insert to beginning of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(1), 1.0f);
+ testVec.insert(testVec.begin(), {
+ {true, TestObject(1), 1.0f},
+ testTup,
+ {true, TestObject(1), 1.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 15);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 15 || testVec.capacity() == nodeCount);
+
+ // test insert to middle of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(2), 2.0f);
+ testVec.insert(testVec.begin() + 3, {
+ {true, TestObject(2), 2.0f},
+ testTup,
+ {true, TestObject(2), 2.0f
+ } });
+ EATEST_VERIFY(testVec.size() == 18);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 18 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.template get<1>()[i] == TestObject(i / 3 + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with rvalue args
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> testVec;
+ testVec.reserve(3);
+
+ // test insert on empty vector that doesn't cause growth
+ testVec.insert(testVec.begin(), 3, MoveOnlyType(3), TestObject(3));
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test insert to end of vector that doesn't cause growth
+ testVec.insert(testVec.end(), 5, MoveOnlyType(5), TestObject(5));
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test insert to middle of vector that doesn't cause growth
+ testVec.insert(testVec.begin() + 1, 4, MoveOnlyType(4), TestObject(4));
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3 || testVec.capacity() == nodeCount);
+
+ // test insert to end of vector that causes growth
+ testVec.insert(testVec.end(), 6, MoveOnlyType(6), TestObject(6));
+ EATEST_VERIFY(testVec.size() == 4);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 4 || testVec.capacity() == nodeCount);
+
+ // test insert to beginning of vector that causes growth
+ testVec.insert(testVec.begin(), 1, MoveOnlyType(1), TestObject(1));
+ EATEST_VERIFY(testVec.size() == 5);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 5 || testVec.capacity() == nodeCount);
+
+ // test insert to middle of vector that causes growth
+ testVec.insert(testVec.begin() + 1, 2, MoveOnlyType(2), TestObject(2));
+ EATEST_VERIFY(testVec.size() == 6);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 6 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.template get<2>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with rvalue tuple
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> testVec;
+ testVec.reserve(3);
+
+ // test insert on empty vector that doesn't cause growth
+ testVec.insert(testVec.begin(), forward_as_tuple(3, MoveOnlyType(3), TestObject(3)));
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test insert to end of vector that doesn't cause growth
+ testVec.insert(testVec.end(), forward_as_tuple(5, MoveOnlyType(5), TestObject(5)));
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test insert to middle of vector that doesn't cause growth
+ testVec.insert(testVec.begin() + 1, forward_as_tuple(4, MoveOnlyType(4), TestObject(4)));
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3 || testVec.capacity() == nodeCount);
+
+ // test insert to end of vector that causes growth
+ testVec.insert(testVec.end(), forward_as_tuple(6, MoveOnlyType(6), TestObject(6)));
+ EATEST_VERIFY(testVec.size() == 4);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 4 || testVec.capacity() == nodeCount);
+
+ // test insert to beginning of vector that causes growth
+ testVec.insert(testVec.begin(), forward_as_tuple(1, MoveOnlyType(1), TestObject(1)));
+ EATEST_VERIFY(testVec.size() == 5);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 5 || testVec.capacity() == nodeCount);
+
+ // test insert to middle of vector that causes growth
+ testVec.insert(testVec.begin() + 1, forward_as_tuple(2, MoveOnlyType(2), TestObject(2)));
+ EATEST_VERIFY(testVec.size() == 6);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 6 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.template get<2>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with iterator range
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> srcVec;
+ for (unsigned int i = 0; i < 20; ++i)
+ {
+ srcVec.push_back(true, TestObject(i), (float)i);
+ }
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+ testVec.reserve(10);
+
+ // test insert on empty vector that doesn't cause growth
+ testVec.insert(testVec.begin(), srcVec.begin() + 6, srcVec.begin() + 9);
+ EATEST_VERIFY(testVec.size() == 3);
+
+ // test insert to end of vector that doesn't cause growth
+ testVec.insert(testVec.end(), srcVec.begin() + 12, srcVec.begin() + 15);
+ EATEST_VERIFY(testVec.size() == 6);
+
+ // test insert to middle of vector that doesn't cause growth
+ testVec.insert(testVec.begin() + 3, srcVec.begin() + 9, srcVec.begin() + 12);
+ EATEST_VERIFY(testVec.size() == 9);
+ EATEST_VERIFY(testVec.capacity() == 10 || testVec.capacity() == nodeCount);
+
+ // test insert to end of vector that causes growth
+ testVec.insert(testVec.end(), srcVec.begin() + 15, srcVec.begin() + 18);
+ EATEST_VERIFY(testVec.size() == 12);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 12 || testVec.capacity() == nodeCount);
+
+ // test insert to beginning of vector that causes growth
+ testVec.insert(testVec.begin(), srcVec.begin(), srcVec.begin() + 3);
+ EATEST_VERIFY(testVec.size() == 15);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 15 || testVec.capacity() == nodeCount);
+
+ // test insert to middle of vector that causes growth
+ testVec.insert(testVec.begin() + 3, srcVec.begin() + 3, srcVec.begin() + 6);
+ EATEST_VERIFY(testVec.size() == 18);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 18 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test assign
+ {
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+
+ // test assign that grows the capacity
+ testVec.assign(20, true, TestObject(1), 1.0f);
+ EATEST_VERIFY(testVec.size() == 20);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(1), 1.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 20);
+
+ // test assign that shrinks the vector
+ testVec.assign(10, true, TestObject(2), 2.0f);
+ EATEST_VERIFY(testVec.size() == 10);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(2), 2.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 10);
+
+ // test assign for when there's enough capacity
+ testVec.assign(15, true, TestObject(3), 3.0f);
+ EATEST_VERIFY(testVec.size() == 15);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(3), 3.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 15);
+ }
+
+ {
+ tuple<bool, TestObject, float> srcTup;
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+
+ // test assign from tuple that grows the capacity
+ srcTup = make_tuple(true, TestObject(1), 1.0f);
+ testVec.assign(20, srcTup);
+ EATEST_VERIFY(testVec.size() == 20);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcTup);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 20 + 1);
+
+ // test assign from tuple that shrinks the vector
+ srcTup = make_tuple(true, TestObject(2), 2.0f);
+ testVec.assign(10, srcTup);
+ EATEST_VERIFY(testVec.size() == 10);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcTup);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 10 + 1);
+
+ // test assign from tuple for when there's enough capacity
+ srcTup = make_tuple(true, TestObject(3), 3.0f);
+ testVec.assign(15, srcTup);
+ EATEST_VERIFY(testVec.size() == 15);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcTup);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 15 + 1);
+ }
+
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> srcVec;
+ for (unsigned int i = 0; i < 20; ++i)
+ {
+ srcVec.push_back(true, TestObject(i), (float)i);
+ }
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+
+ // test assign from iter range that grows the capacity
+ testVec.assign(srcVec.begin() + 5, srcVec.begin() + 15);
+ EATEST_VERIFY(testVec.size() == 10);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcVec[i+5]);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 10 + 20);
+
+ // test assign from iter range that shrinks the vector
+ testVec.assign(srcVec.begin() + 2, srcVec.begin() + 7);
+ EATEST_VERIFY(testVec.size() == 5);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcVec[i + 2]);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 5 + 20);
+
+ // test assign from iter range for when there's enough capacity
+ testVec.assign(srcVec.begin() + 5, srcVec.begin() + 15);
+ EATEST_VERIFY(testVec.size() == 10);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcVec[i + 5]);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 10 + 20);
+ }
+
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+
+ // test assign from initList that grows the capacity
+ testVec.assign({
+ { true, TestObject(1), 1.0f },
+ { true, TestObject(1), 1.0f },
+ { true, TestObject(1), 1.0f }
+ });
+ EATEST_VERIFY(testVec.size() == 3);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(1), 1.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 3);
+
+ // test assign from initList that shrinks the vector
+ testVec.assign({
+ { true, TestObject(2), 2.0f }
+ });
+ EATEST_VERIFY(testVec.size() == 1);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(2), 2.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 1);
+
+ // test assign from initList for when there's enough capacity
+ testVec.assign({
+ { true, TestObject(3), 3.0f },
+ { true, TestObject(3), 3.0f }
+ });
+ EATEST_VERIFY(testVec.size() == 2);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(3), 3.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 2);
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test erase functions
+ {
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> srcVec;
+ for (unsigned int i = 0; i < 20; ++i)
+ {
+ srcVec.push_back(true, TestObject(i), (float)i);
+ }
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+
+ // test erase on an iter range
+ testVec.assign(srcVec.begin(), srcVec.end());
+ auto eraseIter = testVec.erase(testVec.begin() + 5, testVec.begin() + 10);
+ EATEST_VERIFY(eraseIter == testVec.begin() + 5);
+ EATEST_VERIFY(testVec.size() == 15);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i < 5)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 5), (float)(i + 5)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 15 + 20);
+
+ // test erase on one position
+ testVec.assign(srcVec.begin(), srcVec.end());
+ eraseIter = testVec.erase(testVec.begin() + 5);
+ EATEST_VERIFY(eraseIter == testVec.begin() + 5);
+ EATEST_VERIFY(testVec.size() == 19);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i < 5)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 1), (float)(i + 1)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 19 + 20);
+
+ // test erase_unsorted
+ testVec.assign(srcVec.begin(), srcVec.end());
+ eraseIter = testVec.erase_unsorted(testVec.begin() + 5);
+ EATEST_VERIFY(eraseIter == testVec.begin() + 5);
+ EATEST_VERIFY(testVec.size() == 19);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i != 5)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(19), (float)(19)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 19 + 20);
+ }
+
+ // test erase again but with reverse iterators everywhere
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> srcVec;
+ for (unsigned int i = 0; i < 20; ++i)
+ {
+ srcVec.push_back(true, TestObject(i), (float)i);
+ }
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> testVec;
+
+ // test erase on an iter range
+ testVec.assign(srcVec.begin(), srcVec.end());
+ auto eraseIter = testVec.erase(testVec.rbegin() + 5, testVec.rbegin() + 10);
+ EATEST_VERIFY(eraseIter == testVec.rbegin() + 5);
+ EATEST_VERIFY(testVec.size() == 15);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i < 10)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 5), (float)(i + 5)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 15 + 20);
+
+ // test erase on one position
+ testVec.assign(srcVec.begin(), srcVec.end());
+ eraseIter = testVec.erase(testVec.rbegin() + 5);
+ EATEST_VERIFY(eraseIter == testVec.rbegin() + 5);
+ EATEST_VERIFY(testVec.size() == 19);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i < 14)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 1), (float)(i + 1)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 19 + 20);
+
+ // test erase_unsorted
+ testVec.assign(srcVec.begin(), srcVec.end());
+ eraseIter = testVec.erase_unsorted(testVec.rbegin() + 5);
+ EATEST_VERIFY(eraseIter == testVec.rbegin() + 5);
+ EATEST_VERIFY(testVec.size() == 19);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i != 14)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(19), (float)(19)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 19 + 20);
+ }
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test multitude of constructors
+ {
+ EASTLAllocatorType ma;
+ EASTLAllocatorType otherMa;
+ TestObject::Reset();
+
+ // test ctor via initlist to prime srcVec. Equivalent to ...
+ // for (int i = 0; i < 10; ++i)
+ // srcVec.push_back(i % 3 == 0, TestObject(i), (float)i);
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> srcVec({
+ { true, TestObject(0), 0.0f},
+ { false, TestObject(1), 1.0f},
+ { false, TestObject(2), 2.0f},
+ { true, TestObject(3), 3.0f},
+ { false, TestObject(4), 4.0f},
+ { false, TestObject(5), 5.0f},
+ { true, TestObject(6), 6.0f},
+ { false, TestObject(7), 7.0f},
+ { false, TestObject(8), 8.0f},
+ { true, TestObject(9), 9.0f}
+ });
+
+ // copy entire tuple_vector in ctor
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorFromConstRef(srcVec);
+ EATEST_VERIFY(ctorFromConstRef.size() == 10);
+ EATEST_VERIFY(ctorFromConstRef.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromConstRef.template get<0>()[i] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromConstRef.template get<1>()[i] == TestObject(i));
+ EATEST_VERIFY(ctorFromConstRef.template get<2>()[i] == (float)i);
+ }
+ }
+
+ // copy entire tuple_vector via assignment
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorFromAssignment;
+ ctorFromAssignment = srcVec;
+ EATEST_VERIFY(ctorFromAssignment.size() == 10);
+ EATEST_VERIFY(ctorFromAssignment.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromAssignment.template get<0>()[i] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromAssignment.template get<1>()[i] == TestObject(i));
+ EATEST_VERIFY(ctorFromAssignment.template get<2>()[i] == (float)i);
+ }
+ }
+
+ // copy entire tuple_vector via assignment of init-list
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorFromAssignment;
+ ctorFromAssignment = {
+ { true, TestObject(0), 0.0f},
+ { false, TestObject(1), 1.0f},
+ { false, TestObject(2), 2.0f},
+ { true, TestObject(3), 3.0f},
+ { false, TestObject(4), 4.0f},
+ { false, TestObject(5), 5.0f},
+ { true, TestObject(6), 6.0f},
+ { false, TestObject(7), 7.0f},
+ { false, TestObject(8), 8.0f},
+ { true, TestObject(9), 9.0f}
+ };
+ EATEST_VERIFY(ctorFromAssignment.size() == 10);
+ EATEST_VERIFY(ctorFromAssignment.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromAssignment.template get<0>()[i] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromAssignment.template get<1>()[i] == TestObject(i));
+ EATEST_VERIFY(ctorFromAssignment.template get<2>()[i] == (float)i);
+ }
+ }
+
+ // ctor tuple_vector with iterator range
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorFromIters(srcVec.begin() + 2, srcVec.begin() + 7);
+ EATEST_VERIFY(ctorFromIters.size() == 5);
+ EATEST_VERIFY(ctorFromIters.validate());
+ for (int i = 2; i < 7; ++i)
+ {
+ EATEST_VERIFY(ctorFromIters.template get<0>()[i - 2] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromIters.template get<1>()[i - 2] == TestObject(i));
+ EATEST_VERIFY(ctorFromIters.template get<2>()[i - 2] == (float)i);
+ }
+ }
+
+ // ctor tuple_vector with initial size
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorFromFill(10);
+ EATEST_VERIFY(ctorFromFill.size() == 10);
+ EATEST_VERIFY(ctorFromFill.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromFill.template get<0>()[i] == false);
+ EATEST_VERIFY(ctorFromFill.template get<1>()[i] == TestObject());
+ EATEST_VERIFY(ctorFromFill.template get<2>()[i] == 0.0f);
+ }
+ }
+
+ // ctor tuple_vector with initial size and args
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorFromFillArgs(10, true, TestObject(5), 5.0f);
+ EATEST_VERIFY(ctorFromFillArgs.size() == 10);
+ EATEST_VERIFY(ctorFromFillArgs.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromFillArgs.template get<0>()[i] == true);
+ EATEST_VERIFY(ctorFromFillArgs.template get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(ctorFromFillArgs.template get<2>()[i] == 5.0f);
+ }
+ }
+
+ // ctor tuple_vector with initial size and tuple
+ {
+ tuple<bool, TestObject, float> tup(true, TestObject(5), 5.0f);
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorFromFillTup(10, tup);
+ EATEST_VERIFY(ctorFromFillTup.size() == 10);
+ EATEST_VERIFY(ctorFromFillTup.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromFillTup.template get<0>()[i] == true);
+ EATEST_VERIFY(ctorFromFillTup.template get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(ctorFromFillTup.template get<2>()[i] == 5.0f);
+ }
+ }
+
+ // ctor tuple_Vector with custom mallocator
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorWithAlloc(ma);
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> ctorDefault;
+
+ ctorWithAlloc.push_back();
+ ctorDefault.push_back();
+
+ EATEST_VERIFY(ctorWithAlloc == ctorDefault);
+ EATEST_VERIFY(ctorWithAlloc.validate());
+ }
+
+ // ctor fixed_tuple_vector_alloc with copy (from diff. allocator)
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow,bool, TestObject, float> ctorFromConstRef(srcVec, ma);
+ EATEST_VERIFY(ctorFromConstRef.size() == 10);
+ EATEST_VERIFY(ctorFromConstRef.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromConstRef.template get<0>()[i] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromConstRef.template get<1>()[i] == TestObject(i));
+ EATEST_VERIFY(ctorFromConstRef.template get<2>()[i] == (float)i);
+ }
+ EATEST_VERIFY(ctorFromConstRef.validate());
+ }
+
+ // ctor tuple_vector with initial size and args
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow,bool, TestObject, float> ctorFromFillArgs(10, true, TestObject(5), 5.0f, ma);
+ EATEST_VERIFY(ctorFromFillArgs.size() == 10);
+ EATEST_VERIFY(ctorFromFillArgs.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromFillArgs.template get<0>()[i] == true);
+ EATEST_VERIFY(ctorFromFillArgs.template get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(ctorFromFillArgs.template get<2>()[i] == 5.0f);
+ }
+ }
+
+ // ctor tuple_vector via move
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> srcMoveVec;
+ for (int i = 0; i < 10; ++i)
+ {
+ srcMoveVec.emplace_back(move(i), MoveOnlyType(i), TestObject(i));
+ }
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> ctorFromMove(move(srcMoveVec));
+
+ EATEST_VERIFY(ctorFromMove.size() == 10);
+ EATEST_VERIFY(ctorFromMove.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromMove.template get<0>()[i] == i);
+ EATEST_VERIFY(ctorFromMove.template get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(ctorFromMove.template get<2>()[i] == TestObject(i));
+ }
+ EATEST_VERIFY(srcMoveVec.size() == 0);
+ EATEST_VERIFY(srcMoveVec.validate());
+ }
+
+ // ctor tuple_vector via move (from diff. allocator)
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow,int, MoveOnlyType, TestObject> srcMoveVec;
+ for (int i = 0; i < 10; ++i)
+ {
+ srcMoveVec.emplace_back(move(i), MoveOnlyType(i), TestObject(i));
+ }
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> ctorFromMove(move(srcMoveVec), otherMa);
+
+ EATEST_VERIFY(ctorFromMove.size() == 10);
+ EATEST_VERIFY(ctorFromMove.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromMove.template get<0>()[i] == i);
+ EATEST_VERIFY(ctorFromMove.template get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(ctorFromMove.template get<2>()[i] == TestObject(i));
+ }
+ EATEST_VERIFY(srcMoveVec.size() == 0);
+ EATEST_VERIFY(srcMoveVec.validate());
+
+ // bonus test for specifying a custom allocator, but using the same one as above
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> ctorFromMoveSameAlloc(move(ctorFromMove), otherMa);
+ EATEST_VERIFY(ctorFromMoveSameAlloc.size() == 10);
+ EATEST_VERIFY(ctorFromMoveSameAlloc.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromMoveSameAlloc.template get<0>()[i] == i);
+ EATEST_VERIFY(ctorFromMoveSameAlloc.template get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(ctorFromMoveSameAlloc.template get<2>()[i] == TestObject(i));
+ }
+ EATEST_VERIFY(ctorFromMove.size() == 0);
+ EATEST_VERIFY(ctorFromMove.validate());
+ }
+
+ // ctor tuple_vector via move-iters
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> srcMoveVec;
+ for (int i = 0; i < 10; ++i)
+ {
+ srcMoveVec.emplace_back(move(i), MoveOnlyType(i), TestObject(i));
+ }
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> ctorFromMove(make_move_iterator(srcMoveVec.begin() + 2), make_move_iterator(srcMoveVec.begin() + 7));
+
+ EATEST_VERIFY(ctorFromMove.size() == 5);
+ EATEST_VERIFY(ctorFromMove.validate());
+ for (int i = 2; i < 7; ++i)
+ {
+ EATEST_VERIFY(ctorFromMove.template get<0>()[i-2] == i);
+ EATEST_VERIFY(ctorFromMove.template get<1>()[i-2] == MoveOnlyType(i));
+ EATEST_VERIFY(ctorFromMove.template get<2>()[i-2] == TestObject(i));
+ }
+ EATEST_VERIFY(srcMoveVec.size() == 10);
+ EATEST_VERIFY(srcMoveVec.validate());
+ for (int i = 0; i < 2; ++i)
+ {
+ EATEST_VERIFY(srcMoveVec.template get<0>()[i] == i);
+ EATEST_VERIFY(srcMoveVec.template get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(srcMoveVec.template get<2>()[i] == TestObject(i));
+ }
+ for (int i = 2; i < 7; ++i)
+ {
+ EATEST_VERIFY(srcMoveVec.template get<0>()[i] == i); // int's just get copied because they're POD
+ EATEST_VERIFY(srcMoveVec.template get<1>()[i] == MoveOnlyType(0));
+ EATEST_VERIFY(srcMoveVec.template get<2>()[i] == TestObject(0));
+ }
+ for (int i = 7; i < 10; ++i)
+ {
+ EATEST_VERIFY(srcMoveVec.template get<0>()[i] == i);
+ EATEST_VERIFY(srcMoveVec.template get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(srcMoveVec.template get<2>()[i] == TestObject(i));
+ }
+ }
+
+ srcVec.clear();
+ EATEST_VERIFY(TestObject::IsClear());
+
+ TestObject::Reset();
+ }
+
+ // Test swap
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, bool> complexVec;
+ complexVec.push_back(3, 2.0f, true);
+ complexVec.push_back(1, 4.0f, false);
+ complexVec.push_back(2, 1.0f, true);
+ complexVec.push_back(4, 3.0f, false);
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, bool> otherComplexVec;
+ complexVec.swap(otherComplexVec);
+
+ EATEST_VERIFY(complexVec.size() == 0);
+ EATEST_VERIFY(complexVec.validate());
+ EATEST_VERIFY(otherComplexVec.validate());
+ EATEST_VERIFY(otherComplexVec.template get<0>()[0] == 3);
+ EATEST_VERIFY(otherComplexVec.template get<float>()[1] == 4.0f);
+
+ complexVec.push_back(10, 10.0f, true);
+ swap(complexVec, otherComplexVec);
+
+ EATEST_VERIFY(complexVec.validate());
+ EATEST_VERIFY(*(complexVec.template get<0>()) == 3);
+ EATEST_VERIFY(complexVec.template get<float>()[1] == 4.0f);
+
+ EATEST_VERIFY(otherComplexVec.validate());
+ EATEST_VERIFY(otherComplexVec.template get<float>()[0] == 10.0f);
+ EATEST_VERIFY(otherComplexVec.size() == 1);
+
+ }
+
+
+ // Test tuple_Vector in a ranged for, and other large-scale iterator testing
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, int> tripleElementVec;
+ tripleElementVec.push_back(1, 2.0f, 6);
+ tripleElementVec.push_back(2, 3.0f, 7);
+ tripleElementVec.push_back(3, 4.0f, 8);
+ tripleElementVec.push_back(4, 5.0f, 9);
+ tripleElementVec.push_back(5, 6.0f, 10);
+
+
+ // test copyConstructible, copyAssignable, swappable, prefix inc, !=, reference convertible to value_type (InputIterator!)
+ {
+ typename fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, int>::iterator iter = tripleElementVec.begin();
+ ++iter;
+ auto copiedIter(iter);
+ EATEST_VERIFY(get<2>(*copiedIter) == 7);
+ EATEST_VERIFY(copiedIter == iter);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(copiedIter) != isf_none);
+
+ ++iter;
+ copiedIter = iter;
+ EATEST_VERIFY(get<2>(*copiedIter) == 8);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(copiedIter) != isf_none);
+
+ ++iter;
+ swap(iter, copiedIter);
+ EATEST_VERIFY(get<2>(*iter) == 8);
+ EATEST_VERIFY(get<2>(*copiedIter) == 9);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(copiedIter) != isf_none);
+
+ EATEST_VERIFY(copiedIter != iter);
+
+ tuple<const int&, const float&, const int&> ref(*iter);
+ tuple<int, float, int> value(*iter);
+ EATEST_VERIFY(get<2>(ref) == get<2>(value));
+ }
+
+ // test postfix increment, default constructible (ForwardIterator)
+ {
+ typename fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, int>::iterator iter = tripleElementVec.begin();
+ auto prefixIter = ++iter;
+
+ typename fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, int>::iterator postfixIter;
+ postfixIter = iter++;
+ EATEST_VERIFY(prefixIter == postfixIter);
+ EATEST_VERIFY(get<2>(*prefixIter) == 7);
+ EATEST_VERIFY(get<2>(*iter) == 8);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(prefixIter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(postfixIter) != isf_none);
+ }
+
+ // test prefix decrement and postfix decrement (BidirectionalIterator)
+ {
+ typename fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, int>::iterator iter = tripleElementVec.end();
+ auto prefixIter = --iter;
+
+ typename fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, int>::iterator postfixIter;
+ postfixIter = iter--;
+ EATEST_VERIFY(prefixIter == postfixIter);
+ EATEST_VERIFY(get<2>(*prefixIter) == 10);
+ EATEST_VERIFY(get<2>(*iter) == 9);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(prefixIter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(postfixIter) != isf_none);
+ }
+
+ // test many arithmetic operations (RandomAccessIterator)
+ {
+ typename fixed_tuple_vector<nodeCount, bEnableOverflow, int, float, int>::iterator iter = tripleElementVec.begin();
+ auto symmetryOne = iter + 2;
+ auto symmetryTwo = 2 + iter;
+ iter += 2;
+ EATEST_VERIFY(symmetryOne == symmetryTwo);
+ EATEST_VERIFY(symmetryOne == iter);
+
+ symmetryOne = iter - 2;
+ symmetryTwo = 2 - iter;
+ iter -= 2;
+ EATEST_VERIFY(symmetryOne == symmetryTwo);
+ EATEST_VERIFY(symmetryOne == iter);
+
+ iter += 2;
+ EATEST_VERIFY(iter - symmetryOne == 2);
+
+ tuple<int&, float&, int&> symmetryRef = symmetryOne[2];
+ EATEST_VERIFY(get<2>(symmetryRef) == get<2>(*iter));
+
+ EATEST_VERIFY(symmetryOne < iter);
+ EATEST_VERIFY(iter > symmetryOne);
+ EATEST_VERIFY(symmetryOne >= symmetryTwo && iter >= symmetryOne);
+ EATEST_VERIFY(symmetryOne <= symmetryTwo && symmetryOne <= iter);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(symmetryOne) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(symmetryTwo) != isf_none);
+ }
+
+ // test simple iteration, and reverse iteration
+ {
+ float i = 0;
+ int j = 0;
+ EATEST_VERIFY(&get<0>(*tripleElementVec.begin()) == tripleElementVec.template get<0>());
+ EATEST_VERIFY(&get<1>(*tripleElementVec.begin()) == tripleElementVec.template get<1>());
+ for (auto iter : tripleElementVec)
+ {
+ i += get<1>(iter);
+ j += get<2>(iter);
+ }
+ EATEST_VERIFY(i == 20.0f);
+ EATEST_VERIFY(j == 40);
+
+ float reverse_i = 0;
+ int reverse_j = 0;
+
+ eastl::for_each(tripleElementVec.rbegin(), tripleElementVec.rend(),
+ [&](const tuple<int, float, int> tup)
+ {
+ reverse_i += get<1>(tup);
+ reverse_j += get<2>(tup);
+ });
+ EATEST_VERIFY(i == reverse_i);
+ EATEST_VERIFY(j == reverse_j);
+ EATEST_VERIFY(get<0>(*tripleElementVec.rbegin()) == 5);
+ }
+ }
+
+ // Test move operations
+ {
+ TestObject::Reset();
+
+ // test emplace
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> testVec;
+ testVec.reserve(3);
+
+ // test emplace on empty vector that doesn't cause growth
+ testVec.emplace(testVec.begin(), 3, MoveOnlyType(3), TestObject(3));
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test emplace to end of vector that doesn't cause growth
+ testVec.emplace(testVec.end(), 5, MoveOnlyType(5), TestObject(5));
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test emplace to middle of vector that doesn't cause growth
+ testVec.emplace(testVec.begin() + 1, 4, MoveOnlyType(4), TestObject(4));
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3 || testVec.capacity() == nodeCount);
+
+ // test emplace to end of vector that causes growth
+ testVec.emplace(testVec.end(), 6, MoveOnlyType(6), TestObject(6));
+ EATEST_VERIFY(testVec.size() == 4);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 4 || testVec.capacity() == nodeCount);
+
+ // test emplace to beginning of vector that causes growth
+ testVec.emplace(testVec.begin(), 1, MoveOnlyType(1), TestObject(1));
+ EATEST_VERIFY(testVec.size() == 5);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 5 || testVec.capacity() == nodeCount);
+
+ // test emplace to middle of vector that causes growth
+ testVec.emplace(testVec.begin() + 1, 2, MoveOnlyType(2), TestObject(2));
+ EATEST_VERIFY(testVec.size() == 6);
+ if (testVec.has_overflowed())
+ {
+ testVec.shrink_to_fit();
+ }
+ EATEST_VERIFY(testVec.capacity() == 6 || testVec.capacity() == nodeCount);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.template get<2>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test some other miscellania around rvalues, including...
+ // push_back with rvalue args, push_back with rvalue tuple,
+ // emplace_back with args, and emplace_back with tup
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> v1;
+ fixed_tuple_vector<nodeCount, bEnableOverflow, int, MoveOnlyType, TestObject> v2;
+ // add some data in the vector so we can move it to the other vector.
+ v1.reserve(5);
+ auto emplacedTup = v1.emplace_back(1, MoveOnlyType(1), TestObject(1));
+ EATEST_VERIFY(emplacedTup == v1.back());
+ v1.push_back(3, MoveOnlyType(3), TestObject(3));
+ v1.emplace_back(forward_as_tuple(5, MoveOnlyType(5), TestObject(5)));
+ v1.push_back(forward_as_tuple(6, MoveOnlyType(6), TestObject(6)));
+ v1.emplace(v1.begin() + 1, 2, MoveOnlyType(2), TestObject(2));
+ v1.emplace(v1.begin() + 3, make_tuple(4, MoveOnlyType(4), TestObject(4)));
+
+ tuple<int&, MoveOnlyType&, TestObject&> movedTup = v1.at(0);
+ EATEST_VERIFY(v1.validate());
+ EATEST_VERIFY(get<0>(movedTup) == 1);
+ EATEST_VERIFY(get<0>(*v1.begin()) == 1);
+
+ for (int i = 0; i < static_cast<int>(v1.size()); ++i)
+ {
+ EATEST_VERIFY(v1.template get<0>()[i] == i + 1);
+ }
+ EATEST_VERIFY(!v1.empty() && v2.empty());
+ v2 = eastl::move(v1);
+ EATEST_VERIFY(v2.validate());
+ EATEST_VERIFY(v1.empty() && !v2.empty());
+ v1.swap(v2);
+ EATEST_VERIFY(v1.validate());
+ EATEST_VERIFY(v2.validate());
+ EATEST_VERIFY(!v1.empty() && v2.empty());
+ }
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test comparisons
+ {
+
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float> equalsVec1, equalsVec2;
+ for (int i = 0; i < 10; ++i)
+ {
+ equalsVec1.push_back(i % 3 == 0, TestObject(i), (float)i);
+ equalsVec2.push_back(i % 3 == 0, TestObject(i), (float)i);
+ }
+ EATEST_VERIFY(equalsVec1 == equalsVec2);
+
+ using ftv = fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float>;
+ typename ftv::overflow_allocator_type otherAlloc;
+ ftv smallSizeVec(5);
+ ftv lessThanVec(10);
+ ftv greaterThanVec(10, otherAlloc);
+ for (int i = 0; i < 10; ++i)
+ {
+ lessThanVec.push_back(i % 3 == 0, TestObject(i), (float)i);
+ greaterThanVec.push_back(i % 3 == 0, TestObject(i * 2), (float)i * 2);
+ }
+ EATEST_VERIFY(equalsVec1 != smallSizeVec);
+ EATEST_VERIFY(equalsVec1 != lessThanVec);
+ EATEST_VERIFY(equalsVec1 != greaterThanVec);
+ EATEST_VERIFY(lessThanVec < greaterThanVec);
+ EATEST_VERIFY(greaterThanVec > lessThanVec);
+ EATEST_VERIFY(lessThanVec <= greaterThanVec);
+ EATEST_VERIFY(equalsVec1 <= equalsVec2);
+ EATEST_VERIFY(equalsVec1 >= equalsVec2);
+ }
+
+ // Test partition
+ {
+ {
+ fixed_tuple_vector<nodeCount, bEnableOverflow, bool, TestObject, float, MoveOnlyType> vec;
+ for (int i = 0; i < 10; ++i)
+ {
+ vec.push_back(i % 3 == 0, TestObject(i), (float)i, MoveOnlyType(i));
+ }
+
+ eastl::partition(vec.begin(), vec.end(), [](tuple<bool&, TestObject&, float&, MoveOnlyType&> a)
+ { return get<0>(a) == true; });
+
+ // partition will split the array into 4 elements where the bool property is true, and 6 where it's false
+ for (int i = 0; i < 4; ++i)
+ EATEST_VERIFY(vec.template get<0>()[i] == true);
+ for (int i = 4; i < 10; ++i)
+ EATEST_VERIFY(vec.template get<0>()[i] == false);
+
+ EATEST_VERIFY(vec.validate());
+ EATEST_VERIFY(TestObject::sTOCount == 10);
+ }
+ EATEST_VERIFY(TestObject::IsClear());
+ }
+ return nErrorCount;
+}
+
+int TestFixedTupleVector()
+{
+ int nErrorCount = 0;
+
+ nErrorCount += TestFixedTupleVectorVariant<2, true>();
+ nErrorCount += TestFixedTupleVectorVariant<16, true>();
+ nErrorCount += TestFixedTupleVectorVariant<64, false>();
+
+ return nErrorCount;
+}
+
+
diff --git a/EASTL/test/source/TestFixedVector.cpp b/EASTL/test/source/TestFixedVector.cpp
new file mode 100644
index 0000000..aeb3ba2
--- /dev/null
+++ b/EASTL/test/source/TestFixedVector.cpp
@@ -0,0 +1,581 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/fixed_vector.h>
+#include <EASTL/unique_ptr.h>
+#include <EAStdC/EAMemory.h>
+#include <new>
+
+#if defined(EA_COMPILER_CPP17_ENABLED) && __has_include(<variant>)
+#include <variant> //Variant not present in older standards
+#endif
+
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::fixed_vector<int, 1, true>;
+template class eastl::fixed_vector<Align64, 1, true>;
+template class eastl::fixed_vector<TestObject, 1, true>;
+
+template class eastl::fixed_vector<int, 1, false>;
+template class eastl::fixed_vector<Align64, 1, false>;
+template class eastl::fixed_vector<TestObject, 1, false>;
+
+/*
+// This does not compile, since the fixed_vector allocator is templated on sizeof(T),
+// not just T. Thus, the full type is required at the time of instantiation, but it
+// is not available.
+// See EATech Core JIRA issue ETCR-1608 for more information.
+struct StructWithContainerOfStructs
+{
+ eastl::fixed_vector<StructWithContainerOfStructs,4> children;
+};
+*/
+
+
+namespace
+{
+ // Aligned objects should be CustomAllocator instead of the default, because the
+ // EASTL default might be unable to do aligned allocations, but CustomAllocator always can.
+ typedef fixed_vector<Align64, 3, true, CustomAllocator> VA64;
+
+ VA64 vA64;
+ Align64 a64(5);
+ Align64* pA64 = &a64;
+}
+
+
+int TestFixedVector()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ { // Test the aligned_buffer template
+ {
+ eastl::aligned_buffer<sizeof(TestObject), EASTL_ALIGN_OF(TestObject)> toAlignedBuffer;
+ TestObject* const pTO = new(toAlignedBuffer.buffer) TestObject;
+ #if !defined(__GNUC__) // GCC complains about strict aliasing here.
+ EATEST_VERIFY(pTO->mX == ((TestObject*)&toAlignedBuffer.buffer[0])->mX);
+ #endif
+ pTO->~TestObject();
+ }
+
+ {
+ eastl::aligned_buffer<sizeof(Align64), EASTL_ALIGN_OF(Align64)> a64AlignedBuffer;
+ Align64* const pAlign64 = new(a64AlignedBuffer.buffer) Align64;
+ #if !defined(__GNUC__) // GCC complains about strict aliasing here.
+ EATEST_VERIFY(pAlign64->mX == ((Align64*)&a64AlignedBuffer.buffer[0])->mX);
+ #endif
+ pAlign64->~Align64();
+ }
+ }
+
+ {
+ // fixed_vector();
+ // size_type max_size() const;
+ fixed_vector<int, 1, true> v;
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "fixed_vector", -1));
+ EATEST_VERIFY(v.max_size() == 1);
+
+ // fixed_vector();
+ typedef fixed_vector<int, 8, false> FixedVectorInt8;
+ FixedVectorInt8 fv1;
+ EATEST_VERIFY(fv1.size() == 0);
+ EATEST_VERIFY(fv1.capacity() == 8);
+
+ // this_type& operator=(const base_type& x);
+ FixedVectorInt8 fv2 = fv1;
+ EATEST_VERIFY(fv2.size() == 0);
+ EATEST_VERIFY(fv2.capacity() == 8);
+
+ // fixed_vector(const base_type& x);
+ FixedVectorInt8 fv3(fv1);
+ EATEST_VERIFY(fv3.size() == 0);
+ EATEST_VERIFY(fv3.capacity() == 8);
+
+ // explicit fixed_vector(size_type n);
+ FixedVectorInt8 fv4(5);
+ EATEST_VERIFY(fv4.size() == 5);
+ EATEST_VERIFY(fv4.capacity() == 8);
+ EATEST_VERIFY((fv4[0] == 0) && (fv4[4] == 0));
+
+ // fixed_vector(size_type n, const value_type& value);
+ FixedVectorInt8 fv5((eastl_size_t)5, (int)3);
+ EATEST_VERIFY(fv5.size() == 5);
+ EATEST_VERIFY(fv5.capacity() == 8);
+ EATEST_VERIFY((fv5[0] == 3) && (fv5[4] == 3));
+
+ // fixed_vector(InputIterator first, InputIterator last);
+ const int intArray[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ FixedVectorInt8 fv6(intArray, intArray + 8);
+ EATEST_VERIFY(fv6.size() == 8);
+ EATEST_VERIFY(fv5.capacity() == 8);
+ EATEST_VERIFY((fv6[0] == 0) && (fv6[7] == 7));
+
+ // void reset_lose_memory();
+ fv6.reset_lose_memory();
+ EATEST_VERIFY(fv6.size() == 0);
+ EATEST_VERIFY(fv6.capacity() == 8);
+
+ // void set_capacity(size_type);
+ fv6.set_capacity(100); // overflow is disabled, so this should have no effect.
+ EATEST_VERIFY(fv6.size() == 0);
+ EATEST_VERIFY(fv6.capacity() == 8); // EATEST_VERIFY that the capacity is unchanged.
+
+ fv6.resize(8);
+ EATEST_VERIFY(fv6.size() == 8);
+ fv6.set_capacity(1);
+ EATEST_VERIFY(fv6.size() == 1);
+ EATEST_VERIFY(fv6.capacity() == 8);
+
+ // Exercise the freeing of memory in set_capacity.
+ fixed_vector<int, 8, true> fv88;
+ eastl_size_t capacity = fv88.capacity();
+ fv88.resize(capacity);
+ fv88.set_capacity(capacity * 2);
+ EATEST_VERIFY(fv88.capacity() >= (capacity * 2));
+
+ // void swap(this_type& x);
+ // FixedVectorInt8 fv7(5, 3); // MSVC-ARM64 generated an internal compiler error on this line.
+ FixedVectorInt8 fv7 = {3, 3, 3, 3, 3};
+ FixedVectorInt8 fv8(intArray, intArray + 8);
+
+ swap(fv7, fv8);
+ EATEST_VERIFY(fv7.size() == 8);
+ EATEST_VERIFY((fv7[0] == 0) && (fv7[7] == 7));
+ EATEST_VERIFY(fv8.size() == 5);
+ EATEST_VERIFY((fv8[0] == 3) && (fv8[4] == 3));
+
+ fv7.swap(fv8);
+ EATEST_VERIFY(fv8.size() == 8);
+ EATEST_VERIFY((fv8[0] == 0) && (fv8[7] == 7));
+ EATEST_VERIFY(fv7.size() == 5);
+ EATEST_VERIFY((fv7[0] == 3) && (fv7[4] == 3));
+
+ // Test a recent optimization we added, which was to do a pointer swap of the fixed_vector pointers
+ // for the case that both fixed_vectors were overflowed and using the heap instead of their fixed buffers.
+ fixed_vector<int8_t, 4, true> fvo5;
+ fixed_vector<int8_t, 4, true> fvo6;
+ fvo5.resize(5, 5);
+ EATEST_VERIFY(fvo5.has_overflowed());
+ fvo6.resize(6, 6);
+ EATEST_VERIFY(fvo6.has_overflowed());
+ fvo5.swap(fvo6);
+ EATEST_VERIFY(fvo5.size() == 6); // Verify that sizes are swapped.
+ EATEST_VERIFY(fvo6.size() == 5);
+ EATEST_VERIFY(EA::StdC::Memcheck8(fvo5.data(), 6, fvo5.size()) == NULL); // Verify that contents are swapped.
+ EATEST_VERIFY(EA::StdC::Memcheck8(fvo6.data(), 5, fvo6.size()) == NULL);
+
+ // global operators
+ EATEST_VERIFY( fv7 != fv8);
+ EATEST_VERIFY(!(fv7 == fv8));
+ fv7 = fv8;
+ EATEST_VERIFY( fv7 == fv8);
+ EATEST_VERIFY(!(fv7 != fv8));
+ EATEST_VERIFY(fv7.validate());
+ EATEST_VERIFY(fv8.validate());
+ }
+
+
+ {
+ // POD types
+ typedef fixed_vector<int, 1, true> vInt;
+
+ vInt v;
+ int n = 5;
+ int* pN = &n;
+
+ v.insert(v.begin(), pN, pN + 1);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "fixed_vector", 5, -1));
+ EATEST_VERIFY(v.validate());
+ }
+
+
+ {
+ // non POD types
+ typedef fixed_vector<TestObject, 1, true> VTO;
+
+ VTO v;
+ TestObject to(5);
+ TestObject* pTO = &to;
+
+ v.insert(v.begin(), pTO, pTO + 1);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "fixed_vector", 5, -1));
+ EATEST_VERIFY(v.validate());
+ }
+
+
+ {
+ // non POD types
+
+ // The variables used here are declared above in the global space.
+ vA64.insert(vA64.begin(), pA64, pA64 + 1);
+ EATEST_VERIFY(VerifySequence(vA64.begin(), vA64.end(), int(), "fixed_vector", 5, -1));
+ EATEST_VERIFY(((uintptr_t)&a64 % kEASTLTestAlign64) == 0);
+ EATEST_VERIFY(((uintptr_t)vA64.data() % kEASTLTestAlign64) == 0);
+ EATEST_VERIFY(((uintptr_t)&vA64[0] % kEASTLTestAlign64) == 0);
+ EATEST_VERIFY(vA64.max_size() == 3);
+ EATEST_VERIFY(vA64.validate());
+ }
+
+
+ {
+ // Test for potential bug reported Sep. 19, 2006.
+ typedef eastl::fixed_vector<void*, 160, false> FixedVector;
+ FixedVector v;
+ int* p = (int*)(uintptr_t)0;
+
+ for(int i = 0; i < 100; i++, p++)
+ v.push_back(p);
+
+ EATEST_VERIFY(v.size() == 100);
+ EATEST_VERIFY(eastl::unique(v.begin(), v.end()) == v.end());
+
+ FixedVector::iterator it = eastl::lower_bound(v.begin(), v.end(), p - 30);
+ EATEST_VERIFY(v.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ EATEST_VERIFY((*it) == (p - 30));
+
+ v.erase(it);
+
+ EATEST_VERIFY(v.size() == 99);
+ EATEST_VERIFY(eastl::unique(v.begin(), v.end()) == v.end());
+ }
+
+ {
+ typedef fixed_vector<Align64, 4, true, CustomAllocator> FixedVectorWithAlignment;
+
+ FixedVectorWithAlignment fv;
+
+ Align64 a;
+
+ fv.push_back(a);
+ fv.push_back(a);
+ fv.push_back(a);
+ fv.push_back(a);
+ fv.push_back(a);
+ for (FixedVectorWithAlignment::const_iterator it = fv.begin(); it != fv.end(); ++it)
+ {
+ const Align64* ptr = &(*it);
+ EATEST_VERIFY((uint64_t)ptr % EASTL_ALIGN_OF(Align64) == 0);
+ }
+ }
+
+ { // Test overflow allocator specification
+ typedef fixed_vector<char8_t, 64, true, MallocAllocator> FixedString64Malloc;
+
+ FixedString64Malloc fs;
+
+ fs.push_back('a');
+ EATEST_VERIFY(fs.size() == 1);
+ EATEST_VERIFY(fs[0] == 'a');
+
+ fs.resize(95);
+ fs[94] = 'b';
+ EATEST_VERIFY(fs[0] == 'a');
+ EATEST_VERIFY(fs[94] == 'b');
+ EATEST_VERIFY(fs.size() == 95);
+ EATEST_VERIFY(fs.validate());
+
+ fs.clear();
+ EATEST_VERIFY(fs.empty());
+
+ fs.push_back('a');
+ EATEST_VERIFY(fs.size() == 1);
+ EATEST_VERIFY(fs[0] == 'a');
+ EATEST_VERIFY(fs.validate());
+
+ fs.resize(195);
+ fs[194] = 'b';
+ EATEST_VERIFY(fs[0] == 'a');
+ EATEST_VERIFY(fs[194] == 'b');
+ EATEST_VERIFY(fs.size() == 195);
+ EATEST_VERIFY(fs.validate());
+
+ // get_overflow_allocator / set_overflow_allocator
+ fs.set_capacity(0); // This should free all memory allocated by the existing (overflow) allocator.
+ EATEST_VERIFY(fs.validate());
+ MallocAllocator a;
+ fs.get_allocator().set_overflow_allocator(a);
+ EATEST_VERIFY(fs.validate());
+ fs.resize(400);
+ EATEST_VERIFY(fs.validate());
+ }
+
+
+ {
+ //Test clear(bool freeOverflow)
+ const size_t nodeCount = 4;
+ typedef fixed_vector<int, nodeCount, true> vInt4;
+ vInt4 fv;
+ for (int i = 0; (unsigned)i < nodeCount+1; i++)
+ {
+ fv.push_back(i);
+ }
+ vInt4::size_type capacity = fv.capacity();
+ EATEST_VERIFY(capacity >= nodeCount+1);
+ fv.clear(false);
+ EATEST_VERIFY(fv.size() == 0);
+ EATEST_VERIFY(fv.capacity() == capacity);
+ fv.push_back(1);
+ fv.clear(true);
+ EATEST_VERIFY(fv.size() == 0);
+ EATEST_VERIFY(fv.capacity() == nodeCount);
+ }
+
+
+ {
+ // bool empty() const
+ // bool has_overflowed() const
+ // size_type size() const;
+ // size_type max_size() const
+
+ // Test a vector that has overflow disabled.
+ fixed_vector<int, 5, false> vInt5;
+
+ EATEST_VERIFY(vInt5.max_size() == 5);
+ EATEST_VERIFY(vInt5.size() == 0);
+ EATEST_VERIFY(vInt5.empty());
+ EATEST_VERIFY(!vInt5.has_overflowed());
+
+ vInt5.push_back(37);
+ vInt5.push_back(37);
+ vInt5.push_back(37);
+
+ EATEST_VERIFY(vInt5.size() == 3);
+ EATEST_VERIFY(!vInt5.empty());
+ EATEST_VERIFY(!vInt5.has_overflowed());
+
+ vInt5.push_back(37);
+ vInt5.push_back(37);
+
+ EATEST_VERIFY(vInt5.size() == 5);
+ EATEST_VERIFY(!vInt5.empty());
+ EATEST_VERIFY(!vInt5.has_overflowed());
+
+ vInt5.pop_back();
+
+ EATEST_VERIFY(vInt5.size() == 4);
+ EATEST_VERIFY(!vInt5.empty());
+ EATEST_VERIFY(!vInt5.has_overflowed());
+ EATEST_VERIFY(vInt5.validate());
+ }
+
+
+ {
+ // bool empty() const
+ // bool has_overflowed() const
+ // size_type size() const;
+ // size_type max_size() const
+
+ // Test a list that has overflow enabled.
+ fixed_vector<int, 5, true> vInt5;
+
+ EATEST_VERIFY(vInt5.max_size() == 5);
+ EATEST_VERIFY(vInt5.size() == 0);
+ EATEST_VERIFY(vInt5.empty());
+ EATEST_VERIFY(!vInt5.has_overflowed());
+
+ vInt5.push_back(37);
+ vInt5.push_back(37);
+ vInt5.push_back(37);
+
+ EATEST_VERIFY(vInt5.size() == 3);
+ EATEST_VERIFY(!vInt5.empty());
+ EATEST_VERIFY(!vInt5.has_overflowed());
+
+ vInt5.push_back(37);
+ vInt5.push_back(37);
+
+ EATEST_VERIFY(vInt5.size() == 5);
+ EATEST_VERIFY(!vInt5.empty());
+ EATEST_VERIFY(!vInt5.has_overflowed());
+
+ vInt5.push_back(37);
+
+ EATEST_VERIFY(vInt5.size() == 6);
+ EATEST_VERIFY(!vInt5.empty());
+ EATEST_VERIFY(vInt5.has_overflowed());
+
+ vInt5.clear();
+
+ EATEST_VERIFY(vInt5.size() == 0);
+ EATEST_VERIFY(vInt5.empty());
+ EATEST_VERIFY(vInt5.has_overflowed()); // Note that we declare the container full, as it is no longer using the fixed-capacity.
+ EATEST_VERIFY(vInt5.validate());
+ }
+
+ {
+ // void* push_back_uninitialized();
+
+ int64_t toCount0 = TestObject::sTOCount;
+
+ eastl::fixed_vector<TestObject, 32, false> vTO1; // <-- bEnableOverflow = false
+ EATEST_VERIFY(TestObject::sTOCount == toCount0);
+
+ for(int i = 0; i < 25; i++) // 25 is simply a number that is <= 32.
+ {
+ void* pTO1 = vTO1.push_back_uninitialized();
+ EATEST_VERIFY(TestObject::sTOCount == (toCount0 + i));
+
+ new(pTO1) TestObject(i);
+ EATEST_VERIFY(TestObject::sTOCount == (toCount0 + i + 1));
+ EATEST_VERIFY(vTO1.back().mX == i);
+ EATEST_VERIFY(vTO1.validate());
+ }
+ }
+
+ {
+ // void* push_back_uninitialized();
+
+ int64_t toCount0 = TestObject::sTOCount;
+
+ eastl::fixed_vector<TestObject, 15, true> vTO2; // <-- bEnableOverflow = true
+ EATEST_VERIFY(TestObject::sTOCount == toCount0);
+
+ for(int i = 0; i < 25; i++) // 25 is simply a number that is > 15.
+ {
+ void* pTO2 = vTO2.push_back_uninitialized();
+ EATEST_VERIFY(TestObject::sTOCount == (toCount0 + i));
+
+ new(pTO2) TestObject(i);
+ EATEST_VERIFY(TestObject::sTOCount == (toCount0 + i + 1));
+ EATEST_VERIFY(vTO2.back().mX == i);
+ EATEST_VERIFY(vTO2.validate());
+ }
+ }
+
+ { // Try to repro user report that fixed_vector on the stack crashes.
+ eastl::fixed_vector<int, 10, false> fvif;
+ eastl::fixed_vector<int, 10, true> fvit;
+ eastl::fixed_vector<TestObject, 10, false> fvof;
+ eastl::fixed_vector<TestObject, 10, true> fvot;
+ eastl::fixed_vector<int, 10, false, MallocAllocator> fvimf;
+ eastl::fixed_vector<int, 10, true, MallocAllocator> fvimt;
+ eastl::fixed_vector<TestObject, 10, false, MallocAllocator> fvomf;
+ eastl::fixed_vector<TestObject, 10, true, MallocAllocator> fvomt;
+
+ fvif.push_back(1);
+ fvit.push_back(1);
+ fvimf.push_back(1);
+ fvimt.push_back(1);
+
+ fvif.clear();
+ fvit.clear();
+ fvimf.clear();
+ fvimt.clear();
+ }
+
+ {
+ // Test construction of a container with an overflow allocator constructor argument.
+ MallocAllocator overflowAllocator;
+ void* p = overflowAllocator.allocate(1);
+ fixed_vector<int, 64, true, MallocAllocator> c(overflowAllocator);
+ c.resize(65);
+ EATEST_VERIFY(c.get_overflow_allocator().mAllocCount == 2); // 1 for above, and 1 for overflowing from 64 to 65.
+ overflowAllocator.deallocate(p, 1);
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ { // Test for crash bug reported by Arpit Baldeva.
+ eastl::fixed_vector<void*, 1, true> test;
+
+ test.push_back(NULL);
+ test.push_back(NULL);
+ test.erase(eastl::find(test.begin(), test.end(), (void*)NULL));
+ test.erase(eastl::find(test.begin(), test.end(), (void*)NULL));
+ EATEST_VERIFY(test.empty());
+ EATEST_VERIFY(test.validate());
+
+ test.set_capacity(0); // "Does nothing currently."
+ EATEST_VERIFY(test.capacity() == 0);
+ EATEST_VERIFY(test.validate());
+
+ } // "Crash here."
+
+ {
+ const int FV_SIZE = 100;
+ fixed_vector<unique_ptr<unsigned int>, FV_SIZE> fvmv1; // to move via move assignment operator
+ fixed_vector<unique_ptr<unsigned int>, FV_SIZE> fvmv2; // to move via move copy constructor
+
+ for (unsigned int i = 0; i < FV_SIZE; ++i) // populate fvmv1
+ fvmv1.push_back(make_unique<unsigned int>(i));
+
+ fvmv2 = eastl::move(fvmv1); // Test move assignment operator
+
+ for (unsigned int i = 0; i < FV_SIZE; ++i)
+ {
+ EATEST_VERIFY(!fvmv1[i]);
+ EATEST_VERIFY(*fvmv2[i] == i);
+ }
+ EATEST_VERIFY(fvmv2.validate());
+
+ swap(fvmv1, fvmv2); // Test swap with move-only objects
+ for (unsigned int i = 0; i < FV_SIZE; ++i)
+ {
+ EATEST_VERIFY(*fvmv1[i] == i);
+ EATEST_VERIFY(!fvmv2[i]);
+ }
+ EATEST_VERIFY(fvmv1.validate());
+ EATEST_VERIFY(fvmv2.validate());
+
+ fixed_vector<unique_ptr<unsigned int>, FV_SIZE> fv = eastl::move(fvmv1); // Test move copy constructor
+ for (unsigned int i = 0; i < FV_SIZE; ++i)
+ {
+ EATEST_VERIFY(!fvmv1[i]);
+ EATEST_VERIFY(*fv[i] == i);
+ }
+ EATEST_VERIFY(fv.validate());
+ }
+
+ { // Test that ensures that move ctor that triggers realloc (e.g. > capacity) does so via move code path
+ eastl::fixed_vector<TestObject, 1, true> fv1;
+ fv1.push_back(TestObject(0));
+ fv1.push_back(TestObject(0));
+ int64_t copyCtorCount0 = TestObject::sTOCopyCtorCount, moveCtorCount0 = TestObject::sTOMoveCtorCount;
+ decltype(fv1) fv2 = eastl::move(fv1);
+ EATEST_VERIFY(TestObject::sTOCopyCtorCount == copyCtorCount0 && TestObject::sTOMoveCtorCount == (moveCtorCount0 + 2));
+ }
+ { // Same as above but with custom statefull allocator
+ struct MyAlloc : public eastl::allocator
+ {
+ MyAlloc()=default;
+ MyAlloc(int i) : dummy(i) {}
+ int dummy;
+ };
+ eastl::fixed_vector<TestObject, 1, true, MyAlloc> fv1;
+ fv1.push_back(TestObject(0));
+ fv1.push_back(TestObject(0));
+ int64_t copyCtorCount0 = TestObject::sTOCopyCtorCount, moveCtorCount0 = TestObject::sTOMoveCtorCount;
+ decltype(fv1) fv2(eastl::move(fv1), MyAlloc(123));
+ EATEST_VERIFY(TestObject::sTOCopyCtorCount == copyCtorCount0 && TestObject::sTOMoveCtorCount == (moveCtorCount0 + 2));
+ }
+
+ #if defined(EA_COMPILER_CPP17_ENABLED) && __has_include(<variant>)
+ //Test pairing of std::variant with fixed_vector
+ {
+ eastl::fixed_vector<std::variant<int>, 4> v;
+ eastl::fixed_vector<std::variant<int>, 4> b = eastl::move(v);
+ }
+ #endif
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestFunctional.cpp b/EASTL/test/source/TestFunctional.cpp
new file mode 100644
index 0000000..1e25200
--- /dev/null
+++ b/EASTL/test/source/TestFunctional.cpp
@@ -0,0 +1,1529 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include <EABase/eabase.h>
+#include <EAAssert/eaassert.h>
+#include "EASTLTest.h"
+#include <EASTL/memory.h>
+#include <EASTL/functional.h>
+#include <EASTL/hash_set.h>
+#include <EASTL/set.h>
+#include <EASTL/list.h>
+#include <EAStdC/EAString.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <functional>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+namespace
+{
+
+ // Used for eastl::function tests
+ static int TestIntRet(int* p)
+ {
+ int ret = *p;
+ *p += 1;
+ return ret;
+ }
+
+ // Used for str_less tests below.
+ template <typename T>
+ struct Results
+ {
+ const T* p1;
+ const T* p2;
+ bool expectedResult; // The expected result of the expression (p1 < p2)
+ };
+
+
+ // Used for const_mem_fun_t below.
+ struct X
+ {
+ X() { }
+ void DoNothing() const { }
+ };
+
+ template <typename T>
+ void foo(typename T::argument_type arg)
+ {
+ typename T::result_type (T::*pFunction)(typename T::argument_type) const = &T::operator();
+ T t(&X::DoNothing);
+ (t.*pFunction)(arg);
+ }
+
+
+ // Used for equal_to_2 tests below.
+ struct N1{
+ N1(int x) : mX(x) { }
+ int mX;
+ };
+
+ struct N2{
+ N2(int x) : mX(x) { }
+ int mX;
+ };
+
+ bool operator==(const N1& n1, const N1& n1a){ return (n1.mX == n1a.mX); }
+ bool operator==(const N1& n1, const N2& n2) { return (n1.mX == n2.mX); }
+ bool operator==(const N2& n2, const N1& n1) { return (n2.mX == n1.mX); }
+ bool operator==(const volatile N1& n1, const volatile N1& n1a) { return (n1.mX == n1a.mX); }
+
+ bool operator!=(const N1& n1, const N1& n1a){ return (n1.mX != n1a.mX); }
+ bool operator!=(const N1& n1, const N2& n2) { return (n1.mX != n2.mX); }
+ bool operator!=(const N2& n2, const N1& n1) { return (n2.mX != n1.mX); }
+ bool operator!=(const volatile N1& n1, const volatile N1& n1a) { return (n1.mX != n1a.mX); }
+
+ bool operator< (const N1& n1, const N1& n1a){ return (n1.mX < n1a.mX); }
+ bool operator< (const N1& n1, const N2& n2) { return (n1.mX < n2.mX); }
+ bool operator< (const N2& n2, const N1& n1) { return (n2.mX < n1.mX); }
+ bool operator< (const volatile N1& n1, const volatile N1& n1a) { return (n1.mX < n1a.mX); }
+
+
+ // Used for mem_fun tests below.
+ struct TestClass
+ {
+ mutable int mX;
+
+ TestClass() : mX(37) { }
+
+ void Increment()
+ {
+ mX++;
+ }
+
+ void IncrementConst() const
+ {
+ mX++;
+ }
+
+ int MultiplyBy(int x)
+ {
+ return mX * x;
+ }
+
+ int MultiplyByConst(int x) const
+ {
+ return mX * x;
+ }
+ };
+}
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+typedef eastl::basic_string<char8_t, MallocAllocator> String8MA;
+typedef eastl::basic_string<char16_t, MallocAllocator> String16MA;
+
+template struct eastl::string_hash<String8MA>;
+template struct eastl::string_hash<String16MA>;
+
+template class eastl::hash_set<String8MA, eastl::string_hash<String8MA> >;
+template class eastl::hash_set<String16MA, eastl::string_hash<String16MA> >;
+
+
+// Helper function for testing our default hash implementations for pod types which
+// simply returns the static_cast<size_t> of the val passed in
+template<typename T>
+int TestHashHelper(T val)
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::hash<T>()(val) == static_cast<size_t>(val));
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestFunctional
+//
+int TestFunctional()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ {
+ // str_equal_to
+ char p0[] = "";
+ char p1[] = "hello";
+ char p2[] = "world";
+ char p3[] = "helllllo";
+ char p4[] = "hello"; // Intentionally the same value as p1.
+
+ // str_equal_to
+ typedef hash_set<const char*, hash<const char*>, str_equal_to<const char*> > StringHashSet;
+ StringHashSet shs;
+
+ shs.insert(p1);
+ shs.insert(p2);
+ shs.insert(p3);
+
+ StringHashSet::iterator it = shs.find(p0);
+ EATEST_VERIFY(it == shs.end());
+
+ it = shs.find(p1);
+ EATEST_VERIFY(it != shs.end());
+
+ it = shs.find(p2);
+ EATEST_VERIFY(it != shs.end());
+
+ it = shs.find(p4);
+ EATEST_VERIFY(it != shs.end());
+ }
+
+ {
+ // str_less<const char8_t*>
+ Results<char> results8[] =
+ {
+ { "", "", false },
+ { "", "a", true },
+ { "a", "", false },
+ { "a", "a", false },
+ { "a", "b", true },
+ { "____a", "____a", false },
+ { "____a", "____b", true },
+ { "____b", "____a", false },
+ { "_\xff", "_a", false }, // Test high values, which exercises the signed/unsiged comparison behavior.
+ { "_a", "_\xff", true }
+ };
+
+ str_less<const char*> sl8;
+ for(size_t i = 0; i < EAArrayCount(results8); i++)
+ {
+ // Verify that our test is in line with the strcmp function.
+ bool bResult = (EA::StdC::Strcmp(results8[i].p1, results8[i].p2) < 0);
+ EATEST_VERIFY_F(bResult == results8[i].expectedResult, "Strcmp failure, test %zu. Expected \"%s\" to be %sless than \"%s\"", i, results8[i].p1, results8[i].expectedResult ? "" : "not ", results8[i].p2);
+
+ // Verify that str_less achieves the expected results.
+ bResult = sl8(results8[i].p1, results8[i].p2);
+ EATEST_VERIFY_F(bResult == results8[i].expectedResult, "str_less test failure, test %zu. Expected \"%s\" to be %sless than \"%s\"", i, results8[i].p1, results8[i].expectedResult ? "" : "not ", results8[i].p2);
+ }
+
+ // str_less<const wchar_t*>
+ Results<wchar_t> resultsW[] =
+ {
+ { L"", L"", false },
+ { L"", L"a", true },
+ { L"a", L"", false },
+ { L"a", L"a", false },
+ { L"a", L"b", true },
+ { L"____a", L"____a", false },
+ { L"____a", L"____b", true },
+ { L"____b", L"____a", false },
+ { L"_\xffff", L"_a", false }, // Test high values, which exercises the signed/unsiged comparison behavior.
+ { L"_a", L"_\xffff", true }
+ };
+
+ str_less<const wchar_t*> slW;
+ for(size_t i = 0; i < EAArrayCount(resultsW); i++)
+ {
+ // Verify that our test is in line with the strcmp function.
+ bool bResult = (EA::StdC::Strcmp(resultsW[i].p1, resultsW[i].p2) < 0);
+ EATEST_VERIFY_F(bResult == resultsW[i].expectedResult, "Strcmp failure, test %zu. Expected \"%s\" to be %sless than \"%s\"", i, results8[i].p1, results8[i].expectedResult ? "" : "not ", results8[i].p2);
+
+ // Verify that str_less achieves the expected results.
+ bResult = slW(resultsW[i].p1, resultsW[i].p2);
+ EATEST_VERIFY_F(bResult == resultsW[i].expectedResult, "str_less test failure, test %zu. Expected \"%ls\" to be %sless than \"%ls\"", i, resultsW[i].p1, resultsW[i].expectedResult ? "" : "not ", resultsW[i].p2);
+ }
+ }
+
+ {
+ // str_less
+ char p0[] = "";
+ char p1[] = "hello";
+ char p2[] = "world";
+ char p3[] = "helllllo";
+ char p4[] = "hello"; // Intentionally the same value as p1.
+
+ typedef set<const char*, str_less<const char*> > StringSet;
+ StringSet ss;
+
+ ss.insert(p1);
+ ss.insert(p2);
+ ss.insert(p3);
+
+ StringSet::iterator it = ss.find(p0);
+ EATEST_VERIFY(it == ss.end());
+
+ it = ss.find(p1);
+ EATEST_VERIFY(it != ss.end());
+
+ it = ss.find(p2);
+ EATEST_VERIFY(it != ss.end());
+
+ it = ss.find(p4);
+ EATEST_VERIFY(it != ss.end());
+ }
+
+ {
+ // equal_to_2
+ N1 n11(1);
+ N1 n13(3);
+ N2 n21(1);
+ N2 n22(2);
+ const N1 cn11(1);
+ const N1 cn13(3);
+ volatile N1 vn11(1);
+ volatile N1 vn13(3);
+ const volatile N1 cvn11(1);
+ const volatile N1 cvn13(3);
+
+ equal_to_2<N1, N2> e;
+ EATEST_VERIFY(e(n11, n21));
+ EATEST_VERIFY(e(n21, n11));
+
+ equal_to_2<N1, N1> es;
+ EATEST_VERIFY(es(n11, n11));
+ EATEST_VERIFY(!es(n11, n13));
+
+ equal_to_2<const N1, N1> ec;
+ EATEST_VERIFY(ec(cn11, n11));
+ EATEST_VERIFY(ec(n11, cn11));
+
+ equal_to_2<N1, const N1> ec2;
+ EATEST_VERIFY(ec2(n11, cn11));
+ EATEST_VERIFY(ec2(cn11, n11));
+
+ equal_to_2<const N1, const N1> ecc;
+ EATEST_VERIFY(ecc(cn11, cn11));
+
+ equal_to_2<volatile N1, N1> ev;
+ EATEST_VERIFY(ev(vn11, n11));
+ EATEST_VERIFY(ev(n11, vn11));
+
+ equal_to_2<N1, volatile N1> ev2;
+ EATEST_VERIFY(ev2(n11, vn11));
+ EATEST_VERIFY(ev2(vn11, n11));
+
+ equal_to_2<volatile N1, volatile N1> evv;
+ EATEST_VERIFY(evv(vn11, vn11));
+
+ equal_to_2<const volatile N1, N1> ecv;
+ EATEST_VERIFY(ecv(cvn11, n11));
+ EATEST_VERIFY(ecv(n11, cvn11));
+
+ equal_to_2<N1, const volatile N1> ecv2;
+ EATEST_VERIFY(ecv2(n11, cvn11));
+ EATEST_VERIFY(ecv2(cvn11, n11));
+
+ equal_to_2<const volatile N1, const volatile N1> ecvcv;
+ EATEST_VERIFY(ecvcv(cvn11, cvn11));
+
+ // not_equal_to_2
+ not_equal_to_2<N1, N2> n;
+ EATEST_VERIFY(n(n11, n22));
+ EATEST_VERIFY(n(n22, n11));
+
+ not_equal_to_2<N1, N1> ns;
+ EATEST_VERIFY(ns(n11, n13));
+ EATEST_VERIFY(!ns(n11, n11));
+
+ not_equal_to_2<const N1, N1> nc;
+ EATEST_VERIFY(nc(cn11, n13));
+ EATEST_VERIFY(nc(n13, cn11));
+
+ not_equal_to_2<N1, const N1> nc2;
+ EATEST_VERIFY(nc2(n13, cn11));
+ EATEST_VERIFY(nc2(cn11, n13));
+
+ not_equal_to_2<const N1, const N1> ncc;
+ EATEST_VERIFY(ncc(cn11, cn13));
+
+ not_equal_to_2<volatile N1, N1> nv;
+ EATEST_VERIFY(nv(vn11, n13));
+ EATEST_VERIFY(nv(n11, vn13));
+
+ not_equal_to_2<N1, volatile N1> nv2;
+ EATEST_VERIFY(nv2(n11, vn13));
+ EATEST_VERIFY(nv2(vn11, n13));
+
+ not_equal_to_2<volatile N1, volatile N1> nvv;
+ EATEST_VERIFY(nvv(vn11, vn13));
+
+ not_equal_to_2<const volatile N1, N1> ncv;
+ EATEST_VERIFY(ncv(cvn11, n13));
+ EATEST_VERIFY(ncv(n11, cvn13));
+
+ not_equal_to_2<N1, const volatile N1> ncv2;
+ EATEST_VERIFY(ncv2(n11, cvn13));
+ EATEST_VERIFY(ncv2(cvn11, n13));
+
+ not_equal_to_2<const volatile N1, const volatile N1> ncvcv;
+ EATEST_VERIFY(ncvcv(cvn11, cvn13));
+
+ // less_2
+ less_2<N1, N2> le;
+ EATEST_VERIFY(le(n11, n22));
+ EATEST_VERIFY(le(n22, n13));
+
+ less_2<N1, N1> les;
+ EATEST_VERIFY(les(n11, n13));
+
+ less_2<const N1, N1> lec;
+ EATEST_VERIFY(lec(cn11, n13));
+ EATEST_VERIFY(lec(n11, cn13));
+
+ less_2<N1, const N1> lec2;
+ EATEST_VERIFY(lec2(n11, cn13));
+ EATEST_VERIFY(lec2(cn11, n13));
+
+ less_2<const N1, const N1> lecc;
+ EATEST_VERIFY(lecc(cn11, cn13));
+
+ less_2<volatile N1, N1> lev;
+ EATEST_VERIFY(lev(vn11, n13));
+ EATEST_VERIFY(lev(n11, vn13));
+
+ less_2<N1, volatile N1> lev2;
+ EATEST_VERIFY(lev2(n11, vn13));
+ EATEST_VERIFY(lev2(vn11, n13));
+
+ less_2<volatile N1, volatile N1> levv;
+ EATEST_VERIFY(levv(vn11, vn13));
+
+ less_2<const volatile N1, N1> lecv;
+ EATEST_VERIFY(lecv(cvn11, n13));
+ EATEST_VERIFY(lecv(n11, cvn13));
+
+ less_2<N1, const volatile N1> lecv2;
+ EATEST_VERIFY(lecv2(n11, cvn13));
+ EATEST_VERIFY(lecv2(cvn11, n13));
+
+ less_2<const volatile N1, const volatile N1> lecvcv;
+ EATEST_VERIFY(lecvcv(cvn11, cvn13));
+ }
+
+
+ {
+ // Test defect report entry #297.
+ const X x;
+ foo< const_mem_fun_t<void, X> >(&x);
+ }
+
+
+ {
+ // mem_fun (no argument version)
+ TestClass tc0, tc1, tc2;
+ TestClass* tcArray[3] = { &tc0, &tc1, &tc2 };
+
+ for_each(tcArray, tcArray + 3, mem_fun(&TestClass::Increment));
+ EATEST_VERIFY((tc0.mX == 38) && (tc1.mX == 38) && (tc2.mX == 38));
+
+ for_each(tcArray, tcArray + 3, mem_fun(&TestClass::IncrementConst));
+ EATEST_VERIFY((tc0.mX == 39) && (tc1.mX == 39) && (tc2.mX == 39));
+ }
+
+
+ {
+ // mem_fun (one argument version)
+ TestClass tc0, tc1, tc2;
+ TestClass* tcArray[3] = { &tc0, &tc1, &tc2 };
+ int intArray1[3] = { -1, 0, 2 };
+ int intArray2[3] = { -9, -9, -9 };
+
+ transform(tcArray, tcArray + 3, intArray1, intArray2, mem_fun(&TestClass::MultiplyBy));
+ EATEST_VERIFY((intArray2[0] == -37) && (intArray2[1] == 0) && (intArray2[2] == 74));
+
+ intArray2[0] = intArray2[1] = intArray2[2] = -9;
+ transform(tcArray, tcArray + 3, intArray1, intArray2, mem_fun(&TestClass::MultiplyByConst));
+ EATEST_VERIFY((intArray2[0] == -37) && (intArray2[1] == 0) && (intArray2[2] == 74));
+ }
+
+
+ {
+ // mem_fun_ref (no argument version)
+ TestClass tcArray[3];
+
+ for_each(tcArray, tcArray + 3, mem_fun_ref(&TestClass::Increment));
+ EATEST_VERIFY((tcArray[0].mX == 38) && (tcArray[1].mX == 38) && (tcArray[2].mX == 38));
+
+ for_each(tcArray, tcArray + 3, mem_fun_ref(&TestClass::IncrementConst));
+ EATEST_VERIFY((tcArray[0].mX == 39) && (tcArray[1].mX == 39) && (tcArray[2].mX == 39));
+ }
+
+
+ {
+ // mem_fun_ref (one argument version)
+ TestClass tcArray[3];
+ int intArray1[3] = { -1, 0, 2 };
+ int intArray2[3] = { -9, -9, -9 };
+
+ transform(tcArray, tcArray + 3, intArray1, intArray2, mem_fun_ref(&TestClass::MultiplyBy));
+ EATEST_VERIFY((intArray2[0] == -37) && (intArray2[1] == 0) && (intArray2[2] == 74));
+
+ intArray2[0] = intArray2[1] = intArray2[2] = -9;
+ transform(tcArray, tcArray + 3, intArray1, intArray2, mem_fun_ref(&TestClass::MultiplyByConst));
+ EATEST_VERIFY((intArray2[0] == -37) && (intArray2[1] == 0) && (intArray2[2] == 74));
+ }
+
+
+ {
+ // Template instantations.
+ // These tell the compiler to compile all the functions for the given class.
+ eastl::hash_set<String8MA, eastl::string_hash<String8MA> > hs8;
+ eastl::hash_set<String16MA, eastl::string_hash<String16MA> > hs16;
+
+ EATEST_VERIFY(hs8.empty());
+ EATEST_VERIFY(hs16.empty());
+ }
+
+ {
+ // unary_compose
+ /*
+ eastl::vector<double> angles;
+ eastl::vector<double> sines;
+
+ eastl::transform(angles.begin(), angles.end(), sines.begin(),
+ eastl::compose1(eastl::negate<double>(),
+ eastl::compose1(eastl::ptr_fun(sin),
+ eastl::bind2nd(eastl::multiplies<double>(), 3.14159 / 180.0))));
+ */
+
+ // binary_compose
+ list<int> L;
+
+ eastl::list<int>::iterator in_range =
+ eastl::find_if(L.begin(), L.end(),
+ eastl::compose2(eastl::logical_and<bool>(),
+ eastl::bind2nd(eastl::greater_equal<int>(), 1),
+ eastl::bind2nd(eastl::less_equal<int>(), 10)));
+ EATEST_VERIFY(in_range == L.end());
+ }
+
+ {
+ nErrorCount += TestHashHelper<int>(4330);
+ nErrorCount += TestHashHelper<bool>(true);
+ nErrorCount += TestHashHelper<char>('E');
+ nErrorCount += TestHashHelper<signed char>('E');
+ nErrorCount += TestHashHelper<unsigned char>('E');
+ nErrorCount += TestHashHelper<char8_t>('E');
+ nErrorCount += TestHashHelper<char16_t>(0xEAEA);
+ nErrorCount += TestHashHelper<char32_t>(0x00EA4330);
+ #if !defined(EA_WCHAR_T_NON_NATIVE)
+ nErrorCount += TestHashHelper<wchar_t>(L'E');
+ #endif
+ nErrorCount += TestHashHelper<signed short>(4330);
+ nErrorCount += TestHashHelper<unsigned short>(4330u);
+ nErrorCount += TestHashHelper<signed int>(4330);
+ nErrorCount += TestHashHelper<unsigned int>(4330u);
+ nErrorCount += TestHashHelper<signed long>(4330l);
+ nErrorCount += TestHashHelper<unsigned long>(4330ul);
+ nErrorCount += TestHashHelper<signed long long>(4330ll);
+ nErrorCount += TestHashHelper<unsigned long long>(4330ll);
+ nErrorCount += TestHashHelper<float>(4330.099999f);
+ nErrorCount += TestHashHelper<double>(4330.055);
+ nErrorCount += TestHashHelper<long double>(4330.0654l);
+
+ {
+ enum hash_enum_test { e1, e2, e3 };
+ nErrorCount += TestHashHelper<hash_enum_test>(e1);
+ nErrorCount += TestHashHelper<hash_enum_test>(e2);
+ nErrorCount += TestHashHelper<hash_enum_test>(e3);
+ }
+ }
+
+
+#if defined(EA_COMPILER_CPP11_ENABLED) && EASTL_VARIADIC_TEMPLATES_ENABLED
+ // On platforms do not support variadic templates the eastl::invoke (eastl::mem_fn is built on eastl::invoke)
+ // implementation is extremely basic and does not hold up. A significant amount of code would have to be written
+ // and I don't believe the investment is justified at this point. If you require this functionality on older
+ // compilers please contact us.
+ //
+
+ // eastl::invoke
+ {
+ struct TestStruct
+ {
+ TestStruct(int inValue) : value(inValue) {}
+ void Add(int addAmount) { value += addAmount; }
+ int GetValue() { return value; }
+ int& GetValueReference() { return value; }
+ void NoThrow(int inValue) EA_NOEXCEPT {}
+ int value;
+ };
+
+ struct TestFunctor
+ {
+ void operator()() { called = true; }
+ bool called = false;
+ };
+
+ struct TestFunctorNoThrow
+ {
+ void operator()() EA_NOEXCEPT { called = true; }
+ bool called = false;
+ };
+
+ struct TestFunctorArguments
+ {
+ void operator()(int i) { value = i; }
+ int value = 0;
+ };
+
+ {
+ TestStruct a(42);
+ eastl::invoke(&TestStruct::Add, a, 10);
+ EATEST_VERIFY(a.value == 52);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::Add), TestStruct, int>::type, void>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(&TestStruct::Add), TestStruct, int>::value, "incorrect value for is_invocable");
+ static_assert(eastl::is_nothrow_invocable<decltype(&TestStruct::NoThrow), TestStruct, int>::value, "incorrect value for is_nothrow_invocable");
+ static_assert(!eastl::is_nothrow_invocable<decltype(&TestStruct::Add), TestStruct, int>::value, "incorrect value for is_nothrow_invocable");
+ }
+ {
+ TestStruct a(42);
+ eastl::invoke(&TestStruct::Add, &a, 10);
+ EATEST_VERIFY(a.value == 52);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::Add), TestStruct *, int>::type, void>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(&TestStruct::Add), TestStruct *, int>::value, "incorrect value for is_invocable");
+ static_assert(eastl::is_nothrow_invocable<decltype(&TestStruct::NoThrow), TestStruct *, int>::value, "incorrect value for is_nothrow_invocable");
+ static_assert(!eastl::is_nothrow_invocable<decltype(&TestStruct::Add), TestStruct *, int>::value, "incorrect value for is_nothrow_invocable");
+ }
+ {
+ TestStruct a(42);
+ eastl::reference_wrapper<TestStruct> r(a);
+ eastl::invoke(&TestStruct::Add, r, 10);
+ EATEST_VERIFY(a.value == 52);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::Add), eastl::reference_wrapper<TestStruct>, int>::type, void>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(&TestStruct::Add), eastl::reference_wrapper<TestStruct>, int>::value, "incorrect value for is_invocable");
+ static_assert(eastl::is_nothrow_invocable<decltype(&TestStruct::NoThrow), eastl::reference_wrapper<TestStruct>, int>::value, "incorrect value for is_nothrow_invocable");
+ static_assert(!eastl::is_nothrow_invocable<decltype(&TestStruct::Add), eastl::reference_wrapper<TestStruct>, int>::value, "incorrect value for is_nothrow_invocable");
+ }
+ {
+ TestStruct a(42);
+ eastl::invoke(&TestStruct::GetValueReference, a) = 43;
+ EATEST_VERIFY(a.value == 43);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::GetValueReference), TestStruct &>::type, int &>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(&TestStruct::GetValueReference), TestStruct &>::value, "incorrect value for is_invocable");
+ }
+ {
+ TestStruct a(42);
+ EATEST_VERIFY(eastl::invoke(&TestStruct::value, a) == 42);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::value), TestStruct &>::type, int &>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(&TestStruct::value), TestStruct &>::value, "incorrect value for is_invocable");
+ }
+ {
+ TestStruct a(42);
+ eastl::invoke(&TestStruct::value, a) = 43;
+ EATEST_VERIFY(a.value == 43);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::value), TestStruct &>::type, int &>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(&TestStruct::value), TestStruct &>::value, "incorrect value for is_invocable");
+ }
+ {
+ TestStruct a(42);
+ eastl::invoke(&TestStruct::value, &a) = 43;
+ EATEST_VERIFY(a.value == 43);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::value), TestStruct *>::type, int &>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(&TestStruct::value), TestStruct *>::value, "incorrect value for is_invocable");
+ }
+ {
+ TestStruct a(42);
+ eastl::reference_wrapper<TestStruct> r(a);
+ eastl::invoke(&TestStruct::value, r) = 43;
+ EATEST_VERIFY(a.value == 43);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::value), eastl::reference_wrapper<TestStruct>>::type, int &>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(&TestStruct::GetValue), eastl::reference_wrapper<TestStruct>>::value, "incorrect value for is_invocable");
+ }
+
+ #ifndef EA_COMPILER_GNUC
+ {
+ TestStruct a(42);
+ EATEST_VERIFY(eastl::invoke(&TestStruct::GetValue, a) == 42);
+
+ static_assert(
+ eastl::is_same<typename eastl::invoke_result<decltype(&TestStruct::GetValue), TestStruct*>::type, int>::value,
+ "incorrect type for invoke_result");
+
+ static_assert(eastl::is_invocable<decltype(&TestStruct::GetValue), TestStruct*>::value, "incorrect value for is_invocable");
+ }
+ #endif
+ {
+ TestFunctor f;
+ eastl::invoke(f);
+ EATEST_VERIFY(f.called);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(f)>::type, void>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(f)>::value, "incorrect value for is_invocable");
+ static_assert(!eastl::is_nothrow_invocable<decltype(f)>::value, "incorrect value for is_nothrow_invocable");
+ }
+ {
+ TestFunctorNoThrow f;
+ eastl::invoke(f);
+ EATEST_VERIFY(f.called);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(f)>::type, void>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(f)>::value, "incorrect value for is_invocable");
+ static_assert(eastl::is_nothrow_invocable<decltype(f)>::value, "incorrect value for is_nothrow_invocable");
+ }
+ {
+ TestFunctorArguments f;
+ eastl::invoke(f, 42);
+ EATEST_VERIFY(f.value == 42);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(f), int>::type, void>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(f), int>::value, "incorrect value for is_invocable");
+ }
+ {
+ struct TestInvokeConstAccess
+ {
+ void ConstMemberFunc(int i) const {}
+ void ConstVolatileMemberFunc(int i) const volatile {}
+
+ int mI;
+ };
+
+ static_assert(eastl::is_invocable<decltype(&TestInvokeConstAccess::ConstMemberFunc), const TestInvokeConstAccess*, int>::value, "incorrect value for is_invocable");
+ static_assert(eastl::is_invocable<decltype(&TestInvokeConstAccess::ConstVolatileMemberFunc), const volatile TestInvokeConstAccess*, int>::value, "incorrect value for is_invocable");
+ }
+ {
+ struct TestReferenceWrapperInvoke
+ {
+ int NonConstMemberFunc(int i) { return i; }
+ int ConstMemberFunc(int i) const { return i; }
+
+ int mI = 1;
+ const int mIC = 1;
+ };
+
+ TestReferenceWrapperInvoke testStruct;
+ int ret;
+
+ ret = eastl::invoke(&TestReferenceWrapperInvoke::NonConstMemberFunc, eastl::ref(testStruct), 1);
+ EATEST_VERIFY(ret == 1);
+
+ ret = eastl::invoke(&TestReferenceWrapperInvoke::ConstMemberFunc, eastl::ref(testStruct), 1);
+ EATEST_VERIFY(ret == 1);
+
+ ret = eastl::invoke(&TestReferenceWrapperInvoke::mI, eastl::ref(testStruct));
+ EATEST_VERIFY(ret == 1);
+
+ ret = eastl::invoke(&TestReferenceWrapperInvoke::mIC, eastl::ref(testStruct));
+ EATEST_VERIFY(ret == 1);
+ }
+ {
+ static bool called = false;
+ auto f = [] {called = true;};
+ eastl::invoke(f);
+ EATEST_VERIFY(called);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(f)>::type, void>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(f)>::value, "incorrect value for is_invocable");
+ }
+ {
+ static int value = 0;
+ auto f = [](int i) {value = i;};
+ eastl::invoke(f, 42);
+ EATEST_VERIFY(value == 42);
+
+ static_assert(eastl::is_same<typename eastl::invoke_result<decltype(f), int>::type, void>::value, "incorrect type for invoke_result");
+ static_assert(eastl::is_invocable<decltype(f), int>::value, "incorrect value for is_invocable");
+ }
+ {
+ struct A {};
+ struct B : public A {};
+ struct C : public A {};
+
+ struct TestStruct
+ {
+ A a() { return A(); };
+ B b() { return B(); };
+ C c() EA_NOEXCEPT { return C(); };
+ };
+
+ static_assert(!eastl::is_invocable_r<B, decltype(&TestStruct::a), TestStruct>::value, "incorrect value for is_invocable_r");
+ static_assert(eastl::is_invocable_r<A, decltype(&TestStruct::b), TestStruct>::value, "incorrect value for is_invocable_r");
+ static_assert(eastl::is_invocable_r<B, decltype(&TestStruct::b), TestStruct>::value, "incorrect value for is_invocable_r");
+ static_assert(!eastl::is_nothrow_invocable_r<B, decltype(&TestStruct::b), TestStruct>::value, "incorrect value for is_nothrow_invocable_r");
+ static_assert(eastl::is_nothrow_invocable_r<C, decltype(&TestStruct::c), TestStruct>::value, "incorrect value for is_nothrow_invocable_r");
+ }
+ }
+
+ // eastl::mem_fn
+ {
+ struct AddingStruct
+ {
+ AddingStruct(int inValue) : value(inValue) {}
+ void Add(int addAmount) { value += addAmount; }
+ void Add2(int add1, int add2) { value += (add1 + add2); }
+ int value;
+ };
+
+ struct OverloadedStruct
+ {
+ OverloadedStruct(int inValue) : value(inValue) {}
+ int &Value() { return value; }
+ const int &Value() const { return value; }
+ int value;
+ };
+
+ {
+ AddingStruct a(42);
+ eastl::mem_fn(&AddingStruct::Add)(a, 6);
+ EATEST_VERIFY(a.value == 48);
+ }
+ {
+ AddingStruct a(42);
+ eastl::mem_fn(&AddingStruct::Add2)(a, 3, 3);
+ EATEST_VERIFY(a.value == 48);
+ }
+ {
+ AddingStruct a(42);
+ auto fStructAdd = eastl::mem_fn(&AddingStruct::Add);
+ fStructAdd(a,6);
+ EATEST_VERIFY(a.value == 48);
+ }
+ {
+ OverloadedStruct a(42);
+ EATEST_VERIFY(eastl::mem_fn<int &()>(&OverloadedStruct::Value)(a) == 42);
+ EATEST_VERIFY(eastl::mem_fn<const int &() const>(&OverloadedStruct::Value)(a) == 42);
+ }
+ }
+#endif
+
+ // eastl::function
+ {
+ {
+ {
+ struct Functor { int operator()() { return 42; } };
+ eastl::function<int(void)> fn = Functor();
+ EATEST_VERIFY(fn() == 42);
+ }
+
+ {
+ struct Functor { int operator()(int in) { return in; } };
+ eastl::function<int(int)> fn = Functor();
+ EATEST_VERIFY(fn(24) == 24);
+ }
+ }
+
+ {
+ int val = 0;
+ auto lambda = [&val] { ++val; };
+ {
+ eastl::function<void(void)> ff = std::bind(lambda);
+ ff();
+ VERIFY(val == 1);
+ }
+ {
+ eastl::function<void(void)> ff = nullptr;
+ ff = std::bind(lambda);
+ ff();
+ VERIFY(val == 2);
+ }
+ }
+
+ {
+ int val = 0;
+ {
+ eastl::function<int(int*)> ff = &TestIntRet;
+ int ret = ff(&val);
+ EATEST_VERIFY(ret == 0);
+ EATEST_VERIFY(val == 1);
+ }
+ {
+ eastl::function<int(int*)> ff;
+ ff = &TestIntRet;
+ int ret = ff(&val);
+ EATEST_VERIFY(ret == 1);
+ EATEST_VERIFY(val == 2);
+ }
+ }
+
+ {
+ struct Test { int x = 1; };
+ Test t;
+ const Test ct;
+
+ {
+ eastl::function<int(const Test&)> ff = &Test::x;
+ int ret = ff(t);
+ EATEST_VERIFY(ret == 1);
+ }
+ {
+ eastl::function<int(const Test&)> ff = &Test::x;
+ int ret = ff(ct);
+ EATEST_VERIFY(ret == 1);
+ }
+ {
+ eastl::function<int(const Test&)> ff;
+ ff = &Test::x;
+ int ret = ff(t);
+ EATEST_VERIFY(ret == 1);
+ }
+ {
+ eastl::function<int(const Test&)> ff;
+ ff = &Test::x;
+ int ret = ff(ct);
+ EATEST_VERIFY(ret == 1);
+ }
+ }
+
+ {
+ struct TestVoidRet
+ {
+ void IncX() const
+ {
+ ++x;
+ }
+
+ void IncX()
+ {
+ ++x;
+ }
+
+ mutable int x = 0;
+ };
+
+ TestVoidRet voidRet;
+ const TestVoidRet cvoidRet;
+
+ {
+ eastl::function<void(const TestVoidRet&)> ff = static_cast<void(TestVoidRet::*)() const>(&TestVoidRet::IncX);
+ ff(cvoidRet);
+ VERIFY(cvoidRet.x == 1);
+ }
+ {
+ eastl::function<void(const TestVoidRet&)> ff = static_cast<void(TestVoidRet::*)() const>(&TestVoidRet::IncX);
+ ff(voidRet);
+ VERIFY(voidRet.x == 1);
+ }
+ {
+ eastl::function<void(TestVoidRet&)> ff = static_cast<void(TestVoidRet::*)()>(&TestVoidRet::IncX);
+ ff(voidRet);
+ VERIFY(voidRet.x == 2);
+ }
+ }
+
+ {
+ int val = 0;
+ struct Functor { void operator()(int* p) { *p += 1; } };
+ Functor functor;
+ {
+ eastl::function<void(int*)> ff = eastl::reference_wrapper<Functor>(functor);
+ ff(&val);
+ EATEST_VERIFY(val == 1);
+ }
+
+ {
+ eastl::function<void(int*)> ff;
+ ff = eastl::reference_wrapper<Functor>(functor);
+ ff(&val);
+ EATEST_VERIFY(val == 2);
+ }
+ }
+
+ {
+ {
+ auto lambda = []{};
+ EA_UNUSED(lambda);
+ static_assert(internal::is_functor_inplace_allocatable<decltype(lambda), EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE>::value == true, "lambda equivalent to function pointer does not fit in eastl::function local memory.");
+ }
+
+ {
+ eastl::function<void(void)> fn;
+
+ EATEST_VERIFY(!fn);
+ fn = [] {};
+ EATEST_VERIFY(!!fn);
+ }
+
+ {
+ eastl::function<int(int)> fn = [](int param) { return param; };
+ EATEST_VERIFY(fn(42) == 42);
+ }
+
+ {
+ eastl::function<int(int)> fn = ReturnVal;
+ EATEST_VERIFY(fn(42) == 42);
+ }
+
+ {
+ eastl::function<int()> fn0 = ReturnZero;
+ eastl::function<int()> fn1 = ReturnOne;
+
+ EATEST_VERIFY(fn0() == 0 && fn1() == 1);
+ swap(fn0, fn1);
+ EATEST_VERIFY(fn0() == 1 && fn1() == 0);
+ }
+
+ {
+ eastl::function<int()> fn0 = ReturnZero;
+ eastl::function<int()> fn1 = ReturnOne;
+
+ EATEST_VERIFY(fn0() == 0 && fn1() == 1);
+ fn0 = fn1;
+ EATEST_VERIFY(fn0() == 1 && fn1() == 1);
+ }
+
+ {
+ eastl::function<int()> fn0 = ReturnZero;
+ eastl::function<int()> fn1 = ReturnOne;
+
+ EATEST_VERIFY(fn0() == 0 && fn1() == 1);
+ fn0 = eastl::move(fn1);
+ EATEST_VERIFY(fn0() == 1 && fn1 == nullptr);
+ }
+
+ {
+ eastl::function<int(int)> f1(nullptr);
+ EATEST_VERIFY(!f1);
+
+ eastl::function<int(int)> f2 = nullptr;
+ EATEST_VERIFY(!f2);
+ }
+ }
+
+ {
+ // test the default allocator path by using a lambda capture too large to fit into the eastl::function local
+ // storage.
+ uint64_t a = 1, b = 2, c = 3, d = 4, e = 5, f = 6;
+ eastl::function<uint64_t(void)> fn = [=] { return a + b + c + d + e + f; };
+ auto result = fn();
+ EATEST_VERIFY(result == 21);
+ }
+
+ {
+ struct Functor { void operator()() { return; } };
+ eastl::function<void(void)> fn;
+ eastl::function<void(void)> fn2 = nullptr;
+ EATEST_VERIFY(!fn);
+ EATEST_VERIFY(!fn2);
+ EATEST_VERIFY(fn == nullptr);
+ EATEST_VERIFY(fn2 == nullptr);
+ EATEST_VERIFY(nullptr == fn);
+ EATEST_VERIFY(nullptr == fn2);
+ fn = Functor();
+ fn2 = Functor();
+ EATEST_VERIFY(!!fn);
+ EATEST_VERIFY(!!fn2);
+ EATEST_VERIFY(fn != nullptr);
+ EATEST_VERIFY(fn2 != nullptr);
+ EATEST_VERIFY(nullptr != fn);
+ EATEST_VERIFY(nullptr != fn2);
+ fn = nullptr;
+ fn2 = fn;
+ EATEST_VERIFY(!fn);
+ EATEST_VERIFY(!fn2);
+ EATEST_VERIFY(fn == nullptr);
+ EATEST_VERIFY(fn2 == nullptr);
+ EATEST_VERIFY(nullptr == fn);
+ EATEST_VERIFY(nullptr == fn2);
+ }
+
+ {
+ using eastl::swap;
+ struct Functor { int operator()() { return 5; } };
+ eastl::function<int(void)> fn = Functor();
+ eastl::function<int(void)> fn2;
+ EATEST_VERIFY(fn() == 5);
+ EATEST_VERIFY(!fn2);
+ fn.swap(fn2);
+ EATEST_VERIFY(!fn);
+ EATEST_VERIFY(fn2() == 5);
+ swap(fn, fn2);
+ EATEST_VERIFY(fn() == 5);
+ EATEST_VERIFY(!fn2);
+ }
+
+ {
+ uint64_t a = 1, b = 2, c = 3, d = 4, e = 5, f = 6;
+ eastl::function<uint64_t(void)> fn([=] { return a + b + c + d + e + f; });
+
+ auto result = fn();
+ EATEST_VERIFY(result == 21);
+ }
+
+ // user regression "self assigment" tests
+ {
+ eastl::function<int(void)> fn = [cache = 0] () mutable { return cache++; };
+
+ EATEST_VERIFY(fn() == 0);
+ EATEST_VERIFY(fn() == 1);
+ EATEST_VERIFY(fn() == 2);
+
+ EA_DISABLE_CLANG_WARNING(-Wunknown-pragmas)
+ EA_DISABLE_CLANG_WARNING(-Wunknown-warning-option)
+ EA_DISABLE_CLANG_WARNING(-Wself-assign-overloaded)
+ fn = fn;
+ EA_RESTORE_CLANG_WARNING()
+ EA_RESTORE_CLANG_WARNING()
+ EA_RESTORE_CLANG_WARNING()
+
+ EATEST_VERIFY(fn() == 3);
+ EATEST_VERIFY(fn() == 4);
+ EATEST_VERIFY(fn() == 5);
+
+ fn = eastl::move(fn);
+
+ EATEST_VERIFY(fn() == 6);
+ EATEST_VERIFY(fn() == 7);
+ EATEST_VERIFY(fn() == 8);
+ }
+
+ // user regression for memory leak when re-assigning an eastl::function which already holds a large closure.
+ {
+ static int sCtorCount = 0;
+ static int sDtorCount = 0;
+
+ {
+ struct local
+ {
+ local() { sCtorCount++; }
+ local(const local&) { sCtorCount++; }
+ local(local&&) { sCtorCount++; }
+ ~local() { sDtorCount++; }
+
+ void operator=(const local&) = delete; // suppress msvc warning
+ } l;
+
+ eastl::function<bool()> f;
+
+ f = [l]() { return false; };
+
+ // ensure closure resources are cleaned up when assigning to a non-null eastl::function.
+ f = [l]() { return true; };
+ }
+
+ EATEST_VERIFY(sCtorCount == sDtorCount);
+ }
+ }
+
+ // Checking _MSC_EXTENSIONS is required because the Microsoft calling convention classifiers are only available when
+ // compiler specific C/C++ language extensions are enabled.
+ #if defined(EA_PLATFORM_MICROSOFT) && defined(_MSC_EXTENSIONS)
+ {
+ // no arguments
+ typedef void(__stdcall * StdCallFunction)();
+ typedef void(__cdecl * CDeclFunction)();
+
+ // only varargs
+ typedef void(__stdcall * StdCallFunctionWithVarargs)(...);
+ typedef void(__cdecl * CDeclFunctionWithVarargs)(...);
+
+ // arguments and varargs
+ typedef void(__stdcall * StdCallFunctionWithVarargsAtEnd)(int, int, int, ...);
+ typedef void(__cdecl * CDeclFunctionWithVarargsAtEnd)(int, short, long, ...);
+
+ static_assert(!eastl::is_function<StdCallFunction>::value, "is_function failure");
+ static_assert(!eastl::is_function<CDeclFunction>::value, "is_function failure");
+ static_assert(eastl::is_function<typename eastl::remove_pointer<StdCallFunction>::type>::value, "is_function failure");
+ static_assert(eastl::is_function<typename eastl::remove_pointer<CDeclFunction>::type>::value, "is_function failure");
+ static_assert(eastl::is_function<typename eastl::remove_pointer<StdCallFunctionWithVarargs>::type>::value, "is_function failure");
+ static_assert(eastl::is_function<typename eastl::remove_pointer<CDeclFunctionWithVarargs>::type>::value, "is_function failure");
+ static_assert(eastl::is_function<typename eastl::remove_pointer<StdCallFunctionWithVarargsAtEnd>::type>::value, "is_function failure");
+ static_assert(eastl::is_function<typename eastl::remove_pointer<CDeclFunctionWithVarargsAtEnd>::type>::value, "is_function failure");
+ }
+ #endif
+
+ // Test Function Objects
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ {
+ // eastl::plus<void>
+ {
+ {
+ auto result = eastl::plus<>{}(40, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 42);
+ }
+
+ {
+ auto result = eastl::plus<>{}(40.0, 2.0);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 42.0);
+ }
+
+ {
+ auto result = eastl::plus<>{}(eastl::string("4"), "2");
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == "42");
+ }
+ }
+
+ // eastl::minus<void>
+ {
+ {
+ auto result = eastl::minus<>{}(6, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 4);
+ }
+
+ {
+ auto result = eastl::minus<>{}(6.0, 2.0);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 4.0);
+ }
+ }
+
+ // eastl::multiplies
+ {
+ {
+ auto result = eastl::multiplies<>{}(6, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 12);
+ }
+
+ {
+ auto result = eastl::multiplies<>{}(6.0, 2.0);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 12.0);
+ }
+ }
+
+
+ // eastl::divides
+ {
+ {
+ auto result = eastl::divides<>{}(6, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 3);
+ }
+
+ {
+ auto result = eastl::divides<>{}(6.0, 2.0);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 3.0);
+ }
+ }
+
+ // eastl::modulus
+ {
+ {
+ auto result = eastl::modulus<>{}(6, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 0);
+ }
+
+ {
+ auto result = eastl::modulus<>{}(7, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == 1);
+ }
+ }
+
+ // eastl::negate
+ {
+ {
+ auto result = eastl::negate<>{}(42);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == -42);
+ }
+
+ {
+ auto result = eastl::negate<>{}(42.0);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result == -42.0);
+ }
+ }
+
+ // eastl::equal_to
+ {
+ {
+ auto result = eastl::equal_to<>{}(40, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(!result);
+ }
+
+ {
+ auto result = eastl::equal_to<>{}(40, 40);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+ }
+
+ // eastl::not_equal_to
+ {
+ {
+ auto result = eastl::not_equal_to<>{}(40, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+
+ {
+ auto result = eastl::not_equal_to<>{}(40, 40);
+ EA_UNUSED(result);
+ EATEST_VERIFY(!result);
+ }
+ }
+
+ // eastl::greater<void>
+ {
+ {
+ auto result = eastl::greater<>{}(40, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+
+ {
+ auto result = eastl::greater<>{}(1, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(!result);
+ }
+
+ {
+ auto result = eastl::greater<>{}(eastl::string("4"), "2");
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+ }
+
+ // eastl::less<void>
+ {
+ {
+ auto result = eastl::less<>{}(40, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(!result);
+ }
+
+ {
+ auto result = eastl::less<>{}(1, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+
+ {
+ auto result = eastl::less<>{}(eastl::string("4"), "2");
+ EA_UNUSED(result);
+ EATEST_VERIFY(!result);
+ }
+ }
+
+ // eastl::greater_equal<void>
+ {
+ {
+ auto result = eastl::greater_equal<>{}(40, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+
+ {
+ auto result = eastl::greater_equal<>{}(40, 40);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+
+ {
+ auto result = eastl::greater_equal<>{}(40, 43);
+ EA_UNUSED(result);
+ EATEST_VERIFY(!result);
+ }
+ }
+
+ // eastl::less_equal<void>
+ {
+ {
+ auto result = eastl::less_equal<>{}(40, 2);
+ EA_UNUSED(result);
+ EATEST_VERIFY(!result);
+ }
+
+ {
+ auto result = eastl::less_equal<>{}(40, 40);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+
+ {
+ auto result = eastl::less_equal<>{}(40, 43);
+ EA_UNUSED(result);
+ EATEST_VERIFY(result);
+ }
+ }
+
+ // eastl::logical_and
+ {
+ auto result = eastl::logical_and<>{}(true, true);
+ EATEST_VERIFY(result);
+ result = eastl::logical_and<>{}(true, false);
+ EATEST_VERIFY(!result);
+ result = eastl::logical_and<>{}(false, true);
+ EATEST_VERIFY(!result);
+ result = eastl::logical_and<>{}(false, false);
+ EATEST_VERIFY(!result);
+
+ bool b = false;
+ result = eastl::logical_and<>{}(b, false);
+ EATEST_VERIFY(!result);
+ }
+
+ // eastl::logical_or
+ {
+ auto result = eastl::logical_or<>{}(true, true);
+ EATEST_VERIFY(result);
+ result = eastl::logical_or<>{}(true, false);
+ EATEST_VERIFY(result);
+ result = eastl::logical_or<>{}(false, true);
+ EATEST_VERIFY(result);
+ result = eastl::logical_or<>{}(false, false);
+ EATEST_VERIFY(!result);
+
+ bool b = false;
+ result = eastl::logical_or<>{}(b, false);
+ EATEST_VERIFY(!result);
+ result = eastl::logical_or<>{}(b, true);
+ EATEST_VERIFY(result);
+ }
+
+ // eastl::logical_not
+ {
+ auto result = eastl::logical_not<>{}(true);
+ EATEST_VERIFY(!result);
+ result = eastl::logical_not<>{}(result);
+ EATEST_VERIFY(result);
+ result = eastl::logical_not<>{}(false);
+ EATEST_VERIFY(result);
+ }
+ }
+ #endif
+
+ // not_fn
+ {
+ {
+ auto ft = eastl::not_fn([] { return true; });
+ auto ff = eastl::not_fn([] { return false; });
+
+ EATEST_VERIFY(ft() == false);
+ EATEST_VERIFY(ff() == true);
+ }
+ }
+
+ // reference_wrapper
+ {
+ // operator T&
+ {
+ int i = 0;
+ eastl::reference_wrapper<int> r(i);
+ int &j = r;
+ j = 42;
+
+ EATEST_VERIFY(i == 42);
+ }
+
+ // get
+ {
+ int i = 0;
+ eastl::reference_wrapper<int> r(i);
+ r.get() = 42;
+
+ EATEST_VERIFY(i == 42);
+ }
+
+ // copy constructor
+ {
+ int i = 0;
+ eastl::reference_wrapper<int> r(i);
+ eastl::reference_wrapper<int> copy(r);
+ copy.get() = 42;
+
+ EATEST_VERIFY(i == 42);
+ }
+
+ // assignment
+ {
+ int i = 0;
+ int j = 0;
+
+ eastl::reference_wrapper<int> r1(i);
+ eastl::reference_wrapper<int> r2(j);
+
+ r2 = r1; // rebind r2 to refer to i
+ r2.get() = 42;
+
+ EATEST_VERIFY(i == 42);
+ EATEST_VERIFY(j == 0);
+ }
+
+ // invoke
+ {
+ struct Functor
+ {
+ bool called = false;
+ void operator()() {called = true;}
+ };
+
+ Functor f;
+ eastl::reference_wrapper<Functor> r(f);
+ r();
+
+ EATEST_VERIFY(f.called == true);
+ }
+
+ // ref/cref
+ {
+ {
+ int i = 0;
+ eastl::reference_wrapper<int> r1 = eastl::ref(i);
+ r1.get() = 42;
+
+ eastl::reference_wrapper<int> r2 = eastl::ref(r1);
+
+ EATEST_VERIFY(i == 42);
+ EATEST_VERIFY(r2 == 42);
+ }
+
+ {
+ int i = 1337;
+ eastl::reference_wrapper<const int> r1 = eastl::cref(i);
+ EATEST_VERIFY(r1 == 1337);
+
+ eastl::reference_wrapper<const int> r2 = eastl::cref(r1);
+ EATEST_VERIFY(r2 == 1337);
+ }
+ }
+ }
+
+ return nErrorCount;
+}
+
+// Test that we can instantiate invoke_result with incorrect argument types.
+// This should be instantiable, but should not have a `type` typedef.
+struct TestInvokeResult
+{
+ int f(int i) {return i;}
+};
+
+template struct eastl::invoke_result<decltype(&TestInvokeResult::f), TestInvokeResult, void>;
+
+static_assert(!eastl::is_invocable<decltype(&TestInvokeResult::f), TestInvokeResult, void>::value, "incorrect value for is_invocable");
+static_assert(!eastl::is_invocable<decltype(&TestInvokeResult::f), TestInvokeResult, int, int>::value, "incorrect value for is_invocable");
+static_assert(eastl::is_invocable<decltype(&TestInvokeResult::f), TestInvokeResult, int>::value, "incorrect value for is_invocable");
+
+static_assert(!eastl::is_invocable_r<int, decltype(&TestInvokeResult::f), TestInvokeResult, void>::value, "incorrect value for is_invocable_r");
+static_assert(!eastl::is_invocable_r<void, decltype(&TestInvokeResult::f), TestInvokeResult, int, int>::value, "incorrect value for is_invocable_r");
+static_assert(eastl::is_invocable_r<void, decltype(&TestInvokeResult::f), TestInvokeResult, int>::value, "incorrect value for is_invocable_r");
+static_assert(eastl::is_invocable_r<int, decltype(&TestInvokeResult::f), TestInvokeResult, int>::value, "incorrect value for is_invocable_r");
+
+struct TestCallableInvokeResult
+{
+ int operator()(int i) {return i;}
+};
+
+template struct eastl::invoke_result<TestCallableInvokeResult, void>;
+
+static_assert(!eastl::is_invocable<TestCallableInvokeResult, void>::value, "incorrect value for is_invocable");
+static_assert(!eastl::is_invocable<TestCallableInvokeResult, int, int>::value, "incorrect value for is_invocable");
+static_assert(eastl::is_invocable<TestCallableInvokeResult, int>::value, "incorrect value for is_invocable");
+
+static_assert(!eastl::is_invocable_r<int, TestCallableInvokeResult, void>::value, "incorrect value for is_invocable_r");
+static_assert(!eastl::is_invocable_r<void, TestCallableInvokeResult, int, int>::value, "incorrect value for is_invocable_r");
+static_assert(eastl::is_invocable_r<void, TestCallableInvokeResult, int>::value, "incorrect value for is_invocable_r");
+static_assert(eastl::is_invocable_r<int, TestCallableInvokeResult, int>::value, "incorrect value for is_invocable_r");
+
+typedef decltype(eastl::ref(eastl::declval<TestCallableInvokeResult&>())) TestCallableRefInvokeResult;
+
+static_assert(!eastl::is_invocable<TestCallableRefInvokeResult, void>::value, "incorrect value for is_invocable");
+static_assert(!eastl::is_invocable<TestCallableRefInvokeResult, int, int>::value, "incorrect value for is_invocable");
+static_assert(eastl::is_invocable<TestCallableRefInvokeResult, int>::value, "incorrect value for is_invocable");
+
+static_assert(!eastl::is_invocable_r<int, TestCallableRefInvokeResult, void>::value, "incorrect value for is_invocable_r");
+static_assert(!eastl::is_invocable_r<void, TestCallableRefInvokeResult, int, int>::value, "incorrect value for is_invocable_r");
+static_assert(eastl::is_invocable_r<void, TestCallableRefInvokeResult, int>::value, "incorrect value for is_invocable_r");
+static_assert(eastl::is_invocable_r<int, TestCallableRefInvokeResult, int>::value, "incorrect value for is_invocable_r");
diff --git a/EASTL/test/source/TestHash.cpp b/EASTL/test/source/TestHash.cpp
new file mode 100644
index 0000000..1bcf996
--- /dev/null
+++ b/EASTL/test/source/TestHash.cpp
@@ -0,0 +1,1505 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include "TestMap.h"
+#include "TestSet.h"
+#include <EASTL/hash_set.h>
+#include <EASTL/hash_map.h>
+#include <EASTL/unordered_set.h>
+#include <EASTL/unordered_map.h>
+#include <EASTL/map.h>
+#include <EASTL/string.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/vector.h>
+#include <EASTL/unique_ptr.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <string.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+using namespace eastl;
+
+namespace eastl
+{
+ template <>
+ struct hash<Align32>
+ {
+ size_t operator()(const Align32& a32) const
+ { return static_cast<size_t>(a32.mX); }
+ };
+
+ // extension to hash an eastl::pair
+ template <typename T1, typename T2>
+ struct hash<pair<T1, T2>>
+ {
+ size_t operator()(const pair<T1, T2>& c) const
+ {
+ return static_cast<size_t>(hash<T1>()(c.first) ^ hash<T2>()(c.second));
+ }
+ };
+}
+
+// For regression code below.
+class HashRegressionA { public: int x; };
+class HashRegressionB { public: int y; };
+
+
+// For regression code below.
+struct Struct {
+ char8_t name[128];
+};
+
+
+// For regression code below.
+template<class HashType>
+struct HashTest
+{
+ template<typename... Args>
+ auto operator()(Args&&... args)
+ {
+ return eastl::hash<HashType>{}(eastl::forward<Args>(args)...);
+ }
+};
+
+
+
+// What we are doing here is creating a special case of a hashtable where the key compare
+// function is not the same as the value operator==. 99% of the time when you create a
+// hashtable the key compare (predicate) is simply key_equal or something else that's
+// identical to operator== for the hashtable value type. But for some tests we want
+// to exercise the case that these aren't different. A result of this difference is that
+// you can lookup an element in a hash table and the returned value is not == to the
+// value you looked up, because it succeeds the key compare but not operator==.
+struct HashtableValue
+{
+ HashtableValue(eastl_size_t d = 0, eastl_size_t e = 0) : mData(d), mExtra(e){}
+ void Set(eastl_size_t d, eastl_size_t e = 0) { mData = d; mExtra = e; }
+
+ eastl_size_t mData;
+ eastl_size_t mExtra;
+};
+
+bool operator==(const HashtableValue& htv1, const HashtableValue& htv2)
+{
+ return (htv1.mData == htv2.mData) && (htv1.mExtra == htv2.mExtra); // Fully compare the HashTableValue.
+}
+
+struct HashtableValuePredicate
+{
+ bool operator()(const HashtableValue& htv1, const HashtableValue& htv2) const
+ { return (htv1.mData == htv2.mData); } // Compare just the mData portion of HashTableValue.
+};
+
+struct HashtableValueHash
+{
+ size_t operator()(const HashtableValue& htv) const
+ { return static_cast<size_t>(htv.mData); }
+};
+
+
+
+
+// Explicit Template instantiations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::hashtable<int,
+ eastl::pair<const int, int>,
+ eastl::allocator,
+ eastl::use_first<eastl::pair<const int, int>>,
+ eastl::equal_to<int>,
+ eastl::hash<int>,
+ mod_range_hashing,
+ default_ranged_hash,
+ prime_rehash_policy,
+ true, // bCacheHashCode
+ true, // bMutableIterators
+ true // bUniqueKeys
+ >;
+template class eastl::hashtable<int,
+ eastl::pair<const int, int>,
+ eastl::allocator,
+ eastl::use_first<eastl::pair<const int, int>>,
+ eastl::equal_to<int>,
+ eastl::hash<int>,
+ mod_range_hashing,
+ default_ranged_hash,
+ prime_rehash_policy,
+ false, // bCacheHashCode
+ true, // bMutableIterators
+ true // bUniqueKeys
+ >;
+// TODO(rparolin): known compiler error, we should fix this.
+// template class eastl::hashtable<int,
+// eastl::pair<const int, int>,
+// eastl::allocator,
+// eastl::use_first<eastl::pair<const int, int>>,
+// eastl::equal_to<int>,
+// eastl::hash<int>,
+// mod_range_hashing,
+// default_ranged_hash,
+// prime_rehash_policy,
+// false, // bCacheHashCode
+// true, // bMutableIterators
+// false // bUniqueKeys
+// >;
+
+// Note these will only compile non-inherited functions. We provide explicit
+// template instantiations for the hashtable base class above to get compiler
+// coverage of those inherited hashtable functions.
+template class eastl::hash_set<int>;
+template class eastl::hash_multiset<int>;
+template class eastl::hash_map<int, int>;
+template class eastl::hash_multimap<int, int>;
+template class eastl::hash_set<Align32>;
+template class eastl::hash_multiset<Align32>;
+template class eastl::hash_map<Align32, Align32>;
+template class eastl::hash_multimap<Align32, Align32>;
+
+// validate static assumptions about hashtable core types
+typedef eastl::hash_node<int, false> HashNode1;
+typedef eastl::hash_node<int, true> HashNode2;
+static_assert(eastl::is_default_constructible<HashNode1>::value, "hash_node static error");
+static_assert(eastl::is_default_constructible<HashNode2>::value, "hash_node static error");
+static_assert(eastl::is_copy_constructible<HashNode1>::value, "hash_node static error");
+static_assert(eastl::is_copy_constructible<HashNode2>::value, "hash_node static error");
+static_assert(eastl::is_move_constructible<HashNode1>::value, "hash_node static error");
+static_assert(eastl::is_move_constructible<HashNode2>::value, "hash_node static error");
+
+// A custom hash function that has a high number of collisions is used to ensure many keys share the same hash value.
+struct colliding_hash
+{
+ size_t operator()(const int& val) const
+ { return static_cast<size_t>(val % 3); }
+};
+
+
+
+int TestHash()
+{
+ int nErrorCount = 0;
+
+ { // Test declarations
+ hash_set<int> hashSet;
+ hash_multiset<int> hashMultiSet;
+ hash_map<int, int> hashMap;
+ hash_multimap<int, int> hashMultiMap;
+
+ hash_set<int> hashSet2(hashSet);
+ EATEST_VERIFY(hashSet2.size() == hashSet.size());
+ EATEST_VERIFY(hashSet2 == hashSet);
+
+ hash_multiset<int> hashMultiSet2(hashMultiSet);
+ EATEST_VERIFY(hashMultiSet2.size() == hashMultiSet.size());
+ EATEST_VERIFY(hashMultiSet2 == hashMultiSet);
+
+ hash_map<int, int> hashMap2(hashMap);
+ EATEST_VERIFY(hashMap2.size() == hashMap.size());
+ EATEST_VERIFY(hashMap2 == hashMap);
+
+ hash_multimap<int, int> hashMultiMap2(hashMultiMap);
+ EATEST_VERIFY(hashMultiMap2.size() == hashMultiMap.size());
+ EATEST_VERIFY(hashMultiMap2 == hashMultiMap);
+
+
+ // allocator_type& get_allocator();
+ // void set_allocator(const allocator_type& allocator);
+ hash_set<int>::allocator_type& allocator = hashSet.get_allocator();
+ hashSet.set_allocator(EASTLAllocatorType());
+ hashSet.set_allocator(allocator);
+ // To do: Try to find something better to test here.
+
+
+ // const key_equal& key_eq() const;
+ // key_equal& key_eq();
+ hash_set<int> hs;
+ const hash_set<int> hsc;
+
+ const hash_set<int>::key_equal& ke = hsc.key_eq();
+ hs.key_eq() = ke;
+
+
+ // const char* get_name() const;
+ // void set_name(const char* pName);
+ #if EASTL_NAME_ENABLED
+ hashMap.get_allocator().set_name("test");
+ const char* pName = hashMap.get_allocator().get_name();
+ EATEST_VERIFY(equal(pName, pName + 5, "test"));
+ #endif
+ }
+
+
+ {
+ hash_set<int> hashSet;
+
+ // Clear a newly constructed, already empty container.
+ hashSet.clear(true);
+ EATEST_VERIFY(hashSet.validate());
+ EATEST_VERIFY(hashSet.size() == 0);
+ EATEST_VERIFY(hashSet.bucket_count() == 1);
+
+ for(int i = 0; i < 100; ++i)
+ hashSet.insert(i);
+ EATEST_VERIFY(hashSet.validate());
+ EATEST_VERIFY(hashSet.size() == 100);
+
+ hashSet.clear(true);
+ EATEST_VERIFY(hashSet.validate());
+ EATEST_VERIFY(hashSet.size() == 0);
+ EATEST_VERIFY(hashSet.bucket_count() == 1);
+
+ for(int i = 0; i < 100; ++i)
+ hashSet.insert(i);
+ EATEST_VERIFY(hashSet.validate());
+ EATEST_VERIFY(hashSet.size() == 100);
+
+ hashSet.clear(true);
+ EATEST_VERIFY(hashSet.validate());
+ EATEST_VERIFY(hashSet.size() == 0);
+ EATEST_VERIFY(hashSet.bucket_count() == 1);
+ }
+
+
+ { // Test hash_set
+
+ // size_type size() const
+ // bool empty() const
+ // insert_return_type insert(const value_type& value);
+ // insert_return_type insert(const value_type& value, hash_code_t c, node_type* pNodeNew = NULL);
+ // iterator insert(const_iterator, const value_type& value);
+ // iterator find(const key_type& k);
+ // const_iterator find(const key_type& k) const;
+ // size_type count(const key_type& k) const;
+
+ typedef hash_set<int> HashSetInt;
+
+ HashSetInt hashSet;
+ const HashSetInt::size_type kCount = 10000;
+
+ EATEST_VERIFY(hashSet.empty());
+ EATEST_VERIFY(hashSet.size() == 0);
+ EATEST_VERIFY(hashSet.count(0) == 0);
+
+ for(int i = 0; i < (int)kCount; i++)
+ hashSet.insert(i);
+
+ EATEST_VERIFY(!hashSet.empty());
+ EATEST_VERIFY(hashSet.size() == kCount);
+ EATEST_VERIFY(hashSet.count(0) == 1);
+
+ for(HashSetInt::iterator it = hashSet.begin(); it != hashSet.end(); ++it)
+ {
+ int value = *it;
+ EATEST_VERIFY(value < (int)kCount);
+ }
+
+ for(int i = 0; i < (int)kCount * 2; i++)
+ {
+ HashSetInt::iterator it = hashSet.find(i);
+
+ if(i < (int)kCount)
+ EATEST_VERIFY(it != hashSet.end());
+ else
+ EATEST_VERIFY(it == hashSet.end());
+ }
+
+ // insert_return_type insert(const value_type& value, hash_code_t c, node_type* pNodeNew = NULL);
+ HashSetInt::node_type* pNode = hashSet.allocate_uninitialized_node();
+ HashSetInt::insert_return_type r = hashSet.insert(eastl::hash<int>()(999999), pNode, 999999);
+ EATEST_VERIFY(r.second == true);
+ pNode = hashSet.allocate_uninitialized_node();
+ r = hashSet.insert(eastl::hash<int>()(999999), pNode, 999999);
+ EATEST_VERIFY(r.second == false);
+ hashSet.free_uninitialized_node(pNode);
+ hashSet.erase(999999);
+
+
+ // iterator begin();
+ // const_iterator begin() const;
+ // iterator end();
+ // const_iterator end() const;
+
+ int* const pIntArray = new int[kCount];
+ memset(pIntArray, 0, kCount * sizeof(int)); // We want to make sure each element is present only once.
+ int nCount = 0;
+
+ for(HashSetInt::iterator it = hashSet.begin(); it != hashSet.end(); ++it, ++nCount)
+ {
+ int i = *it;
+
+ EATEST_VERIFY((i >= 0) && (i < (int)kCount) && (pIntArray[i] == 0));
+ pIntArray[i] = 1;
+ }
+
+ EATEST_VERIFY(nCount == (int)kCount);
+ delete[] pIntArray;
+ }
+
+
+ {
+ // size_type bucket_count() const
+ // size_type bucket_size(size_type n) const
+ // float load_factor() const
+ // float get_max_load_factor() const;
+ // void set_max_load_factor(float fMaxLoadFactor);
+ // void rehash(size_type n);
+ // const RehashPolicy& rehash_policy() const
+ // void rehash_policy(const RehashPolicy& rehashPolicy);
+
+ typedef hash_set<int> HashSetInt;
+
+ HashSetInt hashSet;
+
+ float fLoadFactor = hashSet.load_factor();
+ EATEST_VERIFY(fLoadFactor == 0.f);
+
+ hashSet.set_max_load_factor(65536.f * 512.f);
+ float fMaxLoadFactor = hashSet.get_max_load_factor();
+ EATEST_VERIFY(fMaxLoadFactor == (65536.f * 512.f));
+
+ hashSet.rehash(20);
+ HashSetInt::size_type n = hashSet.bucket_count();
+ EATEST_VERIFY((n >= 20) && (n < 25));
+
+ for(int i = 0; i < 100000; i++)
+ hashSet.insert(i); // This also tests for high loading.
+
+ HashSetInt::size_type n2 = hashSet.bucket_count();
+ EATEST_VERIFY(n2 == n); // Verify no rehashing has occured, due to our high load factor.
+
+ n = hashSet.bucket_size(0);
+ EATEST_VERIFY(n >= ((hashSet.size() / hashSet.bucket_count()) / 2)); // It will be some high value. We divide by 2 to give it some slop.
+ EATEST_VERIFY(hashSet.validate());
+
+ hash_set<int>::rehash_policy_type rp = hashSet.rehash_policy();
+ rp.mfGrowthFactor = 1.5f;
+ hashSet.rehash_policy(rp);
+ EATEST_VERIFY(hashSet.validate());
+
+
+ // local_iterator begin(size_type n);
+ // local_iterator end(size_type n);
+ // const_local_iterator begin(size_type n) const;
+ // const_local_iterator end(size_type n) const;
+
+ HashSetInt::size_type b = hashSet.bucket_count() - 1;
+ hash<int> IntHash;
+ for(HashSetInt::const_local_iterator cli = hashSet.begin(b); cli != hashSet.end(b); ++cli)
+ {
+ int v = *cli;
+ EATEST_VERIFY((IntHash(v) % hashSet.bucket_count()) == b);
+ }
+
+
+ // clear();
+
+ hashSet.clear();
+ EATEST_VERIFY(hashSet.validate());
+ EATEST_VERIFY(hashSet.empty());
+ EATEST_VERIFY(hashSet.size() == 0);
+ EATEST_VERIFY(hashSet.count(0) == 0);
+
+ hashSet.clear(true);
+ EATEST_VERIFY(hashSet.validate());
+ EATEST_VERIFY(hashSet.bucket_count() == 1);
+ }
+
+
+ {
+ // void reserve(size_type nElementCount);
+ nErrorCount += HashContainerReserveTest<hash_set<int>>()();
+ nErrorCount += HashContainerReserveTest<hash_multiset<int>>()();
+ nErrorCount += HashContainerReserveTest<hash_map<int, int>>()();
+ nErrorCount += HashContainerReserveTest<hash_multimap<int, int>>()();
+ }
+
+
+ { // Test hash_set with cached hash code.
+
+ // insert_return_type insert(const value_type& value) ;
+ // iterator find(const key_type& k);
+ // const_iterator find(const key_type& k) const;
+
+ typedef hash_set<int, hash<int>, equal_to<int>, EASTLAllocatorType, true> HashSetIntC;
+
+ HashSetIntC hashSet;
+ const int kCount = 10000;
+
+ for(int i = 0; i < kCount; i++)
+ hashSet.insert(i);
+
+ for(HashSetIntC::iterator it = hashSet.begin(); it != hashSet.end(); ++it)
+ {
+ int value = *it;
+ EATEST_VERIFY(value < kCount);
+ }
+
+ for(int i = 0; i < kCount * 2; i++)
+ {
+ HashSetIntC::iterator it = hashSet.find(i);
+ if(i < kCount)
+ EATEST_VERIFY(it != hashSet.end());
+ else
+ EATEST_VERIFY(it == hashSet.end());
+ }
+ }
+
+ {
+ // ENABLE_IF_HASHCODE_U32(HashCodeT, iterator) find_by_hash(HashCodeT c)
+ // ENABLE_IF_HASHCODE_U32(HashCodeT, const_iterator) find_by_hash(HashCodeT c) const
+ {
+ // NOTE(rparolin):
+ // these overloads of find_by_hash contains a static assert that forces a compiler error in the event it is
+ // used with a hashtable configured to not cache the hash value in the node.
+ }
+
+ // iterator find_by_hash(const key_type& k, hash_code_t c)
+ // const_iterator find_by_hash(const key_type& k, hash_code_t c) const
+ #ifdef EA_COMPILER_CPP14_ENABLED
+ {
+ auto FindByHashTest = [&nErrorCount](auto& hashSet)
+ {
+ const int kCount = 10000;
+ for(int i = 0; i < kCount; i++)
+ hashSet.insert(i);
+
+ for(int i = 0; i < kCount * 2; i++)
+ {
+ auto it = hashSet.find_by_hash(i, i);
+
+ if(i < kCount)
+ EATEST_VERIFY(it != hashSet.end());
+ else
+ EATEST_VERIFY(it == hashSet.end());
+ }
+ };
+
+ {
+ typedef hash_set<int, hash<int>, equal_to<int>, EASTLAllocatorType, true> HashSetIntC;
+ HashSetIntC hashSetC;
+ FindByHashTest(hashSetC);
+
+ typedef hash_set<int, hash<int>, equal_to<int>, EASTLAllocatorType, false> HashSetInt;
+ HashSetInt hashSet;
+ FindByHashTest(hashSet);
+ }
+ }
+ #endif
+ }
+
+
+ {
+ // hash_set(const allocator_type& allocator);
+ // hashtable& operator=(const this_type& x);
+ // bool validate() const;
+
+ hash_set<int> hashSet1(EASTLAllocatorType("hash_set name"));
+ hash_set<int> hashSet2(hashSet1);
+
+ for(int i = 0; i < 10; i++)
+ {
+ hashSet1.insert(i);
+ hashSet2.insert(i);
+ }
+
+ hashSet1 = hashSet2;
+
+ EATEST_VERIFY(hashSet1.validate());
+ EATEST_VERIFY(hashSet2.validate());
+ }
+
+
+ {
+ // hash_set(size_type nBucketCount, const Hash& hashFunction = Hash(), const Predicate& predicate = Predicate(), const allocator_type& allocator);
+ // hashtable(const hashtable& x);
+ // hashtable& operator=(const this_type& x);
+ // void swap(this_type& x);
+ // bool validate() const;
+ {
+ hash_set<int> hashSet3(0);
+ hash_set<int> hashSet4(1);
+ hash_set<int> hashSet5(2);
+ hash_set<int> hashSet6(3);
+ hash_set<int> hashSet7(4);
+
+ hashSet4 = hashSet3;
+ hashSet6 = hashSet5;
+ hashSet3 = hashSet7;
+
+ for(int i = 0; i < 10; i++)
+ {
+ hashSet3.insert(i);
+ hashSet4.insert(i);
+ hashSet5.insert(i);
+ hashSet6.insert(i);
+ hashSet7.insert(i);
+ }
+
+ hashSet4 = hashSet3;
+ hashSet6 = hashSet5;
+ hashSet3 = hashSet7;
+
+ EATEST_VERIFY(hashSet3.validate());
+ EATEST_VERIFY(hashSet4.validate());
+ EATEST_VERIFY(hashSet5.validate());
+ EATEST_VERIFY(hashSet6.validate());
+ EATEST_VERIFY(hashSet7.validate());
+
+ swap(hashSet4, hashSet3);
+ swap(hashSet6, hashSet5);
+ swap(hashSet3, hashSet7);
+
+ EATEST_VERIFY(hashSet3.validate());
+ EATEST_VERIFY(hashSet4.validate());
+ EATEST_VERIFY(hashSet5.validate());
+ EATEST_VERIFY(hashSet6.validate());
+ EATEST_VERIFY(hashSet7.validate());
+
+ hash_set<int> hashSet8(hashSet6);
+ hash_set<int> hashSet9(hashSet7);
+ hash_set<int> hashSet10(hashSet8);
+
+ EATEST_VERIFY(hashSet8.validate());
+ EATEST_VERIFY(hashSet9.validate());
+ EATEST_VERIFY(hashSet10.validate());
+ }
+
+ // test hashtable::swap using different allocator instances
+ {
+ typedef hash_set<int, eastl::hash<int>, eastl::equal_to<int>, InstanceAllocator> HS;
+ HS hashSet1(InstanceAllocator("hash_set1 name", 111));
+ HS hashSet2(InstanceAllocator("hash_set2 name", 222));
+
+ for(int i = 0; i < 10; i++)
+ {
+ hashSet1.insert(i);
+ hashSet2.insert(i+10);
+ }
+
+ hashSet2.swap(hashSet1);
+
+ EATEST_VERIFY(hashSet1.validate());
+ EATEST_VERIFY(hashSet2.validate());
+
+ EATEST_VERIFY(hashSet1.get_allocator().mInstanceId == 222);
+ EATEST_VERIFY(hashSet2.get_allocator().mInstanceId == 111);
+
+ EATEST_VERIFY(eastl::all_of(eastl::begin(hashSet2), eastl::end(hashSet2), [](int i) { return i < 10; }));
+ EATEST_VERIFY(eastl::all_of(eastl::begin(hashSet1), eastl::end(hashSet1), [](int i) { return i >= 10; }));
+ }
+ }
+
+
+ {
+ // hash_set(InputIterator first, InputIterator last, size_type nBucketCount = 8, const Hash& hashFunction = Hash(), const Predicate& predicate = Predicate(), const allocator_type& allocator);
+ // bool validate() const;
+
+ vector<int> intArray;
+ for(int i = 0; i < 1000; i++)
+ intArray.push_back(i);
+
+ hash_set<int> hashSet1(intArray.begin(), intArray.end(), 0);
+ hash_set<int> hashSet2(intArray.begin(), intArray.end(), 1);
+ hash_set<int> hashSet3(intArray.begin(), intArray.end(), 2);
+ hash_set<int> hashSet4(intArray.begin(), intArray.end(), 3);
+
+ EATEST_VERIFY(hashSet1.validate());
+ EATEST_VERIFY(hashSet2.validate());
+ EATEST_VERIFY(hashSet3.validate());
+ EATEST_VERIFY(hashSet4.validate());
+
+
+ // bool validate_iterator(const_iterator i) const;
+ hash_set<int>::iterator it;
+ int result = hashSet1.validate_iterator(it);
+ EATEST_VERIFY(result == isf_none);
+
+ it = hashSet1.begin();
+ result = hashSet2.validate_iterator(it);
+ EATEST_VERIFY(result == isf_none);
+ result = hashSet1.validate_iterator(it);
+ EATEST_VERIFY(result == (isf_valid | isf_current | isf_can_dereference));
+
+ it = hashSet1.end();
+ result = hashSet1.validate_iterator(it);
+ EATEST_VERIFY(result == (isf_valid | isf_current));
+
+
+ // void reset_lose_memory();
+ hashSet1.reset_lose_memory();
+ hashSet1 = hashSet2;
+
+ EATEST_VERIFY(hashSet1.validate());
+ EATEST_VERIFY(hashSet2.validate());
+
+ hashSet3.reset_lose_memory();
+ hashSet4 = hashSet3;
+
+ EATEST_VERIFY(hashSet3.validate());
+ EATEST_VERIFY(hashSet4.validate());
+
+ hashSet2.reset_lose_memory();
+ hashSet3.reset_lose_memory();
+ swap(hashSet2, hashSet3);
+
+ EATEST_VERIFY(hashSet3.validate());
+ EATEST_VERIFY(hashSet4.validate());
+
+ hashSet2 = hashSet3;
+ EATEST_VERIFY(hashSet2.validate());
+ }
+
+
+ {
+ // void insert(InputIterator first, InputIterator last);
+ vector<int> intArray1;
+ vector<int> intArray2;
+
+ for(int i = 0; i < 1000; i++)
+ {
+ intArray1.push_back(i + 0);
+ intArray2.push_back(i + 500);
+ }
+
+ hash_set<int> hashSet1(intArray1.begin(), intArray1.end());
+ hashSet1.insert(intArray2.begin(), intArray2.end());
+ EATEST_VERIFY(hashSet1.validate());
+
+ hash_set<int> hashSet2;
+ hashSet2.insert(intArray1.begin(), intArray1.end());
+ hashSet2.insert(intArray2.begin(), intArray2.end());
+ EATEST_VERIFY(hashSet2.validate());
+
+ EATEST_VERIFY(hashSet1 == hashSet2);
+
+
+ // insert_return_type insert(const_iterator, const value_type& value)
+ for(int j = 0; j < 1000; j++)
+ hashSet1.insert(hashSet1.begin(), j);
+
+ insert_iterator< hash_set<int> > ii(hashSet1, hashSet1.begin());
+ for(int j = 0; j < 1000; j++)
+ *ii++ = j;
+ }
+
+
+ {
+ // C++11 emplace and related functionality
+ nErrorCount += TestMapCpp11<eastl::hash_map<int, TestObject>>();
+ nErrorCount += TestMapCpp11<eastl::unordered_map<int, TestObject>>();
+
+ nErrorCount += TestSetCpp11<eastl::hash_set<TestObject>>();
+ nErrorCount += TestSetCpp11<eastl::unordered_set<TestObject>>();
+
+ nErrorCount += TestMultimapCpp11<eastl::hash_multimap<int, TestObject>>();
+ nErrorCount += TestMultimapCpp11<eastl::unordered_multimap<int, TestObject>>();
+
+ nErrorCount += TestMultisetCpp11<eastl::hash_multiset<TestObject>>();
+ nErrorCount += TestMultisetCpp11<eastl::unordered_multiset<TestObject>>();
+
+ nErrorCount += TestMapCpp11NonCopyable<eastl::hash_map<int, NonCopyable>>();
+ nErrorCount += TestMapCpp11NonCopyable<eastl::unordered_map<int, NonCopyable>>();
+ }
+
+ {
+ // C++17 try_emplace and related functionality
+ nErrorCount += TestMapCpp17<eastl::hash_map<int, TestObject>>();
+ nErrorCount += TestMapCpp17<eastl::unordered_map<int, TestObject>>();
+ }
+
+
+ {
+ // initializer_list support.
+ // hash_set(std::initializer_list<value_type> ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ // const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ // void insert(std::initializer_list<value_type> ilist);
+ hash_set<int> intHashSet = { 12, 13, 14 };
+ EATEST_VERIFY(intHashSet.size() == 3);
+ EATEST_VERIFY(intHashSet.find(12) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(13) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(14) != intHashSet.end());
+
+ intHashSet = { 22, 23, 24 };
+ EATEST_VERIFY(intHashSet.size() == 3);
+ EATEST_VERIFY(intHashSet.find(22) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(23) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(24) != intHashSet.end());
+
+ intHashSet.insert({ 42, 43, 44 });
+ EATEST_VERIFY(intHashSet.size() == 6);
+ EATEST_VERIFY(intHashSet.find(42) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(43) != intHashSet.end());
+ EATEST_VERIFY(intHashSet.find(44) != intHashSet.end());
+ }
+
+ {
+ // eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ // eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+ // const_iterator erase(const_iterator, const_iterator);
+ // size_type erase(const key_type&);
+ // To do.
+ }
+
+
+ { // hash_set erase_if
+ hash_set<int> m = {0, 1, 2, 3, 4};
+ auto numErased = eastl::erase_if(m, [](auto i) { return i % 2 == 0; });
+ VERIFY((m == hash_set<int>{1, 3}));
+ VERIFY(numErased == 3);
+ }
+
+ { // hash_multiset erase_if
+ hash_multiset<int> m = {0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 4};
+ auto numErased = eastl::erase_if(m, [](auto i) { return i % 2 == 0; });
+ VERIFY((m == hash_multiset<int>{1, 1, 1, 3}));
+ VERIFY(numErased == 12);
+ }
+
+
+
+
+
+
+ { // Test hash_map
+
+ // insert_return_type insert(const value_type& value);
+ // insert_return_type insert(const key_type& key);
+ // iterator find(const key_type& k);
+ // const_iterator find(const key_type& k) const;
+
+ typedef hash_map<int, int> HashMapIntInt;
+ HashMapIntInt hashMap;
+ const int kCount = 10000;
+
+ for(int i = 0; i < kCount; i++)
+ {
+ HashMapIntInt::value_type vt(i, i);
+ hashMap.insert(vt);
+ }
+
+ const HashMapIntInt const_hashMap = hashMap; // creating a const version to test for const correctness
+
+ for(auto& e : hashMap)
+ {
+ int k = e.first;
+ int v = e.second;
+ EATEST_VERIFY(k < kCount);
+ EATEST_VERIFY(v == k);
+ EATEST_VERIFY(hashMap.at(k) == k);
+ EATEST_VERIFY(const_hashMap.at(k) == k);
+ hashMap.at(k) = k << 4;
+ }
+
+ for(auto& e : hashMap)
+ {
+ int k = e.first;
+ int v = e.second;
+ EATEST_VERIFY(k < kCount);
+ EATEST_VERIFY(v == (k << 4));
+ }
+
+ for(int i = 0; i < kCount * 2; i++)
+ {
+ HashMapIntInt::iterator it = hashMap.find(i);
+
+ if(i < kCount)
+ {
+ EATEST_VERIFY(it != hashMap.end());
+
+ int k = (*it).first;
+ int v = (*it).second;
+ EATEST_VERIFY(v == (k << 4));
+ }
+ else
+ EATEST_VERIFY(it == hashMap.end());
+ }
+
+ for(int i = 0; i < kCount; i++)
+ {
+ int v = hashMap.at(i);
+ EATEST_VERIFY(v == (i << 4));
+ }
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ hashMap.at(kCount);
+ EASTL_ASSERT_MSG(false, "at accessor did not throw out_of_range exception");
+ }
+ catch(const std::out_of_range) { }
+ catch(const std::exception& e)
+ {
+ string e_msg(e.what());
+ string msg = "wrong exception with message \"" + e_msg + "\" thrown";
+ EASTL_ASSERT_MSG(false, msg.c_str());
+ }
+ #endif
+ HashMapIntInt::insert_return_type result = hashMap.insert(88888);
+ EATEST_VERIFY(result.second == true);
+ result = hashMap.insert(88888);
+ EATEST_VERIFY(result.second == false);
+ result.first->second = 0;
+
+ // const_iterator erase(const_iterator);
+ size_t nExpectedSize = hashMap.size();
+
+ HashMapIntInt::iterator it50 = hashMap.find(50);
+ EATEST_VERIFY(it50 != hashMap.end());
+
+ HashMapIntInt::iterator itNext = hashMap.erase(it50);
+ nExpectedSize--;
+ EATEST_VERIFY(itNext != hashMap.end()); // Strictly speaking, this isn't guaranteed to be so. But statistically it is very likely. We'll fix this if it becomes a problem.
+ EATEST_VERIFY(hashMap.size() == nExpectedSize);
+
+ HashMapIntInt::size_type n = hashMap.erase(10);
+ nExpectedSize--;
+ EATEST_VERIFY(n == 1);
+ EATEST_VERIFY(hashMap.size() == nExpectedSize);
+
+ HashMapIntInt::iterator it60 = hashMap.find(60);
+ EATEST_VERIFY(itNext != hashMap.end());
+
+ HashMapIntInt::iterator it60Incremented(it60);
+ for(int i = 0; (i < 5) && (it60Incremented != hashMap.end()); ++i)
+ {
+ ++it60Incremented;
+ --nExpectedSize;
+ }
+
+ hashMap.erase(it60, it60Incremented);
+ EATEST_VERIFY(hashMap.size() == nExpectedSize);
+
+
+ // insert_return_type insert(const value_type& value, hash_code_t c, node_type* pNodeNew = NULL);
+ HashMapIntInt::node_type* pNode = hashMap.allocate_uninitialized_node();
+ HashMapIntInt::insert_return_type r = hashMap.insert(eastl::hash<int>()(999999), pNode, HashMapIntInt::value_type(999999, 999999));
+ EATEST_VERIFY(r.second == true);
+ pNode = hashMap.allocate_uninitialized_node();
+ r = hashMap.insert(eastl::hash<int>()(999999), pNode, HashMapIntInt::value_type(999999, 999999));
+ EATEST_VERIFY(r.second == false);
+ hashMap.free_uninitialized_node(pNode);
+ hashMap.erase(999999);
+
+
+ // mapped_type& operator[](const key_type& key)
+ // hash_map is unique among the map/set containers in having this function.
+ hashMap.clear();
+
+ int x = hashMap[0]; // A default-constructed int (i.e. 0) should be returned.
+ EATEST_VERIFY(x == 0);
+
+ hashMap[1] = 1;
+ x = hashMap[1];
+ EATEST_VERIFY(x == 1); // Verify that the value we assigned is returned and a default-constructed value is not returned.
+
+ hashMap[0] = 10; // Overwrite our previous 0 with 10.
+ hashMap[1] = 11;
+ x = hashMap[0];
+ EATEST_VERIFY(x == 10); // Verify the value is as expected.
+ x = hashMap[1];
+ EATEST_VERIFY(x == 11);
+ }
+
+
+ { // Test hash_map
+
+ // Aligned objects should be CustomAllocator instead of the default, because the
+ // EASTL default might be unable to do aligned allocations, but CustomAllocator always can.
+ hash_map<Align32, int, eastl::hash<Align32>, eastl::equal_to<Align32>, CustomAllocator> hashMap;
+ const int kCount = 10000;
+
+ for(int i = 0; i < kCount; i++)
+ {
+ Align32 a32(i); // GCC 2.x doesn't like the Align32 object being created in the ctor below.
+ hash_map<Align32, int>::value_type vt(a32, i);
+ hashMap.insert(vt);
+ }
+
+ for(hash_map<Align32, int>::iterator it = hashMap.begin(); it != hashMap.end(); ++it)
+ {
+ const Align32& k = (*it).first;
+ int v = (*it).second;
+ EATEST_VERIFY(k.mX < 10000);
+ EATEST_VERIFY(v == k.mX);
+ }
+
+ for(int i = 0; i < kCount * 2; i++)
+ {
+ hash_map<Align32, int>::iterator it = hashMap.find(Align32(i));
+
+ if(i < kCount)
+ {
+ EATEST_VERIFY(it != hashMap.end());
+
+ const Align32& k = (*it).first;
+ int v = (*it).second;
+ EATEST_VERIFY(v == k.mX);
+ }
+ else
+ EATEST_VERIFY(it == hashMap.end());
+ }
+ }
+
+ { // hash_map erase_if
+ hash_map<int, int> m = {{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}};
+ auto numErased = eastl::erase_if(m, [](auto p) { return p.first % 2 == 0; });
+ VERIFY((m == hash_map<int, int>{{1, 1}, {3, 3}}));
+ VERIFY(numErased == 3);
+ }
+
+ { // hash_multimap erase_if
+ hash_multimap<int, int> m = {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {1, 1}, {2, 2},
+ {2, 2}, {2, 2}, {2, 2}, {3, 3}, {3, 3}, {4, 4}};
+ auto numErased = eastl::erase_if(m, [](auto p) { return p.first % 2 == 0; });
+ VERIFY((m == hash_multimap<int, int>{{1, 1}, {3, 3}, {3, 3}}));
+ VERIFY(numErased == 9);
+ }
+
+
+
+ {
+ // template <typename U, typename UHash, typename BinaryPredicate>
+ // iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
+ // template <typename U, typename UHash, typename BinaryPredicate>
+ // const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
+ // template <typename U>
+ // iterator find_as(const U& u);
+ // template <typename U>
+ // const_iterator find_as(const U& u) const;
+
+ typedef hash_set<string> HashSetString;
+
+ HashSetString hashSet;
+ const int kCount = 100;
+
+ for(int i = 0; i < kCount; i++)
+ {
+ string::CtorSprintf cs; // GCC 2.x doesn't like this value being created in the ctor below.
+ string s(cs, "%d", i);
+ hashSet.insert(s);
+ }
+
+ for(int i = 0; i < kCount * 2; i++)
+ {
+ char pString[32];
+ sprintf(pString, "%d", i);
+
+ HashSetString::iterator it = hashSet.find_as(pString);
+ if(i < kCount)
+ EATEST_VERIFY(it != hashSet.end());
+ else
+ EATEST_VERIFY(it == hashSet.end());
+
+ it = hashSet.find_as(pString, hash<const char*>(), equal_to_2<string, const char*>());
+ if(i < kCount)
+ EATEST_VERIFY(it != hashSet.end());
+ else
+ EATEST_VERIFY(it == hashSet.end());
+
+ string::CtorSprintf cs;
+ string s(cs, "%d", i);
+
+ it = hashSet.find_as(s);
+ if (i < kCount)
+ EATEST_VERIFY(it != hashSet.end());
+ else
+ EATEST_VERIFY(it == hashSet.end());
+ }
+ }
+
+
+ {
+ // Test const containers.
+ const hash_set<int> constHashSet;
+
+ hash_set<int>::const_iterator i = constHashSet.begin();
+ hash_set<int>::const_iterator i3 = i;
+ hash_set<int>::iterator i2;
+ i3 = i2;
+
+ EATEST_VERIFY(i3 == i2);
+
+ //const std::tr1::unordered_set<int> constUSet;
+ //std::tr1::unordered_set<int>::const_iterator i = constUSet.begin();
+ //*i = 0;
+ }
+
+ {
+ // global operator ==, !=
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+ const eastl_size_t kIterationCount = 100;
+ const eastl_size_t kDataRange = 50;
+
+ {
+ typedef hash_set<HashtableValue, HashtableValueHash, HashtableValuePredicate> HashSet;
+ HashtableValue value;
+
+ HashSet h1;
+ HashSet h2;
+ EATEST_VERIFY(h1 == h2);
+
+ for(eastl_size_t i = 0; i < kIterationCount; i++)
+ {
+ value.mData = rng.RandLimit(kDataRange);
+ h1.insert(value); // Leave value.mExtra as 0.
+ }
+
+ EATEST_VERIFY(h1 != h2);
+ h2 = h1;
+ EATEST_VERIFY(h1 == h2);
+
+ // Test the case of the containers being the same size but having a single different value, despite that it's key compare yields equal.
+ HashSet h2Saved(h2);
+ HashSet::iterator it = h2.find(value);
+ HashtableValue valueModified(value.mData, 1);
+ h2.erase(it);
+ h2.insert(valueModified);
+ EATEST_VERIFY(h1 != h2);
+ h2 = h2Saved;
+
+ // Test the case of the containers being the same size but having a single different key.
+ h2Saved = h2;
+ h2.erase(h2.find(value));
+ h2.insert(kDataRange); // Insert something that could not have been in h2.
+ EATEST_VERIFY(h1 != h2);
+ h2 = h2Saved;
+
+ h1.erase(h1.find(value)); // Erase from h1 whatever the last value was.
+ EATEST_VERIFY(h1 != h2);
+ }
+
+ {
+ typedef hash_multiset<HashtableValue, HashtableValueHash, HashtableValuePredicate> HashSet;
+ HashtableValue value;
+
+ HashSet h1;
+ HashSet h2;
+ EATEST_VERIFY(h1 == h2);
+
+ for(eastl_size_t i = 0; i < kIterationCount; i++)
+ {
+ value.mData = rng.RandLimit(kDataRange);
+ h1.insert(value); // Leave value.mExtra as 0.
+ }
+
+ EATEST_VERIFY(h1 != h2);
+ h2 = h1;
+ EATEST_VERIFY(h1 == h2);
+
+ // Test the case of the containers being the same size but having a single different value, despite that it's key compare yields equal.
+ HashSet h2Saved(h2);
+ HashSet::iterator it = h2.find(value);
+ HashtableValue valueModified(value.mData, 1);
+ h2.erase(it);
+ h2.insert(valueModified);
+ EATEST_VERIFY(h1 != h2);
+ h2 = h2Saved;
+
+ // Test the case of the containers being the same size but having a single different key.
+ h2Saved = h2;
+ h2.erase(h2.find(value));
+ h2.insert(kDataRange); // Insert something that could not have been in h2.
+ EATEST_VERIFY(h1 != h2);
+ h2 = h2Saved;
+
+ h1.erase(h1.find(value)); // Erase from h1 whatever the last value was.
+ EATEST_VERIFY(h1 != h2);
+ }
+
+ {
+ // For simplicity we duplicate the HashtableValue::mData member as the hash map key.
+ typedef hash_map<eastl_size_t, HashtableValue, HashtableValueHash, HashtableValuePredicate> HashMap;
+ HashtableValue value;
+
+ HashMap h1;
+ HashMap h2;
+ EATEST_VERIFY(h1 == h2);
+
+ for(eastl_size_t i = 0; i < kIterationCount; i++)
+ {
+ value.mData = rng.RandLimit(kDataRange);
+ h1.insert(HashMap::value_type(value.mData, value)); // Leave value.mExtra as 0.
+ }
+
+ EATEST_VERIFY(h1 != h2);
+ h2 = h1;
+ EATEST_VERIFY(h1 == h2);
+
+ // Test the case of the containers being the same size but having a single different value, despite that it's key compare yields equal.
+ HashMap h2Saved(h2);
+ HashMap::iterator it = h2.find(value.mData); // We are using value.mData as the key as well, so we can do a find via it.
+ HashtableValue valueModified(value.mData, 1);
+ h2.erase(it);
+ h2.insert(HashMap::value_type(valueModified.mData, valueModified));
+ EATEST_VERIFY(h1 != h2);
+ h2 = h2Saved;
+
+ // Test the case of the containers being the same size but having a single different key.
+ h2Saved = h2;
+ h2.erase(h2.find(value.mData));
+ h2.insert(HashMap::value_type(kDataRange, HashtableValue(kDataRange))); // Insert something that could not have been in h2.
+ EATEST_VERIFY(h1 != h2);
+ h2 = h2Saved;
+
+ h1.erase(h1.find(value.mData)); // Erase from h1 whatever the last value was.
+ EATEST_VERIFY(h1 != h2);
+ }
+
+ {
+ // For simplicity we duplicate the HashtableValue::mData member as the hash map key.
+ typedef hash_multimap<eastl_size_t, HashtableValue, HashtableValueHash, HashtableValuePredicate> HashMap;
+ HashtableValue value;
+
+ HashMap h1;
+ HashMap h2;
+ EATEST_VERIFY(h1 == h2);
+
+ for(eastl_size_t i = 0; i < kIterationCount; i++)
+ {
+ value.mData = rng.RandLimit(kDataRange);
+ h1.insert(HashMap::value_type(value.mData, value)); // Leave value.mExtra as 0.
+ }
+
+ EATEST_VERIFY(h1 != h2);
+ h2 = h1;
+ EATEST_VERIFY(h1 == h2);
+
+ // Test the case of the containers being the same size but having a single different value, despite that it's key compare yields equal.
+ HashMap h2Saved(h2);
+ HashMap::iterator it = h2.find(value.mData); // We are using value.mData as the key as well, so we can do a find via it.
+ HashtableValue valueModified(value.mData, 1);
+ h2.erase(it);
+ h2.insert(HashMap::value_type(valueModified.mData, valueModified));
+ EATEST_VERIFY(h1 != h2);
+ h2 = h2Saved;
+
+ // Test the case of the containers being the same size but having a single different key.
+ h2Saved = h2;
+ h2.erase(h2.find(value.mData));
+ h2.insert(HashMap::value_type(kDataRange, HashtableValue(kDataRange))); // Insert something that could not have been in h2.
+ EATEST_VERIFY(h1 != h2);
+ h2 = h2Saved;
+
+ h1.erase(h1.find(value.mData)); // Erase from h1 whatever the last value was.
+ EATEST_VERIFY(h1 != h2);
+ }
+ }
+
+ {
+ typedef eastl::hash_multiset<int> HashMultisetInt;
+
+ HashMultisetInt hashMultiSet;
+
+ // insert_return_type insert(const value_type& value, hash_code_t c, node_type* pNodeNew = NULL);
+ HashMultisetInt::node_type* pNode = hashMultiSet.allocate_uninitialized_node();
+ HashMultisetInt::iterator it1 = hashMultiSet.insert(eastl::hash<int>()(999999), pNode, 999999);
+ EATEST_VERIFY(it1 != hashMultiSet.end());
+ pNode = hashMultiSet.allocate_uninitialized_node();
+ HashMultisetInt::iterator it2 = hashMultiSet.insert(eastl::hash<int>()(999999), pNode, 999999);
+ EATEST_VERIFY(it2 != hashMultiSet.end() && it2 != it1);
+ }
+
+ {
+ // Regression of compiler warning reported by Jeff Litz/Godfather regarding
+ // strict aliasing (EASTL 1.09.01) December 2007).
+ typedef eastl::hash_multimap<uint32_t, uint32_t*> Map;
+ Map* pMap = new Map;
+ delete pMap;
+ }
+
+ {
+ // Regression of user-reported crash.
+ eastl::hash_map<int, eastl::string*>* _hmTextureList;
+ _hmTextureList = new eastl::hash_map<int, eastl::string*>();
+ eastl::string* a = NULL;
+ (*_hmTextureList)[0] = a;
+ delete _hmTextureList;
+ }
+
+ {
+ // Regression of user-reported Android compiler error.
+ typedef eastl::hash_multimap<HashRegressionA*, HashRegressionB> HMM;
+ HMM m_hash;
+
+ // Section 1
+ for (HMM::iterator it = m_hash.begin(); it != m_hash.end(); it++)
+ it->second.y = 1;
+
+ // Section 2
+ HashRegressionA* pA = NULL;
+ eastl::pair<HMM::iterator, HMM::iterator> pair = m_hash.equal_range(pA);
+ (void)pair;
+ }
+
+ {
+ // Regression of user-reported GCC 4.8 compile failure.
+ typedef eastl::hash_map<int64_t, Struct> AuditByBlazeIdMap;
+
+ AuditByBlazeIdMap auditBlazeIds;
+ AuditByBlazeIdMap tempAuditBlazeIds;
+
+ auditBlazeIds.swap(tempAuditBlazeIds); // This line was generating an unexpected compiler failure.
+ EATEST_VERIFY(auditBlazeIds.empty() && tempAuditBlazeIds.empty());
+ }
+
+ {
+ // This test is designed to designed to use the find_range_by_hash method to walk over all keys in a hash bucket (located by a hash value).
+
+ // Use the 'colliding_hash' hash function to intentionally create lots of collisions in a predictable way.
+ typedef hash_map<int, int, colliding_hash> HM;
+ HM hashMap;
+
+ // Add some numbers to the hashMap.
+ for(int i=0; i<90; i++)
+ {
+ hashMap[i] = i;
+ }
+
+ // Try to find a hash value that doesn't exist
+ {
+ eastl::pair<HM::iterator, HM::iterator> i = hashMap.find_range_by_hash(1000);
+ EATEST_VERIFY(i.first == hashMap.end());
+ EATEST_VERIFY(i.second == hashMap.end());
+ }
+
+ {
+ int iterations = 0;
+ for(eastl::pair<HM::iterator, HM::iterator> i = hashMap.find_range_by_hash(1); i.first != i.second; i.first++)
+ {
+ int nodeValue = i.first.get_node()->mValue.first;
+ EATEST_VERIFY(nodeValue % 3 == 1); // Verify the hash of the node matches the expected value
+ iterations++;
+ }
+ EATEST_VERIFY(iterations == 30);
+ }
+
+ {
+ const HM &constHashMap = hashMap;
+ int iterations = 0;
+ for(eastl::pair<HM::const_iterator, HM::const_iterator> i = constHashMap.find_range_by_hash(1); i.first != i.second; i.first++)
+ {
+ int nodeValue = i.first.get_node()->mValue.first;
+ EATEST_VERIFY(nodeValue % 3 == 1); // Verify the hash of the node matches the expected value
+ iterations++;
+ }
+ EATEST_VERIFY(iterations == 30);
+ }
+ }
+
+ // test hashtable holding move-only types
+ #if !defined(EA_COMPILER_MSVC_2013)
+ {
+ struct Movable
+ {
+ Movable() {}
+ Movable(Movable&&) = default;
+ Movable& operator=(Movable&&) = default;
+ Movable(const Movable&) = delete;
+ Movable& operator=(const Movable&) = delete;
+
+ bool operator==(Movable) const { return true; }
+
+ struct Hash
+ {
+ size_t operator()(Movable) const { return 0; }
+ };
+ };
+
+ eastl::unordered_set<Movable, Movable::Hash> a, b;
+ swap(a,b);
+ }
+ #endif
+
+ {
+ // hashtable(this_type&& x);
+ // hashtable(this_type&& x, const allocator_type& allocator);
+ // this_type& operator=(this_type&& x);
+
+ // template <class... Args>
+ // insert_return_type emplace(Args&&... args);
+
+ // template <class... Args>
+ // iterator emplace_hint(const_iterator position, Args&&... args);
+
+ // template <class P> // Requires that "value_type is constructible from forward<P>(otherValue)."
+ // insert_return_type insert(P&& otherValue);
+
+ // iterator insert(const_iterator hint, value_type&& value);
+
+ // Regression of user reported compiler error in hashtable sfinae mechanism
+ {
+ TestObject::Reset();
+ eastl::hash_set<TestObject> toSet;
+ toSet.emplace(3, 4, 5);
+ }
+ }
+
+
+
+ {
+ // initializer_list support.
+ // hash_map(std::initializer_list<value_type> ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ // const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR)
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ // void insert(std::initializer_list<value_type> ilist);
+
+ // VS2013 has a known issue when dealing with std::initializer_lists
+ // https://connect.microsoft.com/VisualStudio/feedback/details/792355/compiler-confused-about-whether-to-use-a-initializer-list-assignment-operator
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !(defined(_MSC_VER) && _MSC_VER == 1800)
+ hash_map<int, double> intHashMap = { {12,12.0}, {13,13.0}, {14,14.0} };
+ EATEST_VERIFY(intHashMap.size() == 3);
+ EATEST_VERIFY(intHashMap.find(12) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(13) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(14) != intHashMap.end());
+
+ intHashMap = { {22,22.0}, {23,23.0}, {24,24.0} };
+ EATEST_VERIFY(intHashMap.size() == 3);
+ EATEST_VERIFY(intHashMap.find(22) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(23) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(24) != intHashMap.end());
+
+ intHashMap.insert({ {42,42.0}, {43,43.0}, {44,44.0} });
+ EATEST_VERIFY(intHashMap.size() == 6);
+ EATEST_VERIFY(intHashMap.find(42) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(43) != intHashMap.end());
+ EATEST_VERIFY(intHashMap.find(44) != intHashMap.end());
+ #endif
+ }
+
+ // Can't use move semantics with hash_map::operator[]
+ //
+ // GCC has a bug with overloading rvalue and lvalue function templates.
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54425
+ //
+ // error: 'eastl::pair<T1, T2>::pair(T1&&) [with T1 = const int&; T2 = const int&]' cannot be overloaded
+ // error: with 'eastl::pair<T1, T2>::pair(const T1&) [with T1 = const int&; T2 = const int&]'
+ #if !defined(EA_COMPILER_GNUC)
+ {
+ EA_DISABLE_VC_WARNING(4626)
+ struct Key
+ {
+ Key() {}
+ Key(Key&&) {}
+ Key(const Key&&) {}
+ bool operator==(const Key&) const { return true; }
+
+ private:
+ Key(const Key&) {}
+ };
+ EA_RESTORE_VC_WARNING()
+
+ struct Hash
+ {
+ std::size_t operator()(const Key&) const { return 0; }
+ };
+
+ Key key1, key2;
+ eastl::hash_map<Key, int, Hash> hm;
+ hm[eastl::move(key1)] = 12345;
+
+ EATEST_VERIFY(hm[eastl::move(key2)] == 12345);
+ }
+ #endif
+
+ {
+ using AllocatorType = CountingAllocator;
+ using String = eastl::basic_string<char8_t, AllocatorType>;
+ using StringStringMap = eastl::map<String, String, eastl::equal_to<String>, AllocatorType>;
+ using StringStringHashMap = eastl::hash_map<String, String, eastl::string_hash<String>, eastl::equal_to<String>, AllocatorType>;
+ AllocatorType::resetCount();
+
+ {
+ StringStringHashMap myMap(5); // construct map with 5 buckets, so we don't rehash on insert
+ String key("mykey01234567890000000000000000000000000000");
+ String value("myvalue01234567890000000000000000000000000000");
+ AllocatorType::resetCount();
+
+ myMap.insert(eastl::make_pair(eastl::move(key), eastl::move(value)));
+ EATEST_VERIFY(AllocatorType::getTotalAllocationCount() == 1);
+ }
+ {
+ StringStringHashMap myMap(5); // construct map with 5 buckets, so we don't rehash on insert
+ String key("mykey01234567890000000000000000000000000000");
+ String value("myvalue01234567890000000000000000000000000000");
+ AllocatorType::resetCount();
+
+ myMap.emplace(eastl::move(key), eastl::move(value));
+ EATEST_VERIFY(AllocatorType::getTotalAllocationCount() == 1);
+ }
+ {
+ StringStringMap myMap;
+ String key("mykey01234567890000000000000000000000000000");
+ String value("myvalue01234567890000000000000000000000000000");
+ AllocatorType::resetCount();
+
+ myMap.insert(eastl::make_pair(eastl::move(key), eastl::move(value)));
+ EATEST_VERIFY(AllocatorType::getTotalAllocationCount() == 1);
+ }
+ {
+ StringStringMap myMap;
+ String key("mykey01234567890000000000000000000000000000");
+ String value("myvalue01234567890000000000000000000000000000");
+ AllocatorType::resetCount();
+
+ myMap.emplace(eastl::move(key), eastl::move(value));
+ EATEST_VERIFY(AllocatorType::getTotalAllocationCount() == 1);
+ }
+ }
+
+
+ {
+ struct name_equals
+ {
+ bool operator()(const eastl::pair<int, const char*>& a, const eastl::pair<int, const char*>& b) const
+ {
+ if (a.first != b.first)
+ return false;
+
+ return strcmp(a.second, b.second) == 0;
+ }
+ };
+
+ {
+ int n = 42;
+ const char* pCStrName = "electronic arts";
+ eastl::hash_map<eastl::pair<int, const char*>, bool, eastl::hash<eastl::pair<int, const char*>>, name_equals, eastl::allocator> m_TempNames;
+ m_TempNames[eastl::make_pair(n, pCStrName)] = true;
+
+ auto isFound = (m_TempNames.find(eastl::make_pair(n, pCStrName)) != m_TempNames.end());
+ VERIFY(isFound);
+ }
+ }
+
+ { // User reported regression for code changes limiting hash code generated for non-arithmetic types.
+ { VERIFY(HashTest<char>{}('a') == size_t('a')); }
+ { VERIFY(HashTest<int>{}(42) == 42); }
+ { VERIFY(HashTest<unsigned>{}(42) == 42); }
+ { VERIFY(HashTest<signed>{}(42) == 42); }
+ { VERIFY(HashTest<short>{}(short(42)) == 42); }
+ { VERIFY(HashTest<unsigned short>{}((unsigned short)42) == 42); }
+ { VERIFY(HashTest<int>{}(42) == 42); }
+ { VERIFY(HashTest<unsigned int>{}(42) == 42); }
+ { VERIFY(HashTest<long int>{}(42) == 42); }
+ { VERIFY(HashTest<unsigned long int>{}(42) == 42); }
+ { VERIFY(HashTest<long long int>{}(42) == 42); }
+ { VERIFY(HashTest<unsigned long long int>{}(42) == 42); }
+
+ #if defined(EA_HAVE_INT128) && EA_HAVE_INT128
+ { VERIFY(HashTest<uint128_t>{}(UINT128_C(0, 42)) == 42); }
+ #endif
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestHeap.cpp b/EASTL/test/source/TestHeap.cpp
new file mode 100644
index 0000000..4709ecf
--- /dev/null
+++ b/EASTL/test/source/TestHeap.cpp
@@ -0,0 +1,295 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/heap.h>
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/sort.h>
+#include <EABase/eabase.h>
+#include <algorithm> //std::pop_heap
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <algorithm>
+#endif
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+
+
+using namespace eastl;
+
+
+
+
+int VerifyHeaps(uint32_t* pArray2, uint32_t* pArray3, uint32_t nArraySize)
+{
+ int nErrorCount = 0;
+ bool bResult;
+
+ bResult = is_heap(pArray2, pArray2 + nArraySize);
+ EATEST_VERIFY(bResult);
+
+ bResult = is_heap(pArray3, pArray3 + nArraySize);
+ EATEST_VERIFY(bResult);
+
+ // bResult = (memcmp(pArray2, pArray3, nArraySize * sizeof(uint32_t)) == 0);
+ // EATEST_VERIFY(bResult);
+ //
+ // The above does not work on iOS since CM added -stdlib=libc++ to the linker switch
+ // even though it was already used in our compile switches.
+ // It would appear that on clang or iOS the heap is actually structured in a unique way,
+ // possibly for optimization. Iterating over the array and using pop_heap verifies
+ // that the heaps have the same elements and are retrieved in the same manner.
+ // The underlying storage may be different.
+ uint32_t* pArray2_copy = new uint32_t[nArraySize];
+ uint32_t* pArray3_copy = new uint32_t[nArraySize];
+
+ memcpy(pArray2_copy, pArray2, sizeof(uint32_t) * nArraySize);
+ memcpy(pArray3_copy, pArray3, sizeof(uint32_t) * nArraySize);
+
+ for(uint32_t i = 0; i < nArraySize; i++)
+ {
+ EATEST_VERIFY(pArray2_copy[0] == pArray3_copy[0]);
+ std::pop_heap(pArray2_copy, pArray2_copy + nArraySize - i);
+ pop_heap(pArray3_copy, pArray3_copy + nArraySize - i);
+ }
+ delete[] pArray2_copy;
+ delete[] pArray3_copy;
+ return nErrorCount;
+}
+
+
+
+int TestHeap()
+{
+ int nErrorCount = 0;
+
+ // We do a bit of our heap testing by simply doing rng operations and comparing
+ // to a standard STL implementation of the heap functions.
+
+ {
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+
+ const int32_t kMinArraySize = 2;
+ const int32_t kMaxArraySize = 1000;
+ const int32_t kMinValue = 0;
+ const int32_t kMaxValue = 500;
+
+ // To consider, instead of using 25, try conditioning on EA::UnitTest::GetSystemSpeed().
+ // I tried this, but even though Caps and PC are the same system speed, Caps was quite slower
+ // than PC doing 75 loops
+ for(int i = 0; (i < 25) && (nErrorCount == 0); i++)
+ {
+ //
+ // Set up an array of data to work with as a heap.
+ uint32_t nArraySizeInitial = (uint32_t)rng.RandRange(kMinArraySize, kMaxArraySize);
+ uint32_t nArraySize = nArraySizeInitial;
+ uint32_t* pArray1 = new uint32_t[nArraySize + 1]; // Array1 is the original data. // +1 because we append an additional element in the is_heap_until test below.
+ uint32_t* pArray2 = new uint32_t[nArraySize + 1]; // Array2 is the data in std::make_heap
+ uint32_t* pArray3 = new uint32_t[nArraySize + 1]; // Array3 is the data in eastl::make_heap.
+
+ for(uint32_t j = 0; j < nArraySize; j++)
+ pArray1[j] = pArray2[j] = pArray3[j] = (uint32_t)rng.RandRange(kMinValue, kMaxValue);
+
+
+ // make_heap
+ std::make_heap(pArray2, pArray2 + nArraySize);
+ make_heap(pArray3, pArray3 + nArraySize);
+ VerifyHeaps(pArray2, pArray3, nArraySize);
+
+
+ // is_heap_until
+ {
+ pArray3[nArraySize] = kMaxValue + 1; // Append a value which is guaranteed to break the heap.
+ uint32_t* pUntil = is_heap_until(pArray3, pArray3 + (nArraySize + 1));
+ EATEST_VERIFY_F(pUntil == (pArray3 + nArraySize), "is_heap_until failure in iteration %d for array size %I32u.", nArraySize);
+ }
+
+
+ // pop_heap
+ const int popCount = min<uint32_t>(200, nArraySize);
+ for(int k = 0; (k < popCount) && (nErrorCount == 0); k++, nArraySize--)
+ {
+ std::pop_heap(pArray2, pArray2 + nArraySize);
+ pArray2[nArraySize - 1] = 0xffffffff; // Set it to some value so we can recognize it in a debugger.
+
+ pop_heap(pArray3, pArray3 + nArraySize);
+ pArray3[nArraySize - 1] = 0xffffffff;
+
+ VerifyHeaps(pArray2, pArray3, nArraySize - 1);
+ }
+
+
+ // push_heap
+ const int pushCount = popCount;
+ for(int m = 0; (m < pushCount) && (nErrorCount == 0); m++, nArraySize++)
+ {
+ const uint32_t n = (uint32_t)rng.RandRange(kMinValue, kMaxValue);
+
+ pArray2[nArraySize] = n;
+ std::push_heap(pArray2, pArray2 + nArraySize + 1);
+
+ pArray3[nArraySize] = n;
+ push_heap(pArray3, pArray3 + nArraySize + 1);
+
+ VerifyHeaps(pArray2, pArray3, nArraySize + 1);
+ }
+
+ uint32_t originalSize = nArraySize;
+ // remove_heap
+ // Because the heap that stdlib on iOS and other platforms differs, different elements
+ // will be removed. After calling remove heap, we cannot call VerifyHeaps anymore, but
+ // can still check that heap format is retained.
+ const int eraseCount = popCount;
+ for(int e = 0; (e < eraseCount) && (nErrorCount == 0); e++, nArraySize--)
+ {
+ const uint32_t position = (uint32_t)rng.RandRange(0, nArraySize);
+
+ remove_heap(pArray2, nArraySize, position);
+ pArray2[nArraySize - 1] = 0xffffffff;
+
+ remove_heap(pArray3, nArraySize, position);
+ pArray3[nArraySize - 1] = 0xffffffff;
+
+ //use is_heap_until to verify remove_heap is working.
+ if(nArraySize > 1) //If we just popped last element, don't use is_heap_until
+ {
+ uint32_t* pUntil = is_heap_until(pArray2, pArray2 + (nArraySize));
+ EATEST_VERIFY_F(pUntil == (pArray2 + nArraySize - 1), "pUntil failure for pArray2 with array size %I32u.", nArraySize);
+
+ pUntil = is_heap_until(pArray3, pArray3 + (nArraySize));
+ EATEST_VERIFY_F(pUntil == (pArray3 + nArraySize - 1), "failure for pArray3 with array size %I32u.", nArraySize);
+ }
+ }
+
+ // push_heap -- increase the heap size back to the original size.
+ for(int m = 0; (m < pushCount) && (nErrorCount == 0); m++, nArraySize++)
+ {
+ const uint32_t n = (uint32_t)rng.RandRange(kMinValue, kMaxValue);
+
+ pArray2[nArraySize] = n;
+ std::push_heap(pArray2, pArray2 + nArraySize + 1);
+
+ pArray3[nArraySize] = n;
+ push_heap(pArray3, pArray3 + nArraySize + 1);
+ }
+
+ EATEST_VERIFY_F(nArraySize == originalSize, "Array size is %d not original size %d", nArraySize , originalSize);
+
+ uint32_t* pUntil = is_heap_until(pArray2, pArray2 + (nArraySize));
+ EATEST_VERIFY_F(pUntil == (pArray2 + nArraySize), "failure for pArray2 with array size %I32u.", nArraySize);
+ pUntil = is_heap_until(pArray3, pArray3 + (nArraySize));
+ EATEST_VERIFY_F(pUntil == (pArray3 + nArraySize), "failure for pArray3 with array size %I32u.", nArraySize);
+
+
+ // change_heap
+ const int changeCount = popCount;
+ for(int r = 0; (r < changeCount) && (nErrorCount == 0); r++, nArraySize--)
+ {
+ uint32_t position = (uint32_t)rng.RandRange(0, nArraySize);
+ uint32_t newValue = (uint32_t)rng.RandRange(kMinValue, kMaxValue);
+
+ if(rng.RandLimit(5) == 0) // One in five chance that we use the heap top position.
+ position = 0;
+ if(rng.RandLimit(5) != 0) // One in five chance that we do no change.
+ pArray2[position] = pArray3[position] = newValue;
+
+ // There is no std::change_heap, so we just use ours for this test.
+ change_heap(pArray2, nArraySize, position);
+ pArray2[nArraySize - 1] = 0xffffffff;
+
+ change_heap(pArray3, nArraySize, position);
+ pArray3[nArraySize - 1] = 0xffffffff;
+
+ if(nArraySize > 1) //If we just removed last element, don't use is_heap_until
+ {
+ uint32_t* pUntilChanged = is_heap_until(pArray2, pArray2 + (nArraySize));
+ EATEST_VERIFY_F(pUntilChanged == (pArray2 + nArraySize - 1), "failure for pArray2 with array size %I32u.", nArraySize);
+ pUntilChanged = is_heap_until(pArray3, pArray3 + (nArraySize));
+ EATEST_VERIFY_F(pUntilChanged == (pArray3 + nArraySize - 1), "failure for pArray3 with array size %I32u.", nArraySize);
+ }
+ }
+
+
+ // sort_heap
+ std::sort_heap(pArray2, pArray2 + nArraySize);
+ sort_heap(pArray3, pArray3 + nArraySize);
+
+ for(uint32_t q = 1; (q < nArraySize) && (nErrorCount == 0); q++)
+ {
+ EATEST_VERIFY(pArray2[q-1] <= pArray2[q]);
+ EATEST_VERIFY(pArray3[q-1] <= pArray3[q]);
+ }
+ // Free our heap data.
+ delete[] pArray1;
+ delete[] pArray2;
+ delete[] pArray3;
+ }
+
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ }
+
+ {
+ // Test aligned types.
+
+ // Aligned objects should be CustomAllocator instead of the default, because the
+ // EASTL default might be unable to do aligned allocations, but CustomAllocator always can.
+ eastl::vector<Align64, CustomAllocator> heap;
+
+ for(int i = 0; i < 16; i++)
+ heap.push_back(Align64(i));
+
+ eastl::make_heap(heap.begin(), heap.end());
+ EATEST_VERIFY(is_heap(heap.begin(), heap.end()));
+
+ heap.push_back(Align64(7));
+ eastl::push_heap(heap.begin(), heap.end());
+ EATEST_VERIFY(is_heap(heap.begin(), heap.end()));
+
+ heap.push_back(Align64(7));
+ eastl::push_heap(heap.begin(), heap.end());
+ heap.pop_back();
+ EATEST_VERIFY(is_heap(heap.begin(), heap.end()));
+
+ eastl::remove_heap(heap.begin(), heap.size(), (eastl_size_t)4);
+ heap.pop_back();
+ EATEST_VERIFY(is_heap(heap.begin(), heap.end()));
+
+ eastl::sort_heap(heap.begin(), heap.end());
+ EATEST_VERIFY(is_sorted(heap.begin(), heap.end()));
+ }
+
+ {
+ Align16 heap[5];
+
+ eastl::make_heap(heap, heap + 5);
+ EATEST_VERIFY(is_heap(heap, heap + 5));
+
+ eastl::partial_sort(heap, heap + 3, heap + 5);
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestIntrusiveHash.cpp b/EASTL/test/source/TestIntrusiveHash.cpp
new file mode 100644
index 0000000..f089aab
--- /dev/null
+++ b/EASTL/test/source/TestIntrusiveHash.cpp
@@ -0,0 +1,773 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/internal/intrusive_hashtable.h>
+#include <EASTL/intrusive_hash_set.h>
+#include <EASTL/intrusive_hash_map.h>
+#include <EABase/eabase.h>
+
+
+
+using namespace eastl;
+
+
+namespace
+{
+ struct SetWidget : public intrusive_hash_node
+ {
+ SetWidget(int x = 0)
+ : mX(x) { }
+ int mX;
+ };
+
+ inline bool operator==(const SetWidget& a, const SetWidget& b)
+ { return a.mX == b.mX; }
+
+ struct SWHash
+ {
+ size_t operator()(const SetWidget& sw) const
+ {
+ return (size_t)sw.mX;
+ }
+ };
+
+ struct SetWidgetComparable // Exists for the sole purpose of testing the find_as function.
+ {
+ SetWidgetComparable(int x = 0)
+ : mX(x) { }
+ int mX;
+ };
+
+ struct SWCHash
+ {
+ size_t operator()(const SetWidgetComparable& swc) const
+ {
+ return (size_t)swc.mX;
+ }
+ };
+
+ bool operator==(const SetWidget& a, const SetWidgetComparable& b)
+ { return a.mX == b.mX; }
+
+
+
+ struct MapWidget : public intrusive_hash_node_key<int>
+ {
+ MapWidget(int x = 0)
+ : mX(x) { }
+ int mX;
+ };
+
+ inline bool operator==(const MapWidget& a, const MapWidget& b)
+ { return a.mX == b.mX; }
+
+ //struct MapWidgetComparable // Exists for the sole purpose of testing the find_as function.
+ //{
+ // MapWidgetComparable(int x = 0)
+ // : mX(x) { }
+ // int mX;
+ //};
+ //
+ //bool operator==(const SetWidget& a, const MapWidgetComparable& b)
+ // { return a.mX == b.mX; }
+
+
+
+
+ // IHWidget
+ //
+ // Implements the intrusive node data directly instead of inheriting from intrusive_hash_node.
+ //
+ struct IHWidget
+ {
+ IHWidget(int x = 0)
+ : mX(x) { }
+
+ int mX;
+ IHWidget* mpNext;
+ typedef int key_type;
+ int mKey;
+
+ };
+
+ inline bool operator==(const IHWidget& a, const IHWidget& b)
+ { return a.mX == b.mX; }
+
+ struct IHWHash
+ {
+ size_t operator()(const IHWidget& ihw) const
+ {
+ return (size_t)ihw.mX;
+ }
+ };
+
+} // namespace
+
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+//template class intrusive_hash_set<SetWidget>;
+//template class intrusive_hash_map<MapWidget>;
+
+
+template class eastl::intrusive_hashtable<SetWidget, SetWidget, SWHash, eastl::equal_to<SetWidget>, 37, true, true>;
+template class eastl::intrusive_hashtable<int, MapWidget, eastl::hash<int>, eastl::equal_to<int>, 37, false, true>;
+
+template class eastl::intrusive_hash_set<SetWidget, 37, SWHash>;
+template class eastl::intrusive_hash_multiset<SetWidget, 37, SWHash>;
+
+template class eastl::intrusive_hash_map<int, MapWidget, 37>;
+template class eastl::intrusive_hash_multimap<int, MapWidget, 37>;
+
+template class eastl::intrusive_hash_set<IHWidget, 37, IHWHash>;
+template class eastl::intrusive_hash_multiset<IHWidget, 37, IHWHash>;
+
+template class eastl::intrusive_hash_map<int, IHWidget, 37, IHWHash>;
+template class eastl::intrusive_hash_multimap<int, IHWidget, 37, IHWHash>;
+
+
+
+
+
+int TestIntrusiveHash()
+{
+ int nErrorCount = 0;
+
+ {
+ SetWidget sw1, sw2;
+ VERIFY(sw1 == sw2);
+
+ MapWidget mw1, mw2;
+ VERIFY(mw1 == mw2);
+
+ IHWidget iw1, iw2;
+ VERIFY(iw1 == iw2);
+
+ IHWHash ih1;
+ VERIFY(ih1.operator()(iw1) == ih1.operator()(iw2));
+ }
+
+ {
+ // Test intrusive_hash_set
+
+ const size_t kBucketCount = 37;
+ typedef intrusive_hash_set<SetWidget, kBucketCount, SWHash> IHM_SW;
+
+ const size_t kArraySize = 100;
+ SetWidget swArray[kArraySize];
+
+ int nExpectedKeySum = 0; // We use this as a checksum in order to do validity checks below.
+
+ for(size_t i = 0; i < kArraySize; i++)
+ {
+ swArray[i].mX = (int)i;
+ nExpectedKeySum += (int)i;
+ }
+
+
+ // const key_equal& key_eq() const;
+ // key_equal& key_eq();
+ IHM_SW ih;
+ const IHM_SW ihc;
+
+ const IHM_SW::key_equal& ke = ihc.key_eq();
+ ih.key_eq() = ke;
+
+
+ // intrusive_hashtable(const Hash&, const Equal&);
+ // void swap(this_type& x);
+ // size_type size() const;
+ // bool empty() const;
+ // size_type bucket_count() const;
+ // size_type bucket_size(size_type n) const;
+ // float load_factor() const;
+ // void clear();
+ // bool validate() const;
+
+ IHM_SW ihmSW1;
+ IHM_SW ihmSW2;
+
+ VERIFY(ihmSW1.size() == 0);
+ VERIFY(ihmSW1.empty());
+ VERIFY(ihmSW1.validate());
+ VERIFY(ihmSW2.validate());
+
+ ihmSW1.swap(ihmSW2);
+
+ VERIFY(ihmSW1.validate());
+ VERIFY(ihmSW2.validate());
+ VERIFY(ihmSW2.bucket_count() == kBucketCount);
+ VERIFY(ihmSW2.bucket_size(0) == 0);
+ VERIFY(ihmSW2.bucket_size(kBucketCount - 1) == 0);
+ VERIFY(ihmSW1.load_factor() == 0.f);
+ VERIFY(ihmSW2.load_factor() == 0.f);
+
+ ihmSW1.clear();
+ VERIFY(ihmSW1.validate());
+ VERIFY(ihmSW1.begin() == ihmSW1.end());
+
+
+ // void insert(InputIterator first, InputIterator last);
+ // insert_return_type insert(value_type& value);
+ // void swap(this_type& x);
+ // void clear();
+
+ ihmSW1.clear();
+ ihmSW1.insert(swArray, swArray + (kArraySize - 10));
+ for(int i = 0; i < 10; i++) // insert the remaining elements via the other insert function.
+ {
+ pair<IHM_SW::iterator, bool> result = ihmSW1.insert(swArray[(kArraySize - 10) + i]);
+ VERIFY(result.second == true);
+ }
+
+ VERIFY(ihmSW1.size() == kArraySize);
+ VERIFY(ihmSW1.validate());
+
+ for(size_t i = 0; i < kArraySize; i++)
+ {
+ // Try to re-insert the elements. All insertions should fail.
+ pair<IHM_SW::iterator, bool> result = ihmSW1.insert(swArray[i]);
+ VERIFY(result.second == false);
+ }
+
+ VERIFY(ihmSW1.size() == kArraySize);
+ VERIFY(!ihmSW1.empty());
+ VERIFY(ihmSW1.validate());
+
+ ihmSW2.clear();
+ ihmSW1.swap(ihmSW2);
+
+
+ // size_type size() const;
+ // bool empty() const;
+ // size_type count(const key_type& k) const;
+ // size_type bucket_size(size_type n) const;
+ // float load_factor() const;
+ // size_type bucket(const key_type& k) const
+
+ VERIFY(ihmSW1.validate());
+ VERIFY(ihmSW2.validate());
+ VERIFY(ihmSW1.size() == 0);
+ VERIFY(ihmSW1.empty());
+ VERIFY(ihmSW2.size() == kArraySize);
+ VERIFY(!ihmSW2.empty());
+ VERIFY(ihmSW1.load_factor() == 0.f);
+ VERIFY(ihmSW2.load_factor() > 2.f);
+ VERIFY(ihmSW1.count(0) == 0);
+ VERIFY(ihmSW1.count(999999) == 0);
+ VERIFY(ihmSW2.count(0) == 1);
+ VERIFY(ihmSW2.count(999999) == 0);
+ VERIFY(ihmSW2.bucket_size(0) == 3); // We just happen to know this should be so based on the distribution.
+ VERIFY(ihmSW2.bucket(13) == (13 % kBucketCount)); // We know this is so because our hash function simply returns n.
+ VERIFY(ihmSW2.bucket(10000) == (10000 % kBucketCount)); // We know this is so because our hash function simply returns n.
+
+
+ // iterator begin();
+ // const_iterator begin() const;
+
+ ihmSW1.swap(ihmSW2);
+ int nSum = 0;
+
+ for(IHM_SW::iterator it = ihmSW1.begin(); it != ihmSW1.end(); ++it)
+ {
+ const SetWidget& sw = *it; // Recall that set iterators are const_iterators.
+
+ nSum += sw.mX;
+
+ const int iresult = ihmSW1.validate_iterator(it);
+ VERIFY(iresult == (isf_valid | isf_current | isf_can_dereference));
+
+ IHM_SW::iterator itf = ihmSW1.find(sw.mX);
+ VERIFY(itf == it);
+ }
+
+ VERIFY(nSum == nExpectedKeySum);
+
+
+ // iterator end();
+ // const_iterator end() const;
+
+ const IHM_SW& ihmSW1Const = ihmSW1;
+
+ for(IHM_SW::const_iterator itc = ihmSW1Const.begin(); itc != ihmSW1Const.end(); ++itc)
+ {
+ const SetWidget& sw = *itc;
+
+ IHM_SW::const_iterator itf = ihmSW1.find(sw.mX);
+ VERIFY(itf == itc);
+ }
+
+
+ // local_iterator begin(size_type n)
+ // local_iterator end(size_type)
+
+ for(IHM_SW::local_iterator itl = ihmSW1.begin(5); itl != ihmSW1.end(5); ++itl)
+ {
+ const SetWidget& sw = *itl; // Recall that set iterators are const_iterators.
+
+ VERIFY((sw.mX % kBucketCount) == 5);
+ }
+
+
+ // const_local_iterator begin(size_type n) const
+ // const_local_iterator end(size_type) const
+
+ for(IHM_SW::const_local_iterator itlc = ihmSW1Const.begin(5); itlc != ihmSW1Const.end(5); ++itlc)
+ {
+ const SetWidget& sw = *itlc;
+
+ VERIFY((sw.mX % kBucketCount) == 5);
+ }
+
+
+ // iterator find(const key_type& k);
+ // const_iterator find(const key_type& k) const;
+
+ IHM_SW::iterator itf = ihmSW1.find(SetWidget(99999));
+ VERIFY(itf == ihmSW1.end());
+
+ IHM_SW::const_iterator itfc = ihmSW1Const.find(SetWidget(99999));
+ VERIFY(itfc == ihmSW1Const.end());
+
+
+ // iterator find_as(const U& u);
+ // const_iterator find_as(const U& u) const;
+
+ //itf = ihmSW1.find_as(SetWidget(7)); // Can't work unless there was a default eastl::hash function for SetWidget.
+ //VERIFY(itf->mX == 7);
+
+ //itfc = ihmSW1Const.find_as(SetWidget(7));
+ //VERIFY(itfc->mX == 7);
+
+
+ // iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
+ // const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
+
+ itf = ihmSW1.find_as(SetWidgetComparable(7), SWCHash(), eastl::equal_to_2<SetWidget, SetWidgetComparable>());
+ VERIFY(itf->mX == 7);
+
+ itfc = ihmSW1Const.find_as(SetWidgetComparable(7), SWCHash(), eastl::equal_to_2<SetWidget, SetWidgetComparable>());
+ VERIFY(itfc->mX == 7);
+
+
+ // iterator erase(iterator);
+ // iterator erase(iterator, iterator);
+ // size_type erase(const key_type&);
+
+ eastl_size_t n = ihmSW1.erase(SetWidget(99999));
+ VERIFY(n == 0);
+
+ n = ihmSW1.erase(SetWidget(17));
+ VERIFY(n == 1);
+
+ itf = ihmSW1.find(SetWidget(18));
+ VERIFY(itf != ihmSW1.end());
+ VERIFY(ihmSW1.validate_iterator(itf) == (isf_valid | isf_current | isf_can_dereference));
+
+ itf = ihmSW1.erase(itf);
+ VERIFY(itf != ihmSW1.end());
+ VERIFY(ihmSW1.validate_iterator(itf) == (isf_valid | isf_current | isf_can_dereference));
+
+ itf = ihmSW1.find(SetWidget(18));
+ VERIFY(itf == ihmSW1.end());
+
+ itf = ihmSW1.find(SetWidget(19));
+ VERIFY(itf != ihmSW1.end());
+
+ IHM_SW::iterator itf2(itf);
+ eastl::advance(itf2, 7);
+ VERIFY(itf2 != ihmSW1.end());
+ VERIFY(ihmSW1.validate_iterator(itf2) == (isf_valid | isf_current | isf_can_dereference));
+
+ itf = ihmSW1.erase(itf, itf2);
+ VERIFY(itf != ihmSW1.end());
+ VERIFY(ihmSW1.validate_iterator(itf) == (isf_valid | isf_current | isf_can_dereference));
+
+ itf = ihmSW1.find(SetWidget(19));
+ VERIFY(itf == ihmSW1.end());
+
+
+ // eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ // eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ eastl::pair<IHM_SW::iterator, IHM_SW::iterator> p = ihmSW1.equal_range(SetWidget(1));
+ VERIFY(p.first != ihmSW1.end());
+ VERIFY(p.second != ihmSW1.end());
+
+ eastl::pair<IHM_SW::const_iterator, IHM_SW::const_iterator> pc = ihmSW1Const.equal_range(SetWidget(1));
+ VERIFY(pc.first != ihmSW1Const.end());
+ VERIFY(pc.second != ihmSW1Const.end());
+
+
+ // void clear();
+ // bool validate() const;
+ // int validate_iterator(const_iterator i) const;
+
+ IHM_SW::iterator itTest;
+ int iresult = ihmSW1.validate_iterator(itTest);
+ VERIFY(iresult == isf_none);
+
+ itTest = ihmSW1.begin();
+ iresult = ihmSW1.validate_iterator(itTest);
+ VERIFY(iresult == (isf_valid | isf_current | isf_can_dereference));
+
+ itTest = ihmSW1.end();
+ iresult = ihmSW1.validate_iterator(itTest);
+ VERIFY(iresult == (isf_valid | isf_current));
+
+ ihmSW1.clear();
+ ihmSW2.clear();
+ VERIFY(ihmSW1.validate());
+ VERIFY(ihmSW2.validate());
+
+ itTest = ihmSW1.begin();
+ iresult = ihmSW1.validate_iterator(itTest);
+ VERIFY(iresult == (isf_valid | isf_current));
+ }
+
+
+ {
+ // Test intrusive_hash_map
+
+ const size_t kBucketCount = 37;
+ typedef intrusive_hash_map<int, MapWidget, kBucketCount> IHM_MW;
+
+ const size_t kArraySize = 100;
+ MapWidget mwArray[kArraySize];
+
+ int nExpectedKeySum = 0; // We use this as a checksum in order to do validity checks below.
+
+ for(size_t i = 0; i < kArraySize; i++)
+ {
+ mwArray[i].mKey = (int)i;
+ mwArray[i].mX = (int)i;
+ nExpectedKeySum += (int)i;
+ }
+
+
+ // intrusive_hashtable(const Hash&, const Equal&);
+ // void swap(this_type& x);
+ // size_type size() const;
+ // bool empty() const;
+ // size_type bucket_count() const;
+ // size_type bucket_size(size_type n) const;
+ // float load_factor() const;
+ // void clear();
+ // bool validate() const;
+
+ IHM_MW ihmMW1;
+ IHM_MW ihmMW2;
+
+ VERIFY(ihmMW1.size() == 0);
+ VERIFY(ihmMW1.empty());
+ VERIFY(ihmMW1.validate());
+ VERIFY(ihmMW2.validate());
+
+ ihmMW1.swap(ihmMW2);
+
+ VERIFY(ihmMW1.validate());
+ VERIFY(ihmMW2.validate());
+ VERIFY(ihmMW2.bucket_count() == kBucketCount);
+ VERIFY(ihmMW2.bucket_size(0) == 0);
+ VERIFY(ihmMW2.bucket_size(kBucketCount - 1) == 0);
+ VERIFY(ihmMW1.load_factor() == 0.f);
+ VERIFY(ihmMW2.load_factor() == 0.f);
+
+ ihmMW1.clear();
+ VERIFY(ihmMW1.validate());
+ VERIFY(ihmMW1.begin() == ihmMW1.end());
+
+
+ // void insert(InputIterator first, InputIterator last);
+ // insert_return_type insert(value_type& value);
+ // void swap(this_type& x);
+ // void clear();
+
+ ihmMW1.clear();
+ ihmMW1.insert(mwArray, mwArray + (kArraySize - 10));
+ for(int i = 0; i < 10; i++) // insert the remaining elements via the other insert function.
+ {
+ pair<IHM_MW::iterator, bool> result = ihmMW1.insert(mwArray[(kArraySize - 10) + i]);
+ VERIFY(result.second == true);
+ }
+
+ VERIFY(ihmMW1.size() == kArraySize);
+ VERIFY(ihmMW1.validate());
+
+ for(size_t i = 0; i < kArraySize; i++)
+ {
+ // Try to re-insert the elements. All insertions should fail.
+ pair<IHM_MW::iterator, bool> result = ihmMW1.insert(mwArray[i]);
+ VERIFY(result.second == false);
+ }
+
+ VERIFY(ihmMW1.size() == kArraySize);
+ VERIFY(!ihmMW1.empty());
+ VERIFY(ihmMW1.validate());
+
+ ihmMW2.clear();
+ ihmMW1.swap(ihmMW2);
+
+
+ // size_type size() const;
+ // bool empty() const;
+ // size_type count(const key_type& k) const;
+ // size_type bucket_size(size_type n) const;
+ // float load_factor() const;
+ // size_type bucket(const key_type& k) const
+
+ VERIFY(ihmMW1.validate());
+ VERIFY(ihmMW2.validate());
+ VERIFY(ihmMW1.size() == 0);
+ VERIFY(ihmMW1.empty());
+ VERIFY(ihmMW2.size() == kArraySize);
+ VERIFY(!ihmMW2.empty());
+ VERIFY(ihmMW1.load_factor() == 0.f);
+ VERIFY(ihmMW2.load_factor() > 2.f);
+ VERIFY(ihmMW1.count(0) == 0);
+ VERIFY(ihmMW1.count(999999) == 0);
+ VERIFY(ihmMW2.count(0) == 1);
+ VERIFY(ihmMW2.count(999999) == 0);
+ VERIFY(ihmMW2.bucket_size(0) == 3); // We just happen to know this should be so based on the distribution.
+ VERIFY(ihmMW2.bucket(13) == (13 % kBucketCount)); // We know this is so because our hash function simply returns n.
+ VERIFY(ihmMW2.bucket(10000) == (10000 % kBucketCount)); // We know this is so because our hash function simply returns n.
+
+
+ // iterator begin();
+ // const_iterator begin() const;
+
+ ihmMW1.swap(ihmMW2);
+ int nSum = 0;
+
+ for(IHM_MW::iterator it = ihmMW1.begin(); it != ihmMW1.end(); ++it)
+ {
+ IHM_MW::value_type& v = *it;
+
+ VERIFY(v.mKey == v.mX); // We intentionally made this so above.
+ nSum += v.mKey;
+
+ const int iresult = ihmMW1.validate_iterator(it);
+ VERIFY(iresult == (isf_valid | isf_current | isf_can_dereference));
+
+ IHM_MW::iterator itf = ihmMW1.find(v.mKey);
+ VERIFY(itf == it);
+ }
+
+ VERIFY(nSum == nExpectedKeySum);
+
+
+ // iterator end();
+ // const_iterator end() const;
+
+ const IHM_MW& ihmMW1Const = ihmMW1;
+
+ for(IHM_MW::const_iterator itc = ihmMW1Const.begin(); itc != ihmMW1Const.end(); ++itc)
+ {
+ const IHM_MW::value_type& v = *itc;
+
+ VERIFY(v.mKey == v.mX); // We intentionally made this so above.
+
+ IHM_MW::const_iterator itf = ihmMW1Const.find(v.mKey);
+ VERIFY(itf == itc);
+ }
+
+
+ // local_iterator begin(size_type n)
+ // local_iterator end(size_type)
+
+ for(IHM_MW::local_iterator itl = ihmMW1.begin(5); itl != ihmMW1.end(5); ++itl)
+ {
+ IHM_MW::value_type& v = *itl;
+
+ VERIFY(v.mKey == v.mX); // We intentionally made this so above.
+ }
+
+
+ // const_local_iterator begin(size_type n) const
+ // const_local_iterator end(size_type) const
+
+ for(IHM_MW::const_local_iterator itlc = ihmMW1Const.begin(5); itlc != ihmMW1Const.end(5); ++itlc)
+ {
+ const IHM_MW::value_type& v = *itlc;
+
+ VERIFY(v.mKey == v.mX); // We intentionally made this so above.
+ }
+
+
+ // iterator find(const key_type& k);
+ // const_iterator find(const key_type& k) const;
+
+ IHM_MW::iterator itf = ihmMW1.find(99999);
+ VERIFY(itf == ihmMW1.end());
+
+ IHM_MW::const_iterator itfc = ihmMW1Const.find(99999);
+ VERIFY(itfc == ihmMW1Const.end());
+
+
+ // iterator find_as(const U& u);
+ // const_iterator find_as(const U& u) const;
+
+ itf = ihmMW1.find_as(7.f);
+ VERIFY(itf->mKey == 7);
+
+ itfc = ihmMW1Const.find_as(7.f);
+ VERIFY(itfc->mKey == 7);
+
+ itf = ihmMW1.find_as(8);
+ VERIFY(itf->mKey == 8);
+
+ itfc = ihmMW1Const.find_as(8);
+ VERIFY(itfc->mKey == 8);
+
+
+ // iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
+ // const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
+
+ itf = ihmMW1.find_as(7.f, eastl::hash<float>(), eastl::equal_to_2<int, float>());
+ VERIFY(itf->mKey == 7);
+
+ itfc = ihmMW1Const.find_as(7.f, eastl::hash<float>(), eastl::equal_to_2<int, float>());
+ VERIFY(itfc->mKey == 7);
+
+
+ // iterator erase(iterator);
+ // iterator erase(iterator, iterator);
+ // size_type erase(const key_type&);
+
+ eastl_size_t n = ihmMW1.erase(99999);
+ VERIFY(n == 0);
+
+ n = ihmMW1.erase(17);
+ VERIFY(n == 1);
+
+ itf = ihmMW1.find(18);
+ VERIFY(itf != ihmMW1.end());
+ VERIFY(ihmMW1.validate_iterator(itf) == (isf_valid | isf_current | isf_can_dereference));
+
+ itf = ihmMW1.erase(itf);
+ VERIFY(itf != ihmMW1.end());
+ VERIFY(ihmMW1.validate_iterator(itf) == (isf_valid | isf_current | isf_can_dereference));
+
+ itf = ihmMW1.find(18);
+ VERIFY(itf == ihmMW1.end());
+
+ itf = ihmMW1.find(19);
+ VERIFY(itf != ihmMW1.end());
+
+ IHM_MW::iterator itf2(itf);
+ eastl::advance(itf2, 7);
+ VERIFY(itf2 != ihmMW1.end());
+ VERIFY(ihmMW1.validate_iterator(itf2) == (isf_valid | isf_current | isf_can_dereference));
+
+ itf = ihmMW1.erase(itf, itf2);
+ VERIFY(itf != ihmMW1.end());
+ VERIFY(ihmMW1.validate_iterator(itf) == (isf_valid | isf_current | isf_can_dereference));
+
+ itf = ihmMW1.find(19);
+ VERIFY(itf == ihmMW1.end());
+
+
+ // eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ // eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ eastl::pair<IHM_MW::iterator, IHM_MW::iterator> p = ihmMW1.equal_range(1);
+ VERIFY(p.first != ihmMW1.end());
+ VERIFY(p.second != ihmMW1.end());
+
+ eastl::pair<IHM_MW::const_iterator, IHM_MW::const_iterator> pc = ihmMW1Const.equal_range(1);
+ VERIFY(pc.first != ihmMW1Const.end());
+ VERIFY(pc.second != ihmMW1Const.end());
+
+
+ // void clear();
+ // bool validate() const;
+ // int validate_iterator(const_iterator i) const;
+
+ IHM_MW::iterator itTest;
+ int iresult = ihmMW1.validate_iterator(itTest);
+ VERIFY(iresult == isf_none);
+
+ itTest = ihmMW1.begin();
+ iresult = ihmMW1.validate_iterator(itTest);
+ VERIFY(iresult == (isf_valid | isf_current | isf_can_dereference));
+
+ itTest = ihmMW1.end();
+ iresult = ihmMW1.validate_iterator(itTest);
+ VERIFY(iresult == (isf_valid | isf_current));
+
+ ihmMW1.clear();
+ ihmMW2.clear();
+ VERIFY(ihmMW1.validate());
+ VERIFY(ihmMW2.validate());
+
+ itTest = ihmMW1.begin();
+ iresult = ihmMW1.validate_iterator(itTest);
+ VERIFY(iresult == (isf_valid | isf_current));
+ }
+
+
+ {
+ // Test case of single bucket.
+ eastl::intrusive_hash_set<SetWidget, 1, SWHash> hs;
+ SetWidget node1, node2, node3;
+
+ node1.mX = 1;
+ node2.mX = 2;
+ node3.mX = 3;
+
+ hs.insert(node1);
+ hs.insert(node2);
+ hs.insert(node3);
+
+ const eastl_size_t removeCount = hs.erase(node3);
+ VERIFY(removeCount == 1);
+ }
+
+
+ {
+ // Test intrusive_hashtable_iterator(value_type* pNode, value_type** pBucket = NULL)
+ eastl::intrusive_hash_set<SetWidget, 37, SWHash> hs;
+ SetWidget node1, node2, node3;
+
+ node1.mX = 1;
+ node2.mX = 2;
+ node3.mX = 3;
+
+ hs.insert(node1);
+ hs.insert(node2);
+ hs.insert(node3);
+
+ VERIFY(hs.validate());
+
+ hs.remove(node1);
+ hs.remove(node2);
+ hs.remove(node3);
+
+ VERIFY(hs.validate());
+
+ hs.insert(node1);
+ hs.insert(node2);
+ hs.insert(node3);
+
+ VERIFY(hs.validate());
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestIntrusiveList.cpp b/EASTL/test/source/TestIntrusiveList.cpp
new file mode 100644
index 0000000..60b2378
--- /dev/null
+++ b/EASTL/test/source/TestIntrusiveList.cpp
@@ -0,0 +1,403 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/intrusive_list.h>
+#include <EABase/eabase.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stdio.h>
+#include <stdarg.h>
+#include <stddef.h>
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <string>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+using namespace eastl;
+
+
+namespace
+{
+
+ /// IntNode
+ ///
+ /// Test intrusive_list node.
+ ///
+ struct IntNode : public eastl::intrusive_list_node
+ {
+ int mX;
+
+ IntNode(int x = 0)
+ : mX(x) { }
+
+ operator int() const
+ { return mX; }
+ };
+
+
+ /// ListInit
+ ///
+ /// Utility class for setting up a list.
+ ///
+ class ListInit
+ {
+ public:
+ ListInit(intrusive_list<IntNode>& container, IntNode* pNodeArray)
+ : mpContainer(&container), mpNodeArray(pNodeArray)
+ {
+ mpContainer->clear();
+ }
+
+ ListInit& operator+=(int x)
+ {
+ mpNodeArray->mX = x;
+ mpContainer->push_back(*mpNodeArray++);
+ return *this;
+ }
+
+ ListInit& operator,(int x)
+ {
+ mpNodeArray->mX = x;
+ mpContainer->push_back(*mpNodeArray++);
+ return *this;
+ }
+
+ protected:
+ intrusive_list<IntNode>* mpContainer;
+ IntNode* mpNodeArray;
+ };
+
+} // namespace
+
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::intrusive_list<IntNode>;
+
+
+
+int TestIntrusiveList()
+{
+ int nErrorCount = 0;
+ int i;
+
+ {
+ // Verify that intrusive_list_node is a POD, at least when EASTL_VALIDATE_INTRUSIVE_LIST is disabled.
+ #if !EASTL_VALIDATE_INTRUSIVE_LIST
+ // is_pod doesn't currently detect structs as PODs, even though it should.
+ // This is due to limitations in C++.
+ // VERIFY(eastl::is_pod<eastl::intrusive_list_node>::value);
+
+ const size_t offset = offsetof(intrusive_list_node, mpPrev);
+ VERIFY(offset == sizeof(intrusive_list_node*));
+ #endif
+ }
+
+ {
+ IntNode nodes[20];
+
+ intrusive_list<IntNode> ilist;
+
+ // Enforce that the intrusive_list copy ctor is visible. If it is not,
+ // then the class is not a POD type as it is supposed to be.
+ delete new intrusive_list<IntNode>(ilist);
+
+ #ifndef __GNUC__ // GCC warns on this, though strictly specaking it is allowed to.
+ // Enforce that offsetof() can be used with an intrusive_list in a struct;
+ // it requires a POD type. Some compilers will flag warnings or even errors
+ // when this is violated.
+ struct Test {
+ intrusive_list<IntNode> m;
+ };
+ (void)offsetof(Test, m);
+ #endif
+
+ // begin / end
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "ctor()", -1));
+
+
+ // push_back
+ ListInit(ilist, nodes) += 0, 1, 2, 3, 4, 5, 6, 7, 8, 9;
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "push_back()", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1));
+
+
+ // iterator / begin
+ intrusive_list<IntNode>::iterator it = ilist.begin();
+ VERIFY(it->mX == 0);
+ ++it;
+ VERIFY(it->mX == 1);
+ ++it;
+ VERIFY(it->mX == 2);
+ ++it;
+ VERIFY(it->mX == 3);
+
+
+ // const_iterator / begin
+ const intrusive_list<IntNode> cilist;
+ intrusive_list<IntNode>::const_iterator cit;
+ for(cit = cilist.begin(); cit != cilist.end(); ++cit)
+ VERIFY(cit == cilist.end()); // This is guaranteed to be false.
+
+
+ // reverse_iterator / rbegin
+ intrusive_list<IntNode>::reverse_iterator itr = ilist.rbegin();
+ VERIFY(itr->mX == 9);
+ ++itr;
+ VERIFY(itr->mX == 8);
+ ++itr;
+ VERIFY(itr->mX == 7);
+ ++itr;
+ VERIFY(itr->mX == 6);
+
+
+ // iterator++/--
+ {
+ intrusive_list<IntNode>::iterator it1(ilist.begin());
+ intrusive_list<IntNode>::iterator it2(ilist.begin());
+
+ ++it1;
+ ++it2;
+ if ((it1 != it2++) || (++it1 != it2))
+ VERIFY(!"[iterator::increment] fail\n");
+
+ if ((it1 != it2--) || (--it1 != it2))
+ VERIFY(!"[iterator::decrement] fail\n");
+ }
+
+
+ // clear / empty
+ VERIFY(!ilist.empty());
+
+ ilist.clear();
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "clear()", -1));
+ VERIFY(ilist.empty());
+
+
+ // splice
+ ListInit(ilist, nodes) += 0, 1, 2, 3, 4, 5, 6, 7, 8, 9;
+
+ ilist.splice(++ilist.begin(), ilist, --ilist.end());
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "splice(single)", 0, 9, 1, 2, 3, 4, 5, 6, 7, 8, -1));
+
+ intrusive_list<IntNode> ilist2;
+ ListInit(ilist2, nodes+10) += 10, 11, 12, 13, 14, 15, 16, 17, 18, 19;
+
+ ilist.splice(++++ilist.begin(), ilist2);
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "splice(whole)", -1));
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "splice(whole)", 0, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 1, 2, 3, 4, 5, 6, 7, 8, -1));
+
+ ilist.splice(ilist.begin(), ilist, ++++ilist.begin(), ----ilist.end());
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "splice(range)", 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 1, 2, 3, 4, 5, 6, 0, 9, 7, 8, -1));
+
+ ilist.clear();
+ ilist.swap(ilist2);
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "swap(empty)", -1));
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "swap(empty)", -1));
+
+ ilist2.push_back(nodes[0]);
+ ilist.splice(ilist.begin(), ilist2);
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "splice(single)", 0, -1));
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "splice(single)", -1));
+
+
+ // splice(single) -- evil case (splice at or right after current position)
+ ListInit(ilist, nodes) += 0, 1, 2, 3, 4;
+ ilist.splice(++++ilist.begin(), *++++ilist.begin());
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "splice(single)", 0, 1, 2, 3, 4, -1));
+ ilist.splice(++++++ilist.begin(), *++++ilist.begin());
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "splice(single)", 0, 1, 2, 3, 4, -1));
+
+
+ // splice(range) -- evil case (splice right after current position)
+ ListInit(ilist, nodes) += 0, 1, 2, 3, 4;
+ ilist.splice(++++ilist.begin(), ilist, ++ilist.begin(), ++++ilist.begin());
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "splice(range)", 0, 1, 2, 3, 4, -1));
+
+
+ // push_front / push_back
+ ilist.clear();
+ ilist2.clear();
+ for(i = 4; i >= 0; --i)
+ ilist.push_front(nodes[i]);
+ for(i = 5; i < 10; ++i)
+ ilist2.push_back(nodes[i]);
+
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "push_front()", 0, 1, 2, 3, 4, -1));
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "push_back()", 5, 6, 7, 8, 9, -1));
+
+ for(i = 4; i >= 0; --i)
+ {
+ ilist.pop_front();
+ ilist2.pop_back();
+ }
+
+ VERIFY(ilist.empty() && ilist2.empty());
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "pop_front()", -1));
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "pop_back()", -1));
+
+
+ // contains / locate
+ for(i = 0; i < 5; ++i)
+ ilist.push_back(nodes[i]);
+
+ VERIFY( ilist.contains(nodes[2]));
+ VERIFY(!ilist.contains(nodes[7]));
+
+ it = ilist.locate(nodes[3]);
+ VERIFY(it->mX == 3);
+
+ it = ilist.locate(nodes[8]);
+ VERIFY(it == ilist.end());
+
+
+ // reverse
+ ilist.reverse();
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "push_front()", 4, 3, 2, 1, 0, -1));
+
+
+ // validate / validate_iterator
+ VERIFY(ilist.validate());
+ it = ilist.locate(nodes[3]);
+ VERIFY((ilist.validate_iterator(it) & (isf_valid | isf_can_dereference)) != 0);
+ VERIFY( ilist.validate_iterator(intrusive_list<IntNode>::iterator(NULL)) == isf_none);
+
+
+ // swap()
+ ilist.swap(ilist2);
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "swap()", -1));
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "swap()", 4, 3, 2, 1, 0, -1));
+
+
+ // erase()
+ ListInit(ilist2, nodes) += 0, 1, 2, 3, 4;
+ ListInit(ilist, nodes+5) += 5, 6, 7, 8, 9;
+ ilist.erase(++++ilist.begin());
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "erase(single)", 5, 6, 8, 9, -1));
+
+ ilist.erase(ilist.begin(), ilist.end());
+ VERIFY(VerifySequence(ilist.begin(), ilist.end(), int(), "erase(all)", -1));
+
+ ilist2.erase(++ilist2.begin(), ----ilist2.end());
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "erase(range)", 0, 3, 4, -1));
+
+
+ // size
+ VERIFY(ilist2.size() == 3);
+
+
+ // pop_front / pop_back
+ ilist2.pop_front();
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "pop_front()", 3, 4, -1));
+
+ ilist2.pop_back();
+ VERIFY(VerifySequence(ilist2.begin(), ilist2.end(), int(), "pop_back()", 3, -1));
+ }
+
+
+ {
+ // Test copy construction and assignment.
+ // The following *should* not compile.
+
+ intrusive_list<IntNode> ilist1;
+ intrusive_list<IntNode> ilist2(ilist1);
+ ilist1 = ilist2;
+ }
+
+
+ {
+ // void sort()
+ // void sort(Compare compare)
+
+ const int kSize = 10;
+ IntNode nodes[kSize];
+
+ intrusive_list<IntNode> listEmpty;
+ listEmpty.sort();
+ VERIFY(VerifySequence(listEmpty.begin(), listEmpty.end(), int(), "list::sort", -1));
+
+ intrusive_list<IntNode> list1;
+ ListInit(list1, nodes) += 1;
+ list1.sort();
+ VERIFY(VerifySequence(list1.begin(), list1.end(), int(), "list::sort", 1, -1));
+ list1.clear();
+
+ intrusive_list<IntNode> list4;
+ ListInit(list4, nodes) += 1, 9, 2, 3;
+ list4.sort();
+ VERIFY(VerifySequence(list4.begin(), list4.end(), int(), "list::sort", 1, 2, 3, 9, -1));
+ list4.clear();
+
+ intrusive_list<IntNode> listA;
+ ListInit(listA, nodes) += 1, 9, 2, 3, 5, 7, 4, 6, 8, 0;
+ listA.sort();
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "list::sort", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1));
+ listA.clear();
+
+ intrusive_list<IntNode> listB;
+ ListInit(listB, nodes) += 1, 9, 2, 3, 5, 7, 4, 6, 8, 0;
+ listB.sort(eastl::less<int>());
+ VERIFY(VerifySequence(listB.begin(), listB.end(), int(), "list::sort", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1));
+ listB.clear();
+ }
+
+
+ {
+ // void merge(this_type& x);
+ // void merge(this_type& x, Compare compare);
+
+ const int kSize = 8;
+ IntNode nodesA[kSize];
+ IntNode nodesB[kSize];
+
+ intrusive_list<IntNode> listA;
+ ListInit(listA, nodesA) += 1, 2, 3, 4, 4, 5, 9, 9;
+
+ intrusive_list<IntNode> listB;
+ ListInit(listB, nodesB) += 1, 2, 3, 4, 4, 5, 9, 9;
+
+ listA.merge(listB);
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "list::merge", 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 9, 9, 9, 9, -1));
+ VERIFY(VerifySequence(listB.begin(), listB.end(), int(), "list::merge", -1));
+ }
+
+
+ {
+ // void unique();
+ // void unique(BinaryPredicate);
+
+ const int kSize = 8;
+ IntNode nodesA[kSize];
+ IntNode nodesB[kSize];
+
+ intrusive_list<IntNode> listA;
+ ListInit(listA, nodesA) += 1, 2, 3, 4, 4, 5, 9, 9;
+ listA.unique();
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "list::unique", 1, 2, 3, 4, 5, 9, -1));
+
+ intrusive_list<IntNode> listB;
+ ListInit(listB, nodesB) += 1, 2, 3, 4, 4, 5, 9, 9;
+ listB.unique(eastl::equal_to<int>());
+ VERIFY(VerifySequence(listA.begin(), listA.end(), int(), "list::unique", 1, 2, 3, 4, 5, 9, -1));
+ }
+
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestIntrusiveSDList.cpp b/EASTL/test/source/TestIntrusiveSDList.cpp
new file mode 100644
index 0000000..13a4802
--- /dev/null
+++ b/EASTL/test/source/TestIntrusiveSDList.cpp
@@ -0,0 +1,315 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/bonus/intrusive_sdlist.h>
+#include <EASTL/string.h>
+#include <EABase/eabase.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+
+
+
+using namespace eastl;
+
+
+namespace TestSDListLocal
+{
+
+ struct IntNode : public intrusive_sdlist_node
+ {
+ IntNode() {}
+ IntNode(int x) : mX(x) {}
+ operator int() const { return mX; }
+
+ int mX;
+ };
+
+ typedef intrusive_sdlist<IntNode> IntrusiveSDList;
+
+ template <class T>
+ eastl::string IntListToString8(const T& cont)
+ {
+ eastl::string s("<");
+ char buf[64];
+
+ for(typename T::const_iterator it(cont.begin()), itEnd(cont.end()); it != itEnd; ++it)
+ {
+ const int& v = *it;
+ sprintf(buf, " %d", v);
+ s += buf;
+ }
+
+ s += " >";
+ return s;
+ }
+
+
+ template <class T>
+ bool VerifyContainer(const T& cont, const char *testname, ...)
+ {
+ //if (!cont.validate()) {
+ // EASTLTest_Printf("intrusive_list[%s] container damaged!\n", testname);
+ // return false;
+ //}
+
+ typename T::const_iterator it(cont.begin()), itEnd(cont.end());
+ va_list val;
+ int index = 0;
+
+ va_start(val, testname);
+ while(it != itEnd)
+ {
+ int next = va_arg(val, int);
+
+ if (next == -1 || next != *it)
+ {
+ const int value = *it;
+ const char* const pString = IntListToString8(cont).c_str();
+ EASTLTest_Printf("intrusive_list[%s] Mismatch at index %d: expected %d, found %d; contents: %s\n", testname, index, next, value, pString);
+ va_end(val);
+ return false;
+ }
+
+ ++it;
+ ++index;
+ }
+
+ if (va_arg(val, int) != -1)
+ {
+ do {
+ ++index;
+ } while(va_arg(val, int) != -1);
+
+ const int countainerSize = (int)cont.size();
+ const char* const pString = IntListToString8(cont).c_str();
+ EASTLTest_Printf("intrusive_list[%s] Too many elements: expected %d, found %d; contents: %s\n", testname, index, countainerSize, pString);
+ va_end(val);
+ return false;
+ }
+
+ va_end(val);
+
+ // We silence this by default for a quieter test run.
+ // EASTLTest_Printf("intrusive_list[%s] pass\n", testname);
+ return true;
+ }
+
+
+ class ListInit
+ {
+ public:
+ ListInit(intrusive_sdlist<IntNode>& container, IntNode* pNodeArray)
+ : mpContainer(&container), mpNodeArray(pNodeArray)
+ {
+ mpContainer->clear();
+ }
+
+ ListInit& operator+=(int x)
+ {
+ mpNodeArray->mX = x;
+ mpContainer->push_back(*mpNodeArray++);
+ return *this;
+ }
+
+ ListInit& operator,(int x)
+ {
+ mpNodeArray->mX = x;
+ mpContainer->push_back(*mpNodeArray++);
+ return *this;
+ }
+
+ protected:
+ intrusive_sdlist<IntNode>* mpContainer;
+ IntNode* mpNodeArray;
+ };
+
+} // namespace
+
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::intrusive_sdlist<TestSDListLocal::IntNode>;
+
+
+
+int TestIntrusiveSDList()
+{
+ using namespace TestSDListLocal;
+
+ int nErrorCount = 0;
+
+ IntNode nodes[20];
+
+ IntrusiveSDList l;
+
+ // Enforce that the intrusive_list copy ctor is visible. If it is not, then
+ // the class is not a POD type as it is supposed to.
+ delete new IntrusiveSDList(l);
+
+ // Enforce that offsetof() can be used with an intrusive_list in a struct;
+ // it requires a POD type. Some compilers will flag warnings or even errors
+ // when this is violated.
+ struct Test { IntrusiveSDList m; };
+
+ #ifndef __GNUC__ // GCC warns on this, though strictly specaking it is allowed to.
+ (void)offsetof(Test, m);
+ #endif
+
+ VERIFY(VerifyContainer(l, "ctor()", -1));
+
+ // push_back
+ ListInit(l, nodes) += 0, 1, 2, 3, 4, 5, 6, 7, 8, 9;
+ VERIFY(VerifyContainer(l, "push_back()", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1));
+
+ // iterator++
+ {
+ IntrusiveSDList::iterator it1(l.begin());
+ IntrusiveSDList::iterator it2(l.begin());
+
+ ++it1;
+ ++it2;
+
+ if (it1 != it2++ || ++it1 != it2) {
+ VERIFY(!"[iterator::increment] fail\n");
+ }
+ }
+
+ // clear()/empty()
+ VERIFY(!l.empty());
+
+ l.clear();
+ VERIFY(VerifyContainer(l, "clear()", -1));
+ VERIFY(l.empty());
+
+ l.erase(l.begin(), l.end()); // Erase an already empty container.
+ VERIFY(l.empty());
+
+ IntrusiveSDList l2;
+
+ // splice
+ //ListInit(l, nodes) += 0, 1, 2, 3, 4, 5, 6, 7, 8, 9;
+ //
+ //l.splice(++l.begin(), l, --l.end());
+ //VERIFY(VerifyContainer(l, "splice(single)", 0, 9, 1, 2, 3, 4, 5, 6, 7, 8, -1));
+ //
+ //ListInit(l2, nodes+10) += 10, 11, 12, 13, 14, 15, 16, 17, 18, 19;
+ //
+ //l.splice(++++l.begin(), l2);
+ //VERIFY(VerifyContainer(l2, "splice(whole)", -1));
+ //VERIFY(VerifyContainer(l, "splice(whole)", 0, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 1, 2, 3, 4, 5, 6, 7, 8, -1));
+
+ //l.splice(l.begin(), l, ++++l.begin(), ----l.end());
+ //VERIFY(VerifyContainer(l, "splice(range)", 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 1, 2, 3, 4, 5, 6, 0, 9, 7, 8, -1));
+
+ //l.clear();
+ //l.swap(l2);
+ //VERIFY(VerifyContainer(l, "swap(empty)", -1));
+ //VERIFY(VerifyContainer(l2, "swap(empty)", -1));
+
+ //l2.push_back(nodes[0]);
+ //l.splice(l.begin(), l2);
+ //VERIFY(VerifyContainer(l, "splice(single)", 0, -1));
+ //VERIFY(VerifyContainer(l2, "splice(single)", -1));
+
+ // splice(single) -- evil case (splice at or right after current position)
+ //ListInit(l, nodes) += 0, 1, 2, 3, 4;
+ //l.splice(++++l.begin(), *++++l.begin());
+ //VERIFY(VerifyContainer(l, "splice(single)", 0, 1, 2, 3, 4, -1));
+ //l.splice(++++++l.begin(), *++++l.begin());
+ //VERIFY(VerifyContainer(l, "splice(single)", 0, 1, 2, 3, 4, -1));
+
+ // splice(range) -- evil case (splice right after current position)
+ //ListInit(l, nodes) += 0, 1, 2, 3, 4;
+ //l.splice(++++l.begin(), l, ++l.begin(), ++++l.begin());
+ //VERIFY(VerifyContainer(l, "splice(range)", 0, 1, 2, 3, 4, -1));
+
+ // push_front()
+ l.clear();
+ l2.clear();
+ for(int i=4; i>=0; --i) {
+ l.push_front(nodes[i]);
+ l2.push_front(nodes[i+5]);
+ }
+
+ VERIFY(VerifyContainer(l, "push_front()", 0, 1, 2, 3, 4, -1));
+ VERIFY(VerifyContainer(l2, "push_front()", 5, 6, 7, 8, 9, -1));
+
+ // swap()
+ l.swap(l2);
+ VERIFY(VerifyContainer(l, "swap()", 5, 6, 7, 8, 9, -1));
+ VERIFY(VerifyContainer(l2, "swap()", 0, 1, 2, 3, 4, -1));
+
+ // erase()
+ ListInit(l2, nodes) += 0, 1, 2, 3, 4;
+ ListInit(l, nodes+5) += 5, 6, 7, 8, 9;
+ l.erase(++++l.begin());
+ VERIFY(VerifyContainer(l, "erase(single)", 5, 6, 8, 9, -1));
+
+ l.erase(l.begin(), l.end());
+ VERIFY(VerifyContainer(l, "erase(all)", -1));
+
+ ListInit(l, nodes) += 0, 1, 2;
+ VERIFY(l2.size() == 3);
+
+ l2.pop_front();
+ VERIFY(VerifyContainer(l2, "pop_front()", 1, 2, -1));
+
+ l2.pop_back();
+ VERIFY(VerifyContainer(l2, "pop_back()", 1, -1));
+
+ // remove
+ IntNode i1(1), i2(2), i3(3);
+ l.clear();
+
+ l.push_front(i1);
+ IntrusiveSDList::remove(i1);
+ VERIFY(VerifyContainer(l, "remove()", -1));
+
+ l.push_front(i1);
+ l.push_front(i2);
+ IntrusiveSDList::remove(i1);
+ VERIFY(VerifyContainer(l, "remove()", 2, -1));
+
+ l.push_front(i1);
+ IntrusiveSDList::remove(i2);
+ VERIFY(VerifyContainer(l, "remove()", 1, -1));
+
+ l.push_back(i2);
+ l.push_back(i3);
+ IntrusiveSDList::remove(i2);
+ VERIFY(VerifyContainer(l, "remove()", 1, 3, -1));
+
+
+ // const_iterator / begin
+ const intrusive_sdlist<IntNode> cilist;
+ intrusive_sdlist<IntNode>::const_iterator cit;
+ for(cit = cilist.begin(); cit != cilist.end(); ++cit)
+ VERIFY(cit == cilist.end()); // This is guaranteed to be false.
+
+
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestIntrusiveSList.cpp b/EASTL/test/source/TestIntrusiveSList.cpp
new file mode 100644
index 0000000..0112eea
--- /dev/null
+++ b/EASTL/test/source/TestIntrusiveSList.cpp
@@ -0,0 +1,38 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/bonus/intrusive_slist.h>
+#include <EABase/eabase.h>
+
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+//template class intrusive_slist<int>;
+
+
+
+int TestIntrusiveSList()
+{
+ int nErrorCount = 0;
+
+ // As of this writing, we don't yet have a completed intrusive_slist implementation.
+ // The interface is in place but the implementation hasn't been done yet.
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestIterator.cpp b/EASTL/test/source/TestIterator.cpp
new file mode 100644
index 0000000..b6c6f76
--- /dev/null
+++ b/EASTL/test/source/TestIterator.cpp
@@ -0,0 +1,579 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/deque.h>
+#include <EASTL/iterator.h>
+#include <EASTL/vector.h>
+#include <EASTL/set.h>
+#include <EASTL/array.h>
+#include <EASTL/numeric.h>
+#include <EASTL/list.h>
+#include <EASTL/slist.h>
+#include <EASTL/string.h>
+#include <EASTL/intrusive_list.h>
+#include <EASTL/memory.h>
+#include <EASTL/unique_ptr.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stdio.h>
+#include <string.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+template <class T>
+using detect_iterator_traits_reference = typename eastl::iterator_traits<T>::reference;
+
+// This is used below, though is currently disabled as documented below.
+struct IListNode : public eastl::intrusive_list_node{};
+
+int TestIterator_advance()
+{
+ int nErrorCount = 0;
+
+ {
+ // void advance(InputIterator& i, Distance n)
+ const int num_elements = 10;
+ int i;
+
+ eastl::vector<int> v;
+ for(i = 0; i < num_elements; i++)
+ v.push_back(i);
+
+ // test forward advancement
+ eastl::vector<int>::iterator it = v.begin();
+ for(i = 0; i < num_elements; i++)
+ {
+ EATEST_VERIFY(*it == v[i]);
+ eastl::advance(it, 1);
+ }
+
+ // test backwards advancement
+ eastl::vector<int>::iterator it2 = v.end();
+ i = num_elements - 1;
+ do
+ {
+ eastl::advance(it2, -1);
+ EATEST_VERIFY(*it2 == v[i]);
+ }
+ while(i-- != 0);
+ }
+
+ {
+ // void advance(InputIterator& i, Distance n)
+ eastl::list<int> intList;
+ intList.push_back(0);
+ intList.push_back(1);
+ intList.push_back(42);
+ intList.push_back(2);
+
+ eastl::list<int>::iterator it = intList.begin();
+ eastl::advance(it, intList.size());
+ EATEST_VERIFY(it == intList.end());
+
+ // Exercise advance with an signed Distance type.
+ it = intList.begin();
+ eastl::advance(it, (ssize_t)intList.size());
+ EATEST_VERIFY(it == intList.end());
+
+
+ eastl::slist<int> intSlist;
+ intSlist.push_front(0);
+ intSlist.push_front(1);
+ intSlist.push_front(42);
+ intSlist.push_front(2);
+
+ eastl::slist<int>::iterator its = intSlist.begin();
+ eastl::advance(its, intSlist.size());
+ EATEST_VERIFY(its == intSlist.end());
+
+ // Exercise advance with an signed Distance type.
+ its = intSlist.begin();
+ eastl::advance(its, (ssize_t)intSlist.size());
+ EATEST_VERIFY(its == intSlist.end());
+ }
+
+ {
+ // void next(InputIterator& i, Distance n)
+ eastl::vector<int> v;
+ v.push_back(0);
+ v.push_back(1);
+ v.push_back(42);
+ v.push_back(2);
+
+ eastl::vector<int>::iterator it = v.begin();
+ EATEST_VERIFY(*eastl::next(it, 0) == 0);
+ EATEST_VERIFY(*eastl::next(it /*testing the iterator distance default value*/) == 1);
+ EATEST_VERIFY(*eastl::next(it, 2) == 42);
+ }
+
+ {
+ // void prev(InputIterator& i, Distance n)
+ eastl::vector<int> v;
+ v.push_back(0);
+ v.push_back(1);
+ v.push_back(42);
+ v.push_back(2);
+
+ eastl::vector<int>::iterator it = v.end();
+ EATEST_VERIFY(*eastl::prev(it, 2) == 42);
+ EATEST_VERIFY(*eastl::prev(it /*testing the iterator distance default value*/) == 2);
+ }
+
+ return nErrorCount;
+}
+
+int TestIterator_moveIterator()
+{
+ int nErrorCount = 0;
+
+ {
+ eastl::vector<int> v = {0, 1, 42, 2};
+ const auto constBeginMoveIter = eastl::make_move_iterator(v.begin());
+
+ // operator++(int)
+ auto moveIter = constBeginMoveIter;
+ moveIter++; // the result of the expression is the incremented value, we need this test to read the existing state of the iterator.
+ EATEST_VERIFY(*moveIter != *constBeginMoveIter);
+
+ // operator--(int)
+ moveIter = constBeginMoveIter + 2; // points to '42'
+ moveIter--; // the result of the expression is the incremented value, we need this test to read the existing state of the iterator.
+ EATEST_VERIFY(*moveIter != *(constBeginMoveIter + 2));
+ }
+
+ {
+ // Ensure that move_iterator indeed move yielded value whenever possible.
+ auto x = eastl::make_unique<int>(42);
+ auto* pX = &x;
+ auto moveIter = eastl::make_move_iterator(pX);
+
+ constexpr bool isCorrectReferenceType = eastl::is_same_v<decltype(moveIter)::reference, eastl::unique_ptr<int>&&>;
+ constexpr bool isCorrectReturnType = eastl::is_same_v<decltype(*moveIter), eastl::unique_ptr<int>&&>;
+
+ static_assert(isCorrectReferenceType, "move_iterator::reference has wrong type.");
+ static_assert(isCorrectReturnType, "move_iterator::operator*() has wrong return type.");
+ EATEST_VERIFY(isCorrectReferenceType);
+ EATEST_VERIFY(isCorrectReturnType);
+
+ auto pMoveX = *moveIter;
+ EATEST_VERIFY(*pMoveX == 42);
+ }
+
+ // Bellow are regression tests that ensure we are covering the defect LWG 2106: http://cplusplus.github.io/LWG/lwg-defects.html#2106
+ {
+ // Check that we support iterators yielding const references.
+ const int x = 42;
+ const int* pX = &x;
+ auto moveIter = eastl::make_move_iterator(pX);
+
+ constexpr bool isCorrectReferenceType = eastl::is_same_v<decltype(moveIter)::reference, const int&&>;
+ constexpr bool isCorrectReturnType = eastl::is_same_v<decltype(*moveIter), const int&&>;
+
+ static_assert(isCorrectReferenceType, "move_iterator::reference has wrong type.");
+ static_assert(isCorrectReturnType, "move_iterator::operator*() has wrong return type.");
+ EATEST_VERIFY(isCorrectReferenceType);
+ EATEST_VERIFY(isCorrectReturnType);
+
+ auto pCopiedX = *moveIter;
+ EATEST_VERIFY(pCopiedX == 42);
+ }
+
+ {
+ // Check that we support iterators yielding plain value (typically a proxy-iterator).
+ struct FakeProxyIterator
+ {
+ using iterator_category = EASTL_ITC_NS::forward_iterator_tag;
+ using difference_type = ptrdiff_t;
+ using value_type = int;
+ using pointer = int; // Note that we are yielding by value.
+ using reference = int; // Note that we are yielding by value.
+
+ reference operator*() const { return 42; }
+ pointer operator->() { return 42; }
+ FakeProxyIterator& operator++() { return *this; }
+ FakeProxyIterator operator++(int) { return {}; }
+
+ bool operator==(const FakeProxyIterator& rhs) { return true; };
+ bool operator!=(const FakeProxyIterator& rhs) { return false; };
+ };
+
+ FakeProxyIterator it = {};
+ auto moveIter = eastl::make_move_iterator(it);
+
+ constexpr bool isCorrectReferenceType = eastl::is_same_v<decltype(moveIter)::reference, int>;
+ constexpr bool isCorrectReturnType = eastl::is_same_v<decltype(*moveIter), int>;
+
+ static_assert(isCorrectReferenceType, "move_iterator::reference has wrong type.");
+ static_assert(isCorrectReturnType, "move_iterator::operator*() has wrong return type.");
+ EATEST_VERIFY(isCorrectReferenceType);
+ EATEST_VERIFY(isCorrectReturnType);
+
+ auto pCopiedX = *moveIter;
+ EATEST_VERIFY(pCopiedX == 42);
+ }
+
+ return nErrorCount;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestIterator
+//
+int TestIterator()
+{
+ int nErrorCount = 0;
+ nErrorCount += TestIterator_advance();
+ nErrorCount += TestIterator_moveIterator();
+
+ {
+ // reverse_iterator
+ // reverse_iterator<Iterator> make_reverse_iterator(Iterator mi)
+ {
+ eastl::vector<int> src;
+ for(int i = 0; i < 10; i++)
+ src.push_back(i); // src should become {0,1,2,3,4,5,6,7,8,9}
+
+ auto itr = eastl::make_reverse_iterator(src.end());
+ EATEST_VERIFY(*itr == 9); ++itr;
+ EATEST_VERIFY(*itr == 8); ++itr;
+ EATEST_VERIFY(*itr == 7); ++itr;
+ EATEST_VERIFY(*itr == 6); ++itr;
+ EATEST_VERIFY(*itr == 5); ++itr;
+ EATEST_VERIFY(*itr == 4); ++itr;
+ EATEST_VERIFY(*itr == 3); ++itr;
+ EATEST_VERIFY(*itr == 2); ++itr;
+ EATEST_VERIFY(*itr == 1); ++itr;
+ EATEST_VERIFY(*itr == 0); ++itr;
+ EATEST_VERIFY( itr == src.rend());
+ EATEST_VERIFY( itr == eastl::make_reverse_iterator(src.begin()));
+ }
+ }
+
+ {
+ // Regression bug with assign/insert combined with reverse iterator.
+ eastl::vector<int> a;
+ for (int i = 0; i < 10; ++i) {
+ a.push_back(i);
+ }
+
+ eastl::deque<int> d;
+ d.assign(a.rbegin(), a.rend());
+ for (int i = 0; i < 10; ++i) {
+ EATEST_VERIFY(a[i] == d[a.size() - i - 1]);
+ }
+ d.insert(d.end(), a.rbegin(), a.rend());
+ for (int i = 0; i < 10; ++i) {
+ EATEST_VERIFY(a[i] == d[d.size() - i - 1]);
+ }
+
+ eastl::vector<int> b;
+ b.assign(a.rbegin(), a.rend());
+ for (int i = 0; i < 10; ++i) {
+ EATEST_VERIFY(a[i] == b[a.size() - i - 1]);
+ }
+ b.insert(b.end(), a.rbegin(), a.rend());
+ for (int i = 0; i < 10; ++i) {
+ EATEST_VERIFY(a[i] == b[b.size() - i - 1]);
+ }
+ }
+
+ {
+ // move_iterator
+ // move_iterator<Iterator> make_move_iterator(Iterator mi)
+ typedef eastl::vector<eastl::string> StringArray;
+
+ StringArray src;
+ for(eastl_size_t i = 0; i < 4; i++)
+ src.push_back(eastl::string(1, (char8_t)('0' + i))); // v should become {"0", "1", "2", "3"};
+
+ // Moves the values out of the string array and into the result.
+ StringArray dst(eastl::make_move_iterator(src.begin()), eastl::make_move_iterator(src.end()));
+
+ EATEST_VERIFY((src.size() == 4) && (src[0] == "") && (src[3] == ""));
+ EATEST_VERIFY((dst.size() == 4) && (dst[0] == "0") && (dst[3] == "3"));
+ }
+
+ {
+ // back_insert_iterator
+ // back_inserter
+ EA_CPP14_CONSTEXPR int n = 3;
+ eastl::vector<TestObject> v1, v2, v3;
+ v1.resize(n); v2.reserve(n); v3.reserve(n);
+ {
+ int64_t copyCtorCount0 = TestObject::sTOCopyCtorCount, moveCtorCount0 = TestObject::sTOMoveCtorCount;
+ eastl::copy(v1.begin(), v1.end(), eastl::back_inserter(v2));
+ EATEST_VERIFY(v1.size() == v2.size() && TestObject::sTOCopyCtorCount == (copyCtorCount0 + n) &&
+ TestObject::sTOMoveCtorCount == moveCtorCount0);
+ }
+ {
+ int64_t copyCtorCount0 = TestObject::sTOCopyCtorCount, moveCtorCount0 = TestObject::sTOMoveCtorCount;
+ eastl::move(v1.begin(), v1.end(), eastl::back_inserter(v3));
+ EATEST_VERIFY(v1.size() == v3.size() && TestObject::sTOCopyCtorCount == copyCtorCount0 &&
+ TestObject::sTOMoveCtorCount == (moveCtorCount0 + n));
+ }
+ }
+
+ {
+ // front_insert_iterator
+ // front_inserter
+ // To do.
+ }
+
+ {
+ // insert_iterator
+ // inserter
+ // To do.
+ }
+
+ {
+ // difference_type distance(InputIterator first, InputIterator last)
+ eastl::vector<int> intVector = {0, 1, 2, 3, 4, 5, 6, 7};
+ EATEST_VERIFY(eastl::distance(intVector.begin(), intVector.end()) == 8);
+ }
+
+
+ {
+ #if EASTL_BEGIN_END_ENABLED
+ // begin / end
+ // auto inline begin(Container& container) -> decltype(container.begin())
+ // auto inline end(Container& container) -> decltype(container.end())
+
+ eastl::vector<int> intVector;
+ eastl::vector<int>::iterator intVectorIterator = eastl::begin(intVector);
+ EATEST_VERIFY(intVectorIterator == eastl::end(intVector));
+
+ eastl::list<int> intList;
+ eastl::list<int>::iterator intListIterator = eastl::begin(intList);
+ EATEST_VERIFY(intListIterator == eastl::end(intList));
+
+ eastl::set<int> intSet;
+ eastl::set<int>::iterator intSetIterator = eastl::begin(intSet);
+ EATEST_VERIFY(intSetIterator == eastl::end(intSet));
+
+ eastl::array<int, 0> intArray;
+ eastl::array<int>::iterator intArrayIterator = eastl::begin(intArray);
+ EATEST_VERIFY(intArrayIterator == eastl::end(intArray));
+
+ eastl::intrusive_list<IListNode> intIList;
+ eastl::intrusive_list<IListNode>::iterator intIListIterator = eastl::begin(intIList);
+ EATEST_VERIFY(intIListIterator == eastl::end(intIList));
+
+ eastl::string8 str8;
+ eastl::string8::iterator string8Iterator = eastl::begin(str8);
+ EATEST_VERIFY(string8Iterator == eastl::end(str8));
+ #endif
+ }
+
+ // eastl::data
+ {
+ eastl::array<int, 0> intArray;
+ int* pIntArrayData = eastl::data(intArray);
+ EATEST_VERIFY(pIntArrayData == intArray.data());
+
+ eastl::vector<int> intVector;
+ int* pIntVectorData = eastl::data(intVector);
+ EATEST_VERIFY(pIntVectorData == intVector.data());
+
+ int intCArray[34];
+ int* pIntCArray = eastl::data(intCArray);
+ EATEST_VERIFY(pIntCArray == intCArray);
+
+ std::initializer_list<int> intInitList;
+ const int* pIntInitList = eastl::data(intInitList);
+ EATEST_VERIFY(pIntInitList == intInitList.begin());
+ }
+
+ // eastl::size
+ {
+ eastl::vector<int> intVector;
+ intVector.push_back();
+ intVector.push_back();
+ intVector.push_back();
+ EATEST_VERIFY(eastl::size(intVector) == 3);
+
+ int intCArray[34];
+ EATEST_VERIFY(eastl::size(intCArray) == 34);
+ static_assert(eastl::size(intCArray) == 34, "eastl::size failure");
+ }
+
+ // eastl::ssize
+ {
+ eastl::vector<int> intVector;
+ intVector.push_back();
+ intVector.push_back();
+ intVector.push_back();
+ EATEST_VERIFY(eastl::ssize(intVector) == (signed)3);
+
+ int intCArray[34];
+ EATEST_VERIFY(eastl::ssize(intCArray) == (signed)34);
+ static_assert(eastl::ssize(intCArray) == 34, "eastl::ssize failure");
+ }
+
+ // eastl::empty
+ {
+ eastl::vector<int> intVector;
+ EATEST_VERIFY(eastl::empty(intVector));
+ intVector.push_back();
+ EATEST_VERIFY(!eastl::empty(intVector));
+
+ std::initializer_list<int> intInitListEmpty;
+ EATEST_VERIFY(eastl::empty(intInitListEmpty));
+ EATEST_VERIFY(!eastl::empty({1, 2, 3, 4, 5, 6}));
+ }
+
+ // Range-based for loops
+ {
+ {
+ eastl::vector<int> v;
+ int I = 0;
+
+ v.push_back(0);
+ v.push_back(1);
+
+ for(int i : v)
+ EATEST_VERIFY(i == I++);
+ }
+
+ {
+ eastl::string s8;
+ char C = 'a';
+
+ s8.push_back('a');
+ s8.push_back('b');
+
+ for(char c : s8)
+ EATEST_VERIFY(c == C++);
+ }
+ }
+
+
+ {
+ // is_iterator_wrapper
+ static_assert((eastl::is_iterator_wrapper<void>::value == false), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<int>::value == false), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<int*>::value == false), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<eastl::array<int>::iterator>::value == false), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<eastl::array<char>*>::value == false), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<eastl::vector<char> >::value == false), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<eastl::generic_iterator<int*> >::value == true), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<eastl::move_iterator<eastl::array<int>::iterator> >::value == true), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<eastl::reverse_iterator<eastl::array<int>::iterator> >::value == false), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<eastl::reverse_iterator<int*> >::value == false), "is_iterator_wrapper failure");
+ static_assert((eastl::is_iterator_wrapper<eastl::reverse_iterator<eastl::move_iterator<int*>> >::value == true), "is_iterator_wrapper failure");
+ }
+
+
+ {
+ // unwrap_iterator
+ int intArray[2];
+ int* pInt = eastl::unwrap_iterator(&intArray[0]);
+ intArray[0] = 17;
+ EATEST_VERIFY(*pInt == 17);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_iterator(&intArray[0])), int*>::value == true), "unwrap_iterator failure");
+
+ eastl::generic_iterator<int*> giIntArray(intArray);
+ pInt = eastl::unwrap_iterator(giIntArray);
+ intArray[0] = 18;
+ EATEST_VERIFY(*pInt == 18);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_iterator(giIntArray)), int*>::value == true), "unwrap_iterator failure");
+
+ eastl::vector<int> intVector(4, 19);
+ eastl::vector<int>::iterator itVector = eastl::unwrap_iterator(intVector.begin());
+ EATEST_VERIFY(*itVector == 19);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_iterator(intVector.begin())), eastl::vector<int>::iterator>::value == true), "unwrap_iterator failure");
+
+ eastl::move_iterator<eastl::vector<int>::iterator> miIntVector(intVector.begin());
+ itVector = eastl::unwrap_iterator(miIntVector);
+ intVector[0] = 20;
+ EATEST_VERIFY(*itVector == 20);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_iterator(miIntVector)), eastl::vector<int>::iterator>::value == true), "unwrap_iterator failure");
+
+ eastl::reverse_iterator<eastl::vector<int>::iterator> riIntVector = intVector.rbegin();
+ eastl::reverse_iterator<eastl::vector<int>::iterator> riUnwrapped = eastl::unwrap_iterator(riIntVector);
+ EATEST_VERIFY(*riUnwrapped == 19);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_iterator(riIntVector)), eastl::reverse_iterator<eastl::vector<int>::iterator>>::value == true), "unwrap_iterator failure");
+
+ eastl::reverse_iterator<eastl::move_iterator<eastl::vector<int>::iterator>> rimiIntVec(miIntVector);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_iterator(rimiIntVec)), eastl::reverse_iterator<eastl::vector<int>::iterator>>::value == true), "unwrap_iterator failure");
+
+ eastl::reverse_iterator<eastl::generic_iterator<int*>> rigiIntArray(giIntArray);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_iterator(rigiIntArray)), eastl::reverse_iterator<int*>>::value == true), "unwrap_iterator failure");
+
+ eastl::deque<int> intDeque(3);
+ eastl::deque<int>::iterator begin = intDeque.begin();
+ eastl::generic_iterator<eastl::deque<int>::iterator> giWrappedBegin(begin);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_iterator(giWrappedBegin)), eastl::deque<int>::iterator>::value == true), "unwrap_iterator failure");
+
+ eastl::deque<int>::iterator unwrappedBegin = eastl::unwrap_iterator(giWrappedBegin);
+ EATEST_VERIFY(begin == unwrappedBegin);
+ }
+
+ {
+ // unwrap_generic_iterator
+ int intArray[2] = {0, 1};
+ eastl::generic_iterator<int*> giIntArray(intArray);
+ int* pInt = eastl::unwrap_generic_iterator(giIntArray);
+ EATEST_VERIFY(*pInt == 0);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_generic_iterator(giIntArray)), int*>::value == true), "unwrap_iterator failure");
+
+ eastl::move_iterator<int*> miIntArray(intArray);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_generic_iterator(miIntArray)), eastl::move_iterator<int*>>::value == true), "unwrap_iterator failure");
+
+ eastl::vector<int> intVector(1, 1);
+ eastl::generic_iterator<eastl::vector<int>::iterator> giVectorInt(intVector.begin());
+ eastl::vector<int>::iterator it = unwrap_generic_iterator(giVectorInt);
+ EATEST_VERIFY(*it == 1);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_generic_iterator(giVectorInt)), eastl::vector<int>::iterator>::value == true), "unwrap_iterator failure");
+ }
+
+ {
+ // unwrap_move_iterator
+ int intArray[2] = {0, 1};
+ eastl::move_iterator<int*> miIntArray(intArray);
+ int* pInt = eastl::unwrap_move_iterator(miIntArray);
+ EATEST_VERIFY(*pInt == 0);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_move_iterator(miIntArray)), int*>::value == true), "unwrap_iterator failure");
+
+ eastl::generic_iterator<int*> giIntArray(intArray);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_move_iterator(giIntArray)), eastl::generic_iterator<int*>>::value == true), "unwrap_iterator failure");
+
+ eastl::vector<int> intVector(1, 1);
+ eastl::move_iterator<eastl::vector<int>::iterator> miVectorInt(intVector.begin());
+ eastl::vector<int>::iterator it = unwrap_move_iterator(miVectorInt);
+ EATEST_VERIFY(*it == 1);
+ static_assert((eastl::is_same<decltype(eastl::unwrap_move_iterator(miVectorInt)), eastl::vector<int>::iterator>::value == true), "unwrap_iterator failure");
+ }
+
+ {
+ // array cbegin - cend
+ int arr[3]{ 1, 2, 3 };
+ auto b = eastl::cbegin(arr);
+ auto e = eastl::cend(arr);
+ EATEST_VERIFY(*b == 1);
+
+ auto dist = eastl::distance(b,e);
+ EATEST_VERIFY(dist == 3);
+ }
+
+ {
+ // Regression test that ensure N3844 is working correctly.
+ static_assert(!eastl::is_detected<detect_iterator_traits_reference, int>::value, "detecting iterator_traits<int> should SFINAE gracefully.");
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestList.cpp b/EASTL/test/source/TestList.cpp
new file mode 100644
index 0000000..001b79a
--- /dev/null
+++ b/EASTL/test/source/TestList.cpp
@@ -0,0 +1,1090 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EASTL/list.h>
+#include <EASTL/sort.h>
+#include <EASTL/fixed_allocator.h>
+
+using namespace eastl;
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::list<bool>;
+template class eastl::list<int>;
+template class eastl::list<Align64>;
+template class eastl::list<TestObject>;
+// template class eastl::list<eastl::unique_ptr<int>>;
+
+
+int TestList()
+{
+ int nErrorCount = 0;
+
+ // list();
+ {
+ eastl::list<int> l;
+ VERIFY(l.size() == 0);
+ VERIFY(l.empty());
+ VERIFY(l.validate());
+ VERIFY(l.begin() == l.end());
+ }
+
+ // list(const allocator_type& allocator);
+ {
+ MallocAllocator::reset_all();
+ MallocAllocator mallocator;
+ {
+ eastl::list<int, MallocAllocator> l(mallocator);
+ VERIFY(l.get_allocator() == mallocator);
+ l.push_front(42);
+ VERIFY(MallocAllocator::mAllocCountAll != 0);
+ }
+ VERIFY(MallocAllocator::mAllocCountAll == MallocAllocator::mFreeCountAll);
+ }
+
+ // explicit list(size_type n, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
+ {
+ const int test_size = 42;
+ eastl::list<int> l(test_size);
+ VERIFY(!l.empty());
+ VERIFY(l.size() == test_size);
+ VERIFY(l.validate());
+
+ VERIFY(eastl::all_of(l.begin(), l.end(), [](int e)
+ { return e == 0; }));
+ }
+
+ // list(size_type n, const value_type& value, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
+ {
+ const int test_size = 42;
+ const int test_val = 435;
+
+ eastl::list<int> l(42, test_val);
+ VERIFY(!l.empty());
+ VERIFY(l.size() == test_size);
+ VERIFY(l.validate());
+
+ VERIFY(eastl::all_of(l.begin(), l.end(), [=](int e)
+ { return e == test_val; }));
+ }
+
+ // list(const this_type& x);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> b(a);
+ VERIFY(a == b);
+ VERIFY(a.validate());
+ VERIFY(a.size() == b.size());
+ VERIFY(b.validate());
+ }
+
+ // list(const this_type& x, const allocator_type& allocator);
+ {
+ MallocAllocator mallocator;
+ eastl::list<int, MallocAllocator> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int, MallocAllocator> b(a, mallocator);
+ VERIFY(a == b);
+ VERIFY(a.validate());
+ VERIFY(a.size() == b.size());
+ VERIFY(b.validate());
+ VERIFY(a.get_allocator() == b.get_allocator());
+ }
+
+ // list(this_type&& x);
+ // list(this_type&&, const allocator_type&);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ VERIFY(!a.empty());
+ VERIFY(a.size() == 10);
+ VERIFY(a.validate());
+
+ eastl::list<int> b(eastl::move(a));
+ VERIFY(a.empty());
+ VERIFY(!b.empty());
+ VERIFY(a.size() == 0);
+ VERIFY(b.size() == 10);
+
+ VERIFY(a != b);
+ VERIFY(a.size() != b.size());
+ VERIFY(a.validate());
+ VERIFY(b.validate());
+ }
+
+ // list(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::for_each(a.begin(), a.end(), [&](int e)
+ {
+ static int inc = 0;
+ VERIFY(inc++ == e);
+ });
+ }
+
+ // list(InputIterator first, InputIterator last);
+ {
+ eastl::list<int> ref = {3, 4, 5, 6, 7};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto start = a.begin();
+ eastl::advance(start, 3);
+
+ auto end = start;
+ eastl::advance(end, 5);
+
+ eastl::list<int> b(start, end);
+
+ VERIFY(b == ref);
+ VERIFY(a.validate());
+ VERIFY(b.validate());
+
+ VERIFY(a.size() == 10);
+ VERIFY(b.size() == 5);
+
+ VERIFY(!b.empty());
+ VERIFY(!a.empty());
+ }
+
+ // this_type& operator=(const this_type& x);
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ // this_type& operator=(this_type&& x);
+ {
+ const eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> b = a;
+ VERIFY(a.validate());
+ VERIFY(b.validate());
+ VERIFY(a.size() == 10);
+ VERIFY(b.size() == 10);
+ VERIFY(!a.empty());
+ VERIFY(!b.empty());
+ VERIFY(b == a);
+
+ eastl::list<int> c = eastl::move(b);
+ VERIFY(b.empty());
+
+ VERIFY(c == a);
+ VERIFY(c.size() == 10);
+ VERIFY(c.validate());
+ }
+
+ // void swap(this_type& x);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> b = {};
+
+ VERIFY(a.validate());
+ VERIFY(b.validate());
+ VERIFY(!a.empty());
+ VERIFY(b.empty());
+
+ b.swap(a);
+
+ VERIFY(a.validate());
+ VERIFY(b.validate());
+ VERIFY(a.empty());
+ VERIFY(!b.empty());
+ }
+
+ // void assign(size_type n, const value_type& value);
+ {
+ eastl::list<int> ref = {42, 42, 42, 42};
+ eastl::list<int> a = {0, 1, 2, 3};
+ a.assign(4, 42);
+ VERIFY(a == ref);
+ VERIFY(a.validate());
+ VERIFY(!a.empty());
+ VERIFY(a.size() == 4);
+ }
+
+ // void assign(InputIterator first, InputIterator last);
+ {
+ eastl::list<int> ref = eastl::list<int>{3, 4, 5, 6, 7};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> b;
+
+ auto start = a.begin();
+ eastl::advance(start, 3);
+
+ auto end = start;
+ eastl::advance(end, 5);
+
+ b.assign(start, end);
+
+ VERIFY(b == ref);
+ VERIFY(a.validate());
+ VERIFY(b.validate());
+
+ VERIFY(a.size() == 10);
+ VERIFY(b.size() == 5);
+
+ VERIFY(!b.empty());
+ VERIFY(!a.empty());
+ }
+
+ // void assign(std::initializer_list<value_type> ilist);
+ {
+ eastl::list<int> ref = eastl::list<int>{3, 4, 5, 6, 7};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> b;
+
+ auto start = a.begin();
+ eastl::advance(start, 3);
+
+ auto end = start;
+ eastl::advance(end, 5);
+
+ b.assign(start, end);
+
+ VERIFY(b == ref);
+ VERIFY(a.validate());
+ VERIFY(b.validate());
+
+ VERIFY(a.size() == 10);
+ VERIFY(b.size() == 5);
+
+ VERIFY(!b.empty());
+ VERIFY(!a.empty());
+ }
+
+ // iterator begin()
+ // const_iterator begin() const
+ // const_iterator cbegin() const
+ // iterator end()
+ // const_iterator end() const
+ // const_iterator cend() const
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ {
+ static int inc = 0;
+ auto iter = a.begin();
+ while(iter != a.end())
+ {
+ VERIFY(*iter++ == inc++);
+ }
+ }
+
+ {
+ static int inc = 0;
+ auto iter = a.cbegin();
+ while(iter != a.cend())
+ {
+ VERIFY(*iter++ == inc++);
+ }
+ }
+ }
+
+ // reverse_iterator rbegin()
+ // const_reverse_iterator rbegin() const
+ // const_reverse_iterator crbegin() const
+ // reverse_iterator rend()
+ // const_reverse_iterator rend() const
+ // const_reverse_iterator crend() const
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ {
+ static int inc = 9;
+ auto iter = a.rbegin();
+ while(iter != a.rend())
+ {
+ VERIFY(*iter == inc--);
+ iter++;
+ }
+ }
+
+ {
+ static int inc = 9;
+ auto iter = a.crbegin();
+ while(iter != a.crend())
+ {
+ VERIFY(*iter == inc--);
+ iter++;
+ }
+ }
+ }
+
+ // bool empty() const
+ {
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ VERIFY(!a.empty());
+ }
+
+ {
+ eastl::list<int> a = {};
+ VERIFY(a.empty());
+ }
+ }
+
+ // size_type size() const
+ {
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ VERIFY(a.size() == 10);
+ }
+
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4};
+ VERIFY(a.size() == 5);
+ }
+
+ {
+ eastl::list<int> a = {0, 1};
+ VERIFY(a.size() == 2);
+ }
+
+ {
+ eastl::list<int> a = {};
+ VERIFY(a.size() == 0);
+ }
+ }
+
+ // void resize(size_type n, const value_type& value);
+ // void resize(size_type n);
+ {
+ {
+ eastl::list<int> a;
+ a.resize(10);
+ VERIFY(a.size() == 10);
+ VERIFY(!a.empty());
+ VERIFY(eastl::all_of(a.begin(), a.end(), [](int i)
+ { return i == 0; }));
+ }
+
+ {
+ eastl::list<int> a;
+ a.resize(10, 42);
+ VERIFY(a.size() == 10);
+ VERIFY(!a.empty());
+ VERIFY(eastl::all_of(a.begin(), a.end(), [](int i)
+ { return i == 42; }));
+ }
+ }
+
+ // reference front();
+ // const_reference front() const;
+ {
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ VERIFY(a.front() == 0);
+
+ a.front() = 42;
+ VERIFY(a.front() == 42);
+ }
+
+ {
+ const eastl::list<int> a = {5, 6, 7, 8, 9};
+ VERIFY(a.front() == 5);
+ }
+
+ {
+ eastl::list<int> a = {9};
+ VERIFY(a.front() == 9);
+
+ a.front() = 42;
+ VERIFY(a.front() == 42);
+ }
+ }
+
+ // reference back();
+ // const_reference back() const;
+ {
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ VERIFY(a.back() == 9);
+
+ a.back() = 42;
+ VERIFY(a.back() == 42);
+ }
+
+ {
+ const eastl::list<int> a = {5, 6, 7, 8, 9};
+ VERIFY(a.back() == 9);
+ }
+
+ {
+ eastl::list<int> a = {9};
+ VERIFY(a.back() == 9);
+
+ a.back() = 42;
+ VERIFY(a.back() == 42);
+ }
+ }
+
+ // void emplace_front(Args&&... args);
+ // void emplace_front(value_type&& value);
+ // void emplace_front(const value_type& value);
+ {
+ eastl::list<int> ref = {9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
+ eastl::list<int> a;
+
+ for(int i = 0; i < 10; i++)
+ a.emplace_front(i);
+
+ VERIFY(a == ref);
+ }
+
+ // template <typename... Args>
+ // void emplace_back(Args&&... args);
+ // void emplace_back(value_type&& value);
+ // void emplace_back(const value_type& value);
+ {
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a;
+
+ for(int i = 0; i < 10; i++)
+ a.emplace_back(i);
+
+ VERIFY(a == ref);
+ }
+
+ {
+ struct A
+ {
+ A() : mValue(0) {}
+ A(int in) : mValue(in) {}
+ int mValue;
+ bool operator==(const A& other) const { return mValue == other.mValue; }
+ };
+
+ {
+ eastl::list<A> ref = {{1}, {2}, {3}};
+ eastl::list<A> a;
+
+ a.emplace_back(1);
+ a.emplace_back(2);
+ a.emplace_back(3);
+
+ VERIFY(a == ref);
+ }
+
+ {
+ eastl::list<A> ref = {{1}, {2}, {3}};
+ eastl::list<A> a;
+
+ a.emplace_back(A(1));
+ a.emplace_back(A(2));
+ a.emplace_back(A(3));
+
+ VERIFY(a == ref);
+ }
+
+
+ {
+ eastl::list<A> ref = {{1}, {2}, {3}};
+ eastl::list<A> a;
+
+ A a1(1);
+ A a2(2);
+ A a3(3);
+
+ a.emplace_back(a1);
+ a.emplace_back(a2);
+ a.emplace_back(a3);
+
+ VERIFY(a == ref);
+ }
+ }
+ }
+
+ // void push_front(const value_type& value);
+ // void push_front(value_type&& x);
+ // reference push_front();
+ {
+ {
+ eastl::list<int> ref = {9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
+ eastl::list<int> a;
+
+ for(int i = 0; i < 10; i++)
+ a.push_front(i);
+
+ VERIFY(a == ref);
+
+ }
+
+ {
+ eastl::list<int> a;
+ auto& front_ref = a.push_front();
+ front_ref = 42;
+ VERIFY(a.front() == 42);
+ }
+ }
+
+ // void* push_front_uninitialized();
+ {
+ eastl::list<int> a;
+ for (unsigned i = 0; i < 100; i++)
+ {
+ VERIFY(a.push_front_uninitialized() != nullptr);
+ VERIFY(a.size() == (i + 1));
+ }
+ }
+
+ // void push_back(const value_type& value);
+ // void push_back(value_type&& x);
+ {
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a;
+
+ for(int i = 0; i < 10; i++)
+ a.push_back(i);
+
+ VERIFY(a == ref);
+ }
+
+ {
+ struct A { int mValue; };
+ eastl::list<A> a;
+ a.push_back(A{42});
+ VERIFY(a.back().mValue == 42);
+ }
+ }
+
+ // reference push_back();
+ {
+ eastl::list<int> a;
+ auto& back_ref = a.push_back();
+ back_ref = 42;
+ VERIFY(a.back() == 42);
+ }
+
+ // void* push_back_uninitialized();
+ {
+ eastl::list<int> a;
+ for (unsigned int i = 0; i < 100; i++)
+ {
+ VERIFY(a.push_back_uninitialized() != nullptr);
+ VERIFY(a.size() == (i + 1));
+ }
+ }
+
+ // void pop_front();
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ for(unsigned i = 0; i < 10; i++)
+ {
+ VERIFY(unsigned(a.front()) == i);
+ a.pop_front();
+ }
+ }
+
+ // void pop_back();
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ for(unsigned i = 0; i < 10; i++)
+ {
+ VERIFY(unsigned(a.back()) == (9 - i));
+ a.pop_back();
+ }
+ }
+
+ // iterator emplace(const_iterator position, Args&&... args);
+ // iterator emplace(const_iterator position, value_type&& value);
+ // iterator emplace(const_iterator position, const value_type& value);
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 42, 5, 6, 7, 8, 9};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto insert_pos = a.begin();
+ eastl::advance(insert_pos, 5);
+
+ a.emplace(insert_pos, 42);
+ VERIFY(a == ref);
+ }
+
+ // iterator insert(const_iterator position);
+ // iterator insert(const_iterator position, const value_type& value);
+ // iterator insert(const_iterator position, value_type&& x);
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 42, 5, 6, 7, 8, 9};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto insert_pos = a.begin();
+ eastl::advance(insert_pos, 5);
+
+ a.insert(insert_pos, 42);
+ VERIFY(a == ref);
+ }
+
+ // void insert(const_iterator position, size_type n, const value_type& value);
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 42, 42, 42, 42, 5, 6, 7, 8, 9};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto insert_pos = a.begin();
+ eastl::advance(insert_pos, 5);
+
+ auto result = a.insert(insert_pos, 4, 42);
+ VERIFY(a == ref);
+ VERIFY(*result == 42);
+ VERIFY(*(--result) == 4);
+ }
+
+ // void insert(const_iterator position, InputIterator first, InputIterator last);
+ {
+ eastl::list<int> to_insert = {42, 42, 42, 42};
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 42, 42, 42, 42, 5, 6, 7, 8, 9};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto insert_pos = a.begin();
+ eastl::advance(insert_pos, 5);
+
+ auto result = a.insert(insert_pos, to_insert.begin(), to_insert.end());
+ VERIFY(a == ref);
+ VERIFY(*result == 42);
+ VERIFY(*(--result) == 4);
+ }
+
+ // iterator insert(const_iterator position, std::initializer_list<value_type> ilist);
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 42, 42, 42, 42, 5, 6, 7, 8, 9};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto insert_pos = a.begin();
+ eastl::advance(insert_pos, 5);
+
+ a.insert(insert_pos, {42, 42, 42, 42});
+ VERIFY(a == ref);
+ }
+
+ // iterator erase(const_iterator position);
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a = {0, 1, 2, 3, 4, 42, 5, 6, 7, 8, 9};
+
+ auto erase_pos = a.begin();
+ eastl::advance(erase_pos, 5);
+
+ auto iter_after_removed = a.erase(erase_pos);
+ VERIFY(*iter_after_removed == 5);
+ VERIFY(a == ref);
+ }
+
+ // iterator erase(const_iterator first, const_iterator last);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 42, 42, 42, 42, 5, 6, 7, 8, 9};
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto erase_begin = a.begin();
+ eastl::advance(erase_begin, 5);
+
+ auto erase_end = erase_begin;
+ eastl::advance(erase_end, 4);
+
+ a.erase(erase_begin, erase_end);
+ VERIFY(a == ref);
+ }
+
+ // reverse_iterator erase(const_reverse_iterator position);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 42, 5, 6, 7, 8, 9};
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto erase_rbegin = a.rbegin();
+ eastl::advance(erase_rbegin, 5);
+
+ auto iter_after_remove = a.erase(erase_rbegin);
+ VERIFY(*iter_after_remove == 4);
+ VERIFY(a == ref);
+
+ }
+
+ // reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 42, 42, 42, 42, 5, 6, 7, 8, 9};
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto erase_crbegin = a.crbegin();
+ auto erase_crend = a.crbegin();
+ eastl::advance(erase_crbegin, 4);
+ eastl::advance(erase_crend, 8);
+
+ auto iter_after_removed = a.erase(erase_crbegin, erase_crend);
+ VERIFY(*iter_after_removed == 4);
+ VERIFY(a == ref);
+ }
+
+ // void clear()
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ a.clear();
+ VERIFY(a.empty());
+ VERIFY(a.size() == 0);
+ }
+
+ // void reset_lose_memory()
+ {
+ typedef eastl::list<int, fixed_allocator> IntList;
+ typedef IntList::node_type IntListNode;
+ const size_t kBufferCount = 10;
+ IntListNode buffer1[kBufferCount];
+ IntList intList1;
+ const size_t kAlignOfIntListNode = EA_ALIGN_OF(IntListNode);
+ intList1.get_allocator().init(buffer1, sizeof(buffer1), sizeof(IntListNode), kAlignOfIntListNode);
+
+ intList1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ VERIFY(!intList1.empty());
+ VERIFY(intList1.size() == 10);
+ intList1.reset_lose_memory();
+ VERIFY(intList1.empty());
+ VERIFY(intList1.size() == 0);
+ }
+
+ // void remove(const T& x);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> ref = {0, 1, 2, 3, 5, 6, 7, 8, 9};
+ a.remove(4);
+ VERIFY(a == ref);
+ }
+
+ // void remove_if(Predicate);
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> ref = {0, 1, 2, 3, 5, 6, 7, 8, 9};
+ a.remove_if([](int e) { return e == 4; });
+ VERIFY(a == ref);
+ }
+
+ // void reverse()
+ {
+ eastl::list<int> a = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> ref = {9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
+ a.reverse();
+ VERIFY(a == ref);
+ }
+
+ // void splice(const_iterator position, this_type& x);
+ {
+ const eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a1 = {0, 1, 2, 3, 4};
+ eastl::list<int> a2 = {5, 6, 7, 8, 9};
+
+ eastl::list<int> a;
+ a.splice(a.begin(), a2);
+ a.splice(a.begin(), a1);
+
+ VERIFY(a == ref);
+ VERIFY(a1.empty());
+ VERIFY(a2.empty());
+ }
+
+ // void splice(const_iterator position, this_type& x, const_iterator i);
+ {
+ const eastl::list<int> ref = {0, 5};
+ eastl::list<int> a1 = {-1, -1, 0};
+ eastl::list<int> a2 = {-1, -1, 5};
+
+ auto a1_begin = a1.begin();
+ auto a2_begin = a2.begin();
+
+ eastl::advance(a1_begin, 2);
+ eastl::advance(a2_begin, 2);
+
+ eastl::list<int> a;
+ a.splice(a.begin(), a2, a2_begin);
+ a.splice(a.begin(), a1, a1_begin);
+
+ VERIFY(a == ref);
+ VERIFY(!a1.empty());
+ VERIFY(!a2.empty());
+ }
+
+ // void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last);
+ {
+ const eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a1 = {-1, -1, 0, 1, 2, 3, 4, -1, -1};
+ eastl::list<int> a2 = {-1, -1, 5, 6, 7, 8, 9, -1, -1};
+
+ auto a1_begin = a1.begin();
+ auto a2_begin = a2.begin();
+ auto a1_end = a1.end();
+ auto a2_end = a2.end();
+
+ eastl::advance(a1_begin, 2);
+ eastl::advance(a2_begin, 2);
+ eastl::advance(a1_end, -2);
+ eastl::advance(a2_end, -2);
+
+ eastl::list<int> a;
+ a.splice(a.begin(), a2, a2_begin, a2_end);
+ a.splice(a.begin(), a1, a1_begin, a1_end);
+
+ const eastl::list<int> rref = {-1, -1, -1, -1}; // post splice reference list
+ VERIFY(a == ref);
+ VERIFY(a1 == rref);
+ VERIFY(a2 == rref);
+ }
+
+ // void splice(const_iterator position, this_type&& x);
+ {
+ const eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a1 = {0, 1, 2, 3, 4};
+ eastl::list<int> a2 = {5, 6, 7, 8, 9};
+
+ eastl::list<int> a;
+ a.splice(a.begin(), eastl::move(a2));
+ a.splice(a.begin(), eastl::move(a1));
+
+ VERIFY(a == ref);
+ VERIFY(a1.empty());
+ VERIFY(a2.empty());
+ }
+
+ // void splice(const_iterator position, this_type&& x, const_iterator i);
+ {
+ const eastl::list<int> ref = {0, 5};
+ eastl::list<int> a1 = {-1, -1, 0};
+ eastl::list<int> a2 = {-1, -1, 5};
+
+ auto a1_begin = a1.begin();
+ auto a2_begin = a2.begin();
+
+ eastl::advance(a1_begin, 2);
+ eastl::advance(a2_begin, 2);
+
+ eastl::list<int> a;
+ a.splice(a.begin(), eastl::move(a2), a2_begin);
+ a.splice(a.begin(), eastl::move(a1), a1_begin);
+
+ VERIFY(a == ref);
+ VERIFY(!a1.empty());
+ VERIFY(!a2.empty());
+ }
+
+ // void splice(const_iterator position, this_type&& x, const_iterator first, const_iterator last);
+ {
+ const eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a1 = {-1, -1, 0, 1, 2, 3, 4, -1, -1};
+ eastl::list<int> a2 = {-1, -1, 5, 6, 7, 8, 9, -1, -1};
+
+ auto a1_begin = a1.begin();
+ auto a2_begin = a2.begin();
+ auto a1_end = a1.end();
+ auto a2_end = a2.end();
+
+ eastl::advance(a1_begin, 2);
+ eastl::advance(a2_begin, 2);
+ eastl::advance(a1_end, -2);
+ eastl::advance(a2_end, -2);
+
+ eastl::list<int> a;
+ a.splice(a.begin(), eastl::move(a2), a2_begin, a2_end);
+ a.splice(a.begin(), eastl::move(a1), a1_begin, a1_end);
+
+ const eastl::list<int> rref = {-1, -1, -1, -1}; // post splice reference list
+ VERIFY(a == ref);
+ VERIFY(a1 == rref);
+ VERIFY(a2 == rref);
+ }
+
+
+ // void merge(this_type& x);
+ // void merge(this_type&& x);
+ // void merge(this_type& x, Compare compare);
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a1 = {0, 1, 2, 3, 4};
+ eastl::list<int> a2 = {5, 6, 7, 8, 9};
+ a1.merge(a2);
+ VERIFY(a1 == ref);
+ }
+
+ // void merge(this_type&& x, Compare compare);
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a1 = {0, 1, 2, 3, 4};
+ eastl::list<int> a2 = {5, 6, 7, 8, 9};
+ a1.merge(a2, [](int lhs, int rhs) { return lhs < rhs; });
+ VERIFY(a1 == ref);
+ }
+
+ // void unique();
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9};
+ a.unique();
+ VERIFY(a == ref);
+ }
+
+ // void unique(BinaryPredicate);
+ {
+ static bool bBreakComparison;
+ struct A
+ {
+ int mValue;
+ bool operator==(const A& other) const { return bBreakComparison ? false : mValue == other.mValue; }
+ };
+
+ eastl::list<A> ref = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}};
+ eastl::list<A> a = {{0}, {0}, {0}, {0}, {0}, {0}, {1}, {2}, {2}, {2}, {2}, {3}, {4}, {5},
+ {5}, {5}, {5}, {5}, {6}, {7}, {7}, {7}, {7}, {8}, {9}, {9}, {9}};
+
+ bBreakComparison = true;
+ a.unique(); // noop because broken comparison operator
+ VERIFY(a != ref);
+
+ a.unique([](const A& lhs, const A& rhs) { return lhs.mValue == rhs.mValue; });
+
+ bBreakComparison = false;
+ VERIFY(a == ref);
+ }
+
+ // void sort();
+ {
+ eastl::list<int> ref = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> a = {9, 4, 5, 3, 1, 0, 6, 2, 7, 8};
+
+ a.sort();
+ VERIFY(a == ref);
+ }
+
+ // void sort(Compare compare);
+ {
+ struct A
+ {
+ int mValue;
+ bool operator==(const A& other) const { return mValue == other.mValue; }
+ };
+
+ eastl::list<A> ref = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}};
+ eastl::list<A> a = {{1}, {0}, {2}, {9}, {4}, {5}, {6}, {7}, {3}, {8}};
+
+ a.sort([](const A& lhs, const A& rhs) { return lhs.mValue < rhs.mValue; });
+ VERIFY(a == ref);
+ }
+
+ { // Test empty base-class optimization
+ struct UnemptyDummyAllocator : eastl::dummy_allocator
+ {
+ int foo;
+ };
+
+ typedef eastl::list<int, eastl::dummy_allocator> list1;
+ typedef eastl::list<int, UnemptyDummyAllocator> list2;
+
+ EATEST_VERIFY(sizeof(list1) < sizeof(list2));
+ }
+
+ { // Test erase / erase_if
+ {
+ eastl::list<int> l = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto numErased = eastl::erase(l, 3);
+ VERIFY(numErased == 1);
+ numErased = eastl::erase(l, 5);
+ VERIFY(numErased == 1);
+ numErased = eastl::erase(l, 7);
+ VERIFY(numErased == 1);
+
+ VERIFY((l == eastl::list<int>{1, 2, 4, 6, 8, 9}));
+ }
+
+ {
+ eastl::list<int> l = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ auto numErased = eastl::erase_if(l, [](auto i) { return i % 2 == 0; });
+ VERIFY((l == eastl::list<int>{1, 3, 5, 7, 9}));
+ VERIFY(numErased == 4);
+ }
+ }
+
+ { // Test global operators
+ {
+ eastl::list<int> list1 = {0, 1, 2, 3, 4, 5};
+ eastl::list<int> list2 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> list3 = {5, 6, 7, 8};
+
+ VERIFY(list1 == list1);
+ VERIFY(!(list1 != list1));
+
+ VERIFY(list1 != list2);
+ VERIFY(list2 != list3);
+ VERIFY(list1 != list3);
+
+ VERIFY(list1 < list2);
+ VERIFY(list1 <= list2);
+
+ VERIFY(list2 > list1);
+ VERIFY(list2 >= list1);
+
+ VERIFY(list3 > list1);
+ VERIFY(list3 > list2);
+ }
+
+ // three way comparison operator
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ eastl::list<int> list1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ eastl::list<int> list2 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ // Verify equality between list1 and list2
+ VERIFY((list1 <=> list2) == 0);
+ VERIFY(!((list1 <=> list2) != 0));
+ VERIFY((list1 <=> list2) <= 0);
+ VERIFY((list1 <=> list2) >= 0);
+ VERIFY(!((list1 <=> list2) < 0));
+ VERIFY(!((list1 <=> list2) > 0));
+
+ list1.push_back(100); // Make list1 less than list2.
+ list2.push_back(101);
+
+ // Verify list1 < list2
+ VERIFY(!((list1 <=> list2) == 0));
+ VERIFY((list1 <=> list2) != 0);
+ VERIFY((list1 <=> list2) <= 0);
+ VERIFY(!((list1 <=> list2) >= 0));
+ VERIFY(((list1 <=> list2) < 0));
+ VERIFY(!((list1 <=> list2) > 0));
+
+ for (int i = 0; i < 3; i++) // Make the length of list2 less than list1
+ list2.pop_back();
+
+ // Verify list2.size() < list1.size() and list2 is a subset of list1
+ VERIFY(!((list1 <=> list2) == 0));
+ VERIFY((list1 <=> list2) != 0);
+ VERIFY((list1 <=> list2) >= 0);
+ VERIFY(!((list1 <=> list2) <= 0));
+ VERIFY(((list1 <=> list2) > 0));
+ VERIFY(!((list1 <=> list2) < 0));
+ }
+
+ {
+ eastl::list<int> list1 = {1, 2, 3, 4, 5, 6, 7};
+ eastl::list<int> list2 = {7, 6, 5, 4, 3, 2, 1};
+ eastl::list<int> list3 = {1, 2, 3, 4};
+
+ struct weak_ordering_list
+ {
+ eastl::list<int> list;
+ inline std::weak_ordering operator<=>(const weak_ordering_list& b) const { return list <=> b.list; }
+ };
+
+ VERIFY(synth_three_way{}(weak_ordering_list{list1}, weak_ordering_list{list2}) == std::weak_ordering::less);
+ VERIFY(synth_three_way{}(weak_ordering_list{list3}, weak_ordering_list{list1}) == std::weak_ordering::less);
+ VERIFY(synth_three_way{}(weak_ordering_list{list2}, weak_ordering_list{list1}) == std::weak_ordering::greater);
+ VERIFY(synth_three_way{}(weak_ordering_list{list2}, weak_ordering_list{list3}) == std::weak_ordering::greater);
+ VERIFY(synth_three_way{}(weak_ordering_list{list1}, weak_ordering_list{list1}) == std::weak_ordering::equivalent);
+
+ struct strong_ordering_list
+ {
+ eastl::list<int> list;
+ inline std::strong_ordering operator<=>(const strong_ordering_list& b) const { return list <=> b.list; }
+ };
+
+ VERIFY(synth_three_way{}(strong_ordering_list{list1}, strong_ordering_list{list2}) == std::strong_ordering::less);
+ VERIFY(synth_three_way{}(strong_ordering_list{list3}, strong_ordering_list{list1}) == std::strong_ordering::less);
+ VERIFY(synth_three_way{}(strong_ordering_list{list2}, strong_ordering_list{list1}) == std::strong_ordering::greater);
+ VERIFY(synth_three_way{}(strong_ordering_list{list2}, strong_ordering_list{list3}) == std::strong_ordering::greater);
+ VERIFY(synth_three_way{}(strong_ordering_list{list1}, strong_ordering_list{list1}) == std::strong_ordering::equal);
+ }
+#endif
+ }
+ return nErrorCount;
+}
+
+
diff --git a/EASTL/test/source/TestListMap.cpp b/EASTL/test/source/TestListMap.cpp
new file mode 100644
index 0000000..3d48133
--- /dev/null
+++ b/EASTL/test/source/TestListMap.cpp
@@ -0,0 +1,222 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/utility.h>
+#include <EASTL/bonus/list_map.h>
+
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+
+#include <stdio.h>
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+
+
+
+// We would like to use the generic EASTLTest VerifySequence function, but it's not currently ready to deal
+// with non-POD types. That can probably be solved, but in the meantime we implement a custom function here.
+template <typename T1, typename T2>
+bool VerifyListMapSequence(const char* pName,
+ eastl::list_map<T1, T2>& listMap,
+ T1 t1End, T2,
+ T1 t10 = 0, T2 t20 = 0,
+ T1 t11 = 0, T2 t21 = 0,
+ T1 t12 = 0, T2 t22 = 0,
+ T1 t13 = 0, T2 t23 = 0,
+ T1 t14 = 0, T2 t24 = 0,
+ T1 t15 = 0, T2 t25 = 0)
+{
+ typename eastl::list_map<T1, T2>::iterator it = listMap.begin();
+
+ if(t10 == t1End)
+ return (it == listMap.end());
+ if(it->first != t10 || it->second != t20)
+ { EASTLTest_Printf("[%s] Mismatch at index %d\n", pName, 0); return false; }
+ ++it;
+
+ if(t11 == t1End)
+ return (it == listMap.end());
+ if(it->first != t11 || it->second != t21)
+ { EASTLTest_Printf("[%s] Mismatch at index %d\n", pName, 1); return false; }
+ ++it;
+
+ if(t12 == t1End)
+ return (it == listMap.end());
+ if(it->first != t12 || it->second != t22)
+ { EASTLTest_Printf("[%s] Mismatch at index %d\n", pName, 2); return false; }
+ ++it;
+
+ if(t13 == t1End)
+ return (it == listMap.end());
+ if(it->first != t13 || it->second != t23)
+ { EASTLTest_Printf("[%s] Mismatch at index %d\n", pName, 3); return false; }
+ ++it;
+
+ if(t14 == t1End)
+ return (it == listMap.end());
+ if(it->first != t14 || it->second != t24)
+ { EASTLTest_Printf("[%s] Mismatch at index %d\n", pName, 4); return false; }
+ ++it;
+
+ if(t15 == t1End)
+ return (it == listMap.end());
+ if(it->first != t15 || it->second != t25)
+ { EASTLTest_Printf("[%s] Mismatch at index %d\n", pName, 5); return false; }
+ ++it;
+
+ return true;
+}
+
+
+int TestListMap()
+{
+ int nErrorCount = 0;
+
+ {
+ typedef eastl::list_map<uint32_t, uint64_t> TestMapType;
+ typedef eastl::pair<uint32_t, uint64_t> ValueType; // We currently can't use TestMapType::value_type because its 'first' is const.
+
+ TestMapType testMap;
+ TestMapType::iterator iter;
+ TestMapType::const_iterator c_iter;
+ TestMapType::reverse_iterator rIter;
+ TestMapType::const_reverse_iterator c_rIter;
+ TestMapType::iterator tempIter;
+
+ EATEST_VERIFY(testMap.empty());
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.push_front(ValueType(3, 1003));
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.push_back(ValueType(4, 1004));
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.push_back(ValueType(2, 1002));
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.push_front(ValueType(6, 1006));
+ EATEST_VERIFY(testMap.validate());
+
+ EATEST_VERIFY(!testMap.empty());
+ EATEST_VERIFY(testMap.size() == 4);
+
+ EATEST_VERIFY(testMap.find(3) != testMap.end());
+ EATEST_VERIFY(testMap.find(5) == testMap.end());
+ EATEST_VERIFY((VerifyListMapSequence<uint32_t, uint64_t>("list_map::push_back", testMap, UINT32_MAX, 0, 6, 1006, 3, 1003, 4, 1004, 2, 1002, UINT32_MAX, 0)));
+
+ iter = testMap.find(3);
+ EATEST_VERIFY((iter->first == 3) && ((++iter)->first == 4) && ((++iter)->first == 2));
+
+ rIter = testMap.rbegin();
+ EATEST_VERIFY((rIter->first == 2) && ((++rIter)->first == 4) && ((++rIter)->first == 3) && ((++rIter)->first == 6));
+
+ TestMapType::const_reference rFront = testMap.front();
+ EATEST_VERIFY(rFront.first == 6);
+
+ TestMapType::reference rBack = testMap.back();
+ EATEST_VERIFY(rBack.first == 2);
+
+ testMap.clear();
+ EATEST_VERIFY(testMap.empty());
+ EATEST_VERIFY(testMap.validate());
+
+ iter = testMap.begin();
+ EATEST_VERIFY(iter == testMap.end());
+
+ testMap.push_back(ValueType(10, 1010));
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.push_front(ValueType(8, 1008));
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.push_back(7, 1007);
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.push_front(9, 1009);
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.push_back(11, 1011LL);
+ EATEST_VERIFY(testMap.validate());
+
+ EATEST_VERIFY((VerifyListMapSequence<uint32_t, uint64_t>("list_map::push_back", testMap, UINT32_MAX, 0, 9, 1009, 8, 1008, 10, 1010, 7, 1007, 11, 1011, UINT32_MAX, 0)));
+
+ testMap.pop_front();
+ EATEST_VERIFY(testMap.validate());
+ EATEST_VERIFY((VerifyListMapSequence<uint32_t, uint64_t>("list_map::push_back", testMap, UINT32_MAX, 0, 8, 1008, 10, 1010, 7, 1007, 11, 1011, UINT32_MAX, 0)));
+
+ rIter = testMap.rbegin();
+ EATEST_VERIFY((rIter->first == 11 && ((++rIter)->first == 7) && ((++rIter)->first == 10) && ((++rIter)->first == 8)));
+
+ testMap.pop_back();
+ EATEST_VERIFY(testMap.validate());
+ EATEST_VERIFY((VerifyListMapSequence<uint32_t, uint64_t>("list_map::push_back", testMap, UINT32_MAX, 0, 8, 1008, 10, 1010, 7, 1007, UINT32_MAX, 0)));
+
+ rIter = testMap.rbegin();
+ EATEST_VERIFY(((rIter)->first == 7) && ((++rIter)->first == 10) && ((++rIter)->first == 8));
+
+ tempIter = testMap.find(10);
+ EATEST_VERIFY(tempIter != testMap.end());
+
+ testMap.erase(10);
+ EATEST_VERIFY(testMap.validate());
+ EATEST_VERIFY((VerifyListMapSequence<uint32_t, uint64_t>("list_map::push_back", testMap, UINT32_MAX, 0, 8, 1008, 7, 1007, UINT32_MAX, 0)));
+
+ EATEST_VERIFY(testMap.validate_iterator(testMap.find(8)) == (eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference));
+ EATEST_VERIFY(testMap.validate_iterator(testMap.find(30)) == (eastl::isf_valid | eastl::isf_current));
+ EATEST_VERIFY(testMap.validate_iterator(tempIter) == eastl::isf_none);
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.erase(20); // erasing an index not in use should still be safe
+ EATEST_VERIFY(testMap.validate());
+ EATEST_VERIFY((VerifyListMapSequence<uint32_t, uint64_t>("list_map::push_back", testMap, UINT32_MAX, 0, 8, 1008, 7, 1007, UINT32_MAX, 0)));
+
+ EATEST_VERIFY(testMap.count(7) == 1);
+ EATEST_VERIFY(testMap.count(10) == 0);
+ EATEST_VERIFY(testMap.validate());
+
+ testMap.erase(testMap.find(8));
+ EATEST_VERIFY(testMap.validate());
+ EATEST_VERIFY((VerifyListMapSequence<uint32_t, uint64_t>("list_map::push_back", testMap, UINT32_MAX, 0, 7, 1007, UINT32_MAX, 0)));
+
+ testMap.erase(testMap.rbegin());
+ EATEST_VERIFY(testMap.empty());
+ EATEST_VERIFY(testMap.validate());
+ }
+
+ {
+ typedef eastl::list_map<eastl::string, uint32_t> TestStringMapType;
+ TestStringMapType testStringMap;
+ TestStringMapType::iterator strIter;
+
+ testStringMap.push_back(eastl::string("hello"), 750);
+ EATEST_VERIFY(testStringMap.size() == 1);
+
+ strIter = testStringMap.find_as("hello", eastl::less_2<eastl::string, const char*>());
+ EATEST_VERIFY(strIter != testStringMap.end());
+ EATEST_VERIFY(strIter->first == "hello");
+ EATEST_VERIFY(strIter->second == 750);
+
+ strIter = testStringMap.find_as("fake_string", eastl::less_2<eastl::string, const char*>());
+ EATEST_VERIFY(strIter == testStringMap.end());
+ EATEST_VERIFY(testStringMap.validate());
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestLruCache.cpp b/EASTL/test/source/TestLruCache.cpp
new file mode 100644
index 0000000..e659218
--- /dev/null
+++ b/EASTL/test/source/TestLruCache.cpp
@@ -0,0 +1,340 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EASTL/bonus/lru_cache.h>
+#include <EASTL/unique_ptr.h>
+
+namespace TestLruCacheInternal
+{
+ struct Foo
+ {
+ static int count;
+
+ Foo()
+ : a(count++)
+ , b(count++)
+ { }
+
+ Foo(int x, int y) : a(x), b(y) {}
+
+ int a;
+ int b;
+
+ bool operator==(const Foo &other)
+ {
+ return this->a == other.a && this->b == other.b;
+ }
+ };
+
+ int Foo::count = 0;
+
+ class FooCreator
+ {
+ public:
+ FooCreator() : mFooCreatedCount(0) {}
+
+ Foo *Create()
+ {
+ mFooCreatedCount++;
+ return new Foo();
+ }
+
+ void Destroy(Foo *f)
+ {
+ delete f;
+ mFooCreatedCount--;
+ }
+
+ int mFooCreatedCount;
+ };
+}
+
+
+int TestLruCache()
+{
+ int nErrorCount = 0;
+
+ // Test simple situation
+ {
+ using namespace TestLruCacheInternal;
+
+ eastl::lru_cache<int, Foo> lruCache(3);
+
+ // Empty state
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.size() == 0);
+ EATEST_VERIFY(lruCache.empty() == true);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+ EATEST_VERIFY(lruCache.at(1).has_value() == false);
+
+ // Auto create with get call
+ EATEST_VERIFY(lruCache[0].a == 0);
+ EATEST_VERIFY(lruCache[0].b == 1);
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.contains(0) == true);
+ EATEST_VERIFY(lruCache.size() == 1);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+
+ // Fill structure up to 2 more entries to fill out, also test at()
+ lruCache.insert(1, Foo(2, 3));
+ EATEST_VERIFY(lruCache.at(1).value().a == 2);
+ EATEST_VERIFY(lruCache.at(1).value().b == 3);
+ EATEST_VERIFY(lruCache.contains(0) == true);
+ EATEST_VERIFY(lruCache.contains(1) == true);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+ EATEST_VERIFY(lruCache.contains(3) == false);
+ EATEST_VERIFY(lruCache.size() == 2);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+
+ lruCache.insert(2, Foo(4, 5));
+ EATEST_VERIFY(lruCache[2].a == 4);
+ EATEST_VERIFY(lruCache[2].b == 5);
+ EATEST_VERIFY(lruCache.contains(0) == true);
+ EATEST_VERIFY(lruCache.contains(1) == true);
+ EATEST_VERIFY(lruCache.contains(2) == true);
+ EATEST_VERIFY(lruCache.contains(3) == false);
+ EATEST_VERIFY(lruCache.size() == 3);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+
+ // Add another entry, at this point 0 is the oldest, so it should be pulled
+ lruCache.insert(3, Foo(6, 7));
+ EATEST_VERIFY(lruCache[3].a == 6);
+ EATEST_VERIFY(lruCache[3].b == 7);
+ EATEST_VERIFY(lruCache.contains(0) == false);
+ EATEST_VERIFY(lruCache.contains(1) == true);
+ EATEST_VERIFY(lruCache.contains(2) == true);
+ EATEST_VERIFY(lruCache.contains(3) == true);
+ EATEST_VERIFY(lruCache.size() == 3);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+
+ // Touch the now oldest 1 key
+ EATEST_VERIFY(lruCache.touch(1) == true);
+
+ // Add another entry, this will be #4 but since 1 was touched, 2 is now the oldest
+ lruCache.insert(4, Foo(8, 9));
+ EATEST_VERIFY(lruCache[4].a == 8);
+ EATEST_VERIFY(lruCache[4].b == 9);
+ EATEST_VERIFY(lruCache.contains(0) == false);
+ EATEST_VERIFY(lruCache.contains(1) == true);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+ EATEST_VERIFY(lruCache.contains(3) == true);
+ EATEST_VERIFY(lruCache.contains(4) == true);
+ EATEST_VERIFY(lruCache.size() == 3);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+
+ // Test resize down
+ EATEST_VERIFY(lruCache.touch(3) == true); // Let's make some key in the middle the most recent
+ lruCache.resize(1); // Resize down to 1 entry in the cache
+ EATEST_VERIFY(lruCache.contains(0) == false);
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+ EATEST_VERIFY(lruCache.contains(3) == true);
+ EATEST_VERIFY(lruCache.contains(4) == false);
+ EATEST_VERIFY(lruCache.size() == 1);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 1);
+
+ // Let's resize up to a size of 5 now
+ lruCache.resize(5);
+ EATEST_VERIFY(lruCache.contains(0) == false);
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+ EATEST_VERIFY(lruCache.contains(3) == true);
+ EATEST_VERIFY(lruCache.contains(4) == false);
+ EATEST_VERIFY(lruCache.size() == 1);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 5);
+
+ // Let's try updating
+ lruCache.assign(3, Foo(0, 0));
+ EATEST_VERIFY(lruCache[3] == Foo(0, 0));
+ EATEST_VERIFY(lruCache.contains(0) == false);
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+ EATEST_VERIFY(lruCache.contains(3) == true);
+ EATEST_VERIFY(lruCache.contains(4) == false);
+ EATEST_VERIFY(lruCache.size() == 1);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 5);
+
+ // add or update existing
+ lruCache.insert_or_assign(3, Foo(1, 1));
+ EATEST_VERIFY(lruCache[3] == Foo(1, 1));
+ EATEST_VERIFY(lruCache.contains(0) == false);
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+ EATEST_VERIFY(lruCache.contains(3) == true);
+ EATEST_VERIFY(lruCache.contains(4) == false);
+ EATEST_VERIFY(lruCache.size() == 1);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 5);
+
+ // Add or update a new entry
+ lruCache.insert_or_assign(25, Foo(2, 2));
+ EATEST_VERIFY(lruCache[3] == Foo(1, 1));
+ EATEST_VERIFY(lruCache[25] == Foo(2, 2));
+ EATEST_VERIFY(lruCache.contains(0) == false);
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+ EATEST_VERIFY(lruCache.contains(3) == true);
+ EATEST_VERIFY(lruCache.contains(4) == false);
+ EATEST_VERIFY(lruCache.contains(25) == true);
+ EATEST_VERIFY(lruCache.size() == 2);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 5);
+
+ // clear everything
+ lruCache.clear();
+ EATEST_VERIFY(lruCache.size() == 0);
+ EATEST_VERIFY(lruCache.empty() == true);
+ EATEST_VERIFY(lruCache.capacity() == 5);
+ EATEST_VERIFY(lruCache.contains(3) == false);
+
+ // test unilateral reset
+ lruCache[1] = Foo(1, 2);
+ lruCache.reset_lose_memory();
+ EATEST_VERIFY(lruCache.size() == 0);
+ }
+
+ // Test more advanced creation / deletion via callbacks
+ {
+ using namespace TestLruCacheInternal;
+
+ FooCreator fooCreator;
+
+ auto createCallback = [&fooCreator](int) { return fooCreator.Create(); };
+ auto deleteCallback = [&fooCreator](Foo *f) { fooCreator.Destroy(f); };
+
+ eastl::lru_cache<int, Foo*> lruCache(3, EASTLAllocatorType("eastl lru_cache"), createCallback, deleteCallback);
+
+ lruCache[1];
+ EATEST_VERIFY(fooCreator.mFooCreatedCount == 1);
+ EATEST_VERIFY(lruCache.size() == 1);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+ EATEST_VERIFY(lruCache.contains(1) == true);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+
+ lruCache[2];
+ EATEST_VERIFY(fooCreator.mFooCreatedCount == 2);
+ EATEST_VERIFY(lruCache.size() == 2);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+ EATEST_VERIFY(lruCache.contains(1) == true);
+ EATEST_VERIFY(lruCache.contains(2) == true);
+
+ // Update 2, which should delete the existing entry
+ {
+ auto f = fooCreator.Create();
+ EATEST_VERIFY(fooCreator.mFooCreatedCount == 3);
+ f->a = 20;
+ f->b = 21;
+ lruCache.assign(2, f);
+ EATEST_VERIFY(fooCreator.mFooCreatedCount == 2);
+ EATEST_VERIFY(lruCache.size() == 2);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+ EATEST_VERIFY(lruCache.contains(1) == true);
+ EATEST_VERIFY(lruCache.contains(2) == true);
+ EATEST_VERIFY(lruCache[2]->a == 20);
+ EATEST_VERIFY(lruCache[2]->b == 21);
+ }
+
+ lruCache.erase(2);
+ EATEST_VERIFY(fooCreator.mFooCreatedCount == 1);
+ EATEST_VERIFY(lruCache.size() == 1);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+ EATEST_VERIFY(lruCache.contains(1) == true);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+
+ lruCache.erase(1);
+ EATEST_VERIFY(fooCreator.mFooCreatedCount == 0);
+ EATEST_VERIFY(lruCache.size() == 0);
+ EATEST_VERIFY(lruCache.empty() == true);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+
+ // Test insert_or_assign
+ {
+ auto f = fooCreator.Create();
+ f->a = 22;
+ f->b = 30;
+ EATEST_VERIFY(fooCreator.mFooCreatedCount == 1);
+
+ lruCache.insert_or_assign(7, f);
+ EATEST_VERIFY(lruCache.size() == 1);
+ EATEST_VERIFY(lruCache.empty() == false);
+ EATEST_VERIFY(lruCache.capacity() == 3);
+ EATEST_VERIFY(lruCache.contains(1) == false);
+ EATEST_VERIFY(lruCache.contains(2) == false);
+ EATEST_VERIFY(lruCache.contains(7) == true);
+ EATEST_VERIFY(lruCache.erase(7) == true);
+ EATEST_VERIFY(fooCreator.mFooCreatedCount == 0);
+ }
+ }
+
+ // Test iteration
+ {
+ eastl::lru_cache<int, int> lc(5);
+ lc.insert_or_assign(0,10);
+ lc.insert_or_assign(1,11);
+ lc.insert_or_assign(2,12);
+ lc.insert_or_assign(3,13);
+ lc.insert_or_assign(4,14);
+
+ { // test manual for-loop
+ int i = 0;
+ for (auto b = lc.begin(), e = lc.end(); b != e; b++)
+ {
+ auto &p = *b;
+ VERIFY(i == p.first);
+ VERIFY(i + 10 == p.second.first);
+ i++;
+ }
+ }
+
+ { // test pairs
+ int i = 0;
+ for(auto& p : lc)
+ {
+ VERIFY(i == p.first);
+ VERIFY(i + 10 == p.second.first);
+ i++;
+ }
+ }
+
+ { // test structured bindings
+ int i = 0;
+ for(auto& [key, value] : lc)
+ {
+ VERIFY(i == key);
+ VERIFY(i + 10 == value.first);
+ i++;
+ }
+ }
+ }
+
+ // test initializer_list
+ {
+ eastl::lru_cache<int, int> lc = {{0, 10}, {1, 11}, {2, 12}, {3, 13}, {4, 14}, {5, 15}};
+
+ int i = 0;
+ for(auto& p : lc)
+ {
+ VERIFY(i == p.first);
+ VERIFY(i + 10 == p.second.first);
+ i++;
+ }
+ }
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestMap.cpp b/EASTL/test/source/TestMap.cpp
new file mode 100644
index 0000000..0df8c88
--- /dev/null
+++ b/EASTL/test/source/TestMap.cpp
@@ -0,0 +1,305 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "TestMap.h"
+#include "EASTLTest.h"
+#include <EASTL/map.h>
+#include <EASTL/string.h>
+#include <EASTL/vector.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <map>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::map<int, int>;
+template class eastl::multimap<int, int>;
+template class eastl::map<TestObject, TestObject>;
+template class eastl::multimap<TestObject, TestObject>;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// typedefs
+//
+typedef eastl::map<int, int> VM1;
+typedef eastl::map<TestObject, TestObject> VM4;
+typedef eastl::multimap<int, int> VMM1;
+typedef eastl::multimap<TestObject, TestObject> VMM4;
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ typedef std::map<int, int> VM3;
+ typedef std::map<TestObject, TestObject> VM6;
+ typedef std::multimap<int, int> VMM3;
+ typedef std::multimap<TestObject, TestObject> VMM6;
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+int TestMap()
+{
+ int nErrorCount = 0;
+
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ { // Test construction
+ nErrorCount += TestMapConstruction<VM1, VM3, false>();
+ nErrorCount += TestMapConstruction<VM4, VM6, false>();
+
+ nErrorCount += TestMapConstruction<VMM1, VMM3, true>();
+ nErrorCount += TestMapConstruction<VMM4, VMM6, true>();
+ }
+
+
+ { // Test mutating functionality.
+ nErrorCount += TestMapMutation<VM1, VM3, false>();
+ nErrorCount += TestMapMutation<VM4, VM6, false>();
+
+ nErrorCount += TestMapMutation<VMM1, VMM3, true>();
+ nErrorCount += TestMapMutation<VMM4, VMM6, true>();
+ }
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+ { // Test searching functionality.
+ nErrorCount += TestMapSearch<VM1, false>();
+ nErrorCount += TestMapSearch<VM4, false>();
+
+ nErrorCount += TestMapSearch<VMM1, true>();
+ nErrorCount += TestMapSearch<VMM4, true>();
+ }
+
+
+ {
+ // C++11 emplace and related functionality
+ nErrorCount += TestMapCpp11<eastl::map<int, TestObject>>();
+ nErrorCount += TestMultimapCpp11<eastl::multimap<int, TestObject>>();
+ nErrorCount += TestMapCpp11NonCopyable<eastl::map<int, NonCopyable>>();
+ }
+
+ {
+ // C++17 try_emplace and related functionality
+ nErrorCount += TestMapCpp17<eastl::map<int, TestObject>>();
+ }
+
+
+ { // Misc tests
+
+ // const key_compare& key_comp() const;
+ // key_compare& key_comp();
+ VM1 vm;
+ const VM1 vmc;
+
+ const VM1::key_compare& kc = vmc.key_comp();
+ vm.key_comp() = kc;
+ }
+
+
+ // Regressions against user bug reports.
+ {
+ // User reports that the following doesn't compile on GCC 4.1.1 due to unrecognized lower_bound.
+ eastl::map<int, int> m;
+ m[1] = 1;
+ EATEST_VERIFY(m.size() == 1);
+ m.erase(1);
+ EATEST_VERIFY(m.empty());
+ }
+
+ {
+ // User reports that EASTL_VALIDATE_COMPARE_ENABLED / EASTL_COMPARE_VALIDATE isn't compiling for this case.
+ eastl::map<eastl::u8string, int> m;
+ m.find_as(EA_CHAR8("some string"), eastl::equal_to_2<eastl::u8string, const char8_t*>());
+ }
+
+ {
+ eastl::map<int*, int> m;
+ int* ip = (int*)(uintptr_t)0xDEADC0DE;
+
+ m[ip] = 0;
+
+ auto it = m.find_as(ip, eastl::less_2<int*, int*>{});
+ EATEST_VERIFY(it != m.end());
+
+ it = m.find_as((int*)(uintptr_t)0xDEADC0DE, eastl::less_2<int*, int*>{});
+ EATEST_VERIFY(it != m.end());
+ }
+
+ {
+ // User reports that vector<map<enum,enum>> is crashing after the recent changes to add rvalue move and emplace support to rbtree.
+ typedef eastl::map<int, int> IntIntMap;
+ typedef eastl::vector<IntIntMap> IntIntMapArray;
+
+ IntIntMapArray v;
+ v.push_back(IntIntMap()); // This was calling the rbtree move constructor, which had a bug.
+ v[0][16] = 0; // The rbtree was in a bad internal state and so this line resulted in a crash.
+ EATEST_VERIFY(v[0].validate());
+ EATEST_VERIFY(v.validate());
+ }
+
+ {
+ typedef eastl::map<int, int> IntIntMap;
+ IntIntMap map1;
+ map1[1] = 1;
+ map1[3] = 3;
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ EATEST_VERIFY_THROW(map1.at(0));
+ EATEST_VERIFY_THROW(map1.at(2));
+ EATEST_VERIFY_THROW(map1.at(4));
+ #endif
+ map1[0] = 1;
+ #if EASTL_EXCEPTIONS_ENABLED
+ EATEST_VERIFY_NOTHROW(map1.at(0));
+ EATEST_VERIFY_NOTHROW(map1.at(1));
+ EATEST_VERIFY_NOTHROW(map1.at(3));
+ #endif
+ EATEST_VERIFY(map1.at(0) == 1);
+ EATEST_VERIFY(map1.at(1) == 1);
+ EATEST_VERIFY(map1.at(3) == 3);
+
+ const IntIntMap map2;
+ const IntIntMap map3(map1);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ EATEST_VERIFY_THROW(map2.at(0));
+ EATEST_VERIFY_NOTHROW(map3.at(0));
+ #endif
+ EATEST_VERIFY(map3.at(0) == 1);
+ }
+
+ // User regression test
+ {
+ #if !EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ typedef eastl::map<int, MoveOnlyTypeDefaultCtor> IntMOMap;
+
+ IntMOMap m1, m2;
+ m2[0] = MoveOnlyTypeDefaultCtor(0);
+ m2[1] = MoveOnlyTypeDefaultCtor(1);
+
+ EATEST_VERIFY( m1.empty());
+ EATEST_VERIFY(!m2.empty());
+
+ m1.swap(m2);
+
+ EATEST_VERIFY(!m1.empty());
+ EATEST_VERIFY( m2.empty());
+ #endif
+ }
+
+// todo: create a test case for this.
+// {
+// // User reports that an incorrectly wrapped pair key used to insert into an eastl map compiles when it should fire a compiler error about unconvertible types.
+// typedef eastl::pair<eastl::string, eastl::string> PairStringKey;
+// typedef eastl::map<PairStringKey, eastl::string> PairStringMap;
+//
+// PairStringMap p1, p2;
+//
+// p1.insert(PairStringMap::value_type(PairStringKey("key1", "key2"), "data")).first->second = "other_data";
+//
+// PairStringKey key("key1", "key2");
+// PairStringMap::value_type insert_me(key, "data");
+// p2.insert(insert_me).first->second = "other_data";
+//
+// for(auto& e : p1)
+// printf("%s,%s = %s\n", e.first.first.c_str(), e.first.second.c_str(), e.second.c_str());
+//
+// for(auto& e : p2)
+// printf("%s,%s = %s\n", e.first.first.c_str(), e.first.second.c_str(), e.second.c_str());
+//
+// EATEST_VERIFY(p1 == p2);
+// }
+
+ { // Test empty base-class optimization
+ struct UnemptyLess : eastl::less<int>
+ {
+ int foo;
+ };
+
+ typedef eastl::map<int, int, eastl::less<int>> VM1;
+ typedef eastl::map<int, int, UnemptyLess> VM2;
+
+ EATEST_VERIFY(sizeof(VM1) < sizeof(VM2));
+ }
+
+ { // Test erase_if
+ eastl::map<int, int> m = {{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}};
+ auto numErased = eastl::erase_if(m, [](auto p) { return p.first % 2 == 0; });
+ VERIFY((m == eastl::map<int, int>{{1, 1},{3, 3}}));
+ VERIFY(numErased == 3);
+ }
+
+ { // Test erase_if
+ eastl::multimap<int, int> m = {{0, 0}, {0, 0}, {0, 0}, {1, 1}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {4, 4}, {4, 4}};
+ auto numErased = eastl::erase_if(m, [](auto p) { return p.first % 2 == 0; });
+ VERIFY((m == eastl::multimap<int, int>{{1, 1}, {1, 1}, {3, 3}}));;
+ VERIFY(numErased == 7);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ { // Test map <=>
+ eastl::map<int, int> m1 = {{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}};
+ eastl::map<int, int> m2 = {{4, 4}, {3, 3}, {2, 2}, {1, 1}, {0, 0}};
+ eastl::map<int, int> m3 = {{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}};
+ eastl::map<int, int> m4 = {{1, 0}, {3, 2}, {5, 4}, {7, 6}, {9, 8}};
+ eastl::map<int, int> m5 = {{0, 1}, {2, 3}, {4, 5}};
+
+ VERIFY(m1 == m2);
+ VERIFY(m1 != m3);
+ VERIFY(m3 != m4);
+ VERIFY(m3 < m4);
+ VERIFY(m5 < m4);
+ VERIFY(m5 < m3);
+
+
+ VERIFY((m1 <=> m2) == 0);
+ VERIFY((m1 <=> m3) != 0);
+ VERIFY((m3 <=> m4) != 0);
+ VERIFY((m3 <=> m4) < 0);
+ VERIFY((m5 <=> m4) < 0);
+ VERIFY((m5 <=> m3) < 0);
+ }
+
+ { // Test multimap <=>
+ eastl::multimap<int, int> m1 = {{0, 0}, {0, 0}, {1, 1}, {1, 1}, {2, 2}, {2, 2}, {3, 3}, {3, 3}, {4, 4}, {4, 4}};
+ eastl::multimap<int, int> m2 = {{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {4, 4}, {3, 3}, {2, 2}, {1, 1}, {0, 0}};
+ eastl::multimap<int, int> m3 = {{0, 1}, {2, 3}, {4, 5}, {0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}};
+ eastl::multimap<int, int> m4 = {{1, 0}, {3, 2}, {5, 4}, {1, 0}, {3, 2}, {5, 4}, {7, 6}, {9, 8}};
+ eastl::multimap<int, int> m5 = {{10, 11}, {10, 11}};
+
+ VERIFY(m1 == m2);
+ VERIFY(m1 != m3);
+ VERIFY(m3 != m4);
+ VERIFY(m3 < m4);
+ VERIFY(m5 > m4);
+ VERIFY(m5 > m3);
+
+ VERIFY((m1 <=> m2) == 0);
+ VERIFY((m1 <=> m3) != 0);
+ VERIFY((m3 <=> m4) != 0);
+ VERIFY((m3 <=> m4) < 0);
+ VERIFY((m5 <=> m4) > 0);
+ VERIFY((m5 <=> m3) > 0);
+ }
+#endif
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestMap.h b/EASTL/test/source/TestMap.h
new file mode 100644
index 0000000..8d480cf
--- /dev/null
+++ b/EASTL/test/source/TestMap.h
@@ -0,0 +1,1418 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/scoped_ptr.h>
+#include <EASTL/random.h>
+#include <EASTL/tuple.h>
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <algorithm>
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMapConstruction
+//
+// This test compares eastl::map/multimap to std::map/multimap. It could possibly
+// work for comparing eastl::hash_map to C++11 std::unordered_map, but we would
+// rather move towards making this test be independent of any std comparisons.
+//
+// Requires a container that can hold at least 1000 items.
+//
+template <typename T1, typename T2, bool bMultimap>
+int TestMapConstruction()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ {
+ // We use new because fixed-size versions these objects might be too big for declaration on a stack.
+ eastl::scoped_ptr<T1> pt1A(new T1);
+ eastl::scoped_ptr<T2> pt2A(new T2);
+ T1& t1A = *pt1A;
+ T2& t2A = *pt2A;
+ nErrorCount += CompareContainers(t1A, t2A, "Map ctor", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+ EATEST_VERIFY(t1A.validate());
+
+
+ eastl::scoped_ptr<T1> pt1B(new T1);
+ eastl::scoped_ptr<T2> pt2B(new T2);
+ T1& t1B = *pt1B;
+ T2& t2B = *pt2B;
+ nErrorCount += CompareContainers(t1B, t2B, "Map ctor", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ eastl::scoped_ptr<T1> pt1C(new T1);
+ eastl::scoped_ptr<T2> pt2C(new T2);
+ T1& t1C = *pt1C;
+ T2& t2C = *pt2C;
+ for(int i = 0; i < 1000; i++)
+ {
+ t1C.insert(typename T1::value_type(typename T1::key_type(i), typename T1::mapped_type(i)));
+ t2C.insert(typename T2::value_type(typename T2::key_type(i), typename T2::mapped_type(i)));
+ EATEST_VERIFY(t1C.validate());
+ nErrorCount += CompareContainers(t1C, t2C, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+ }
+
+
+ eastl::scoped_ptr<T1> pt1D(new T1);
+ eastl::scoped_ptr<T2> pt2D(new T2);
+ T1& t1D = *pt1D;
+ T2& t2D = *pt2D;
+ nErrorCount += CompareContainers(t1D, t2D, "Map ctor", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ eastl::scoped_ptr<T1> pt1E(new T1(t1C));
+ eastl::scoped_ptr<T2> pt2E(new T2(t2C));
+ T1& t1E = *pt1E;
+ T2& t2E = *pt2E;
+ EATEST_VERIFY(t1E.validate());
+ nErrorCount += CompareContainers(t1E, t2E, "Map ctor", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ eastl::scoped_ptr<T1> pt1F(new T1(t1C.begin(), t1C.end()));
+ eastl::scoped_ptr<T2> pt2F(new T2(t2C.begin(), t2C.end()));
+ T1& t1F = *pt1F;
+ T2& t2F = *pt2F;
+ EATEST_VERIFY(t1F.validate());
+ nErrorCount += CompareContainers(t1F, t2F, "Map ctor", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // operator=(const map&)
+ t1E = t1D;
+ t2E = t2D;
+ nErrorCount += CompareContainers(t1D, t2D, "Map operator=", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+ nErrorCount += CompareContainers(t1E, t2E, "Map operator=", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // operator=(map&&)
+ // We test just the EASTL container here.
+ eastl::scoped_ptr<T1> pT1P(new T1);
+ eastl::scoped_ptr<T1> pT1Q(new T1);
+ T1& t1P = *pT1P;
+ T1& t1Q = *pT1Q;
+
+ typename T1::key_type k10(0);
+ typename T1::key_type k11(1);
+ typename T1::key_type k12(2);
+ typename T1::key_type k13(3);
+ typename T1::key_type k14(4);
+ typename T1::key_type k15(5);
+
+ typename T1::value_type v10(k10, typename T1::mapped_type(0));
+ typename T1::value_type v11(k11, typename T1::mapped_type(1));
+ typename T1::value_type v12(k12, typename T1::mapped_type(2));
+ typename T1::value_type v13(k13, typename T1::mapped_type(3));
+ typename T1::value_type v14(k14, typename T1::mapped_type(4));
+ typename T1::value_type v15(k15, typename T1::mapped_type(5));
+
+ t1P.insert(v10);
+ t1P.insert(v11);
+ t1P.insert(v12);
+
+ t1Q.insert(v13);
+ t1Q.insert(v14);
+ t1Q.insert(v15);
+
+ t1Q = eastl::move(t1P); // We are effectively requesting to swap t1A with t1B.
+ //EATEST_VERIFY((t1P.size() == 3) && (t1P.find(k13) != t1P.end()) && (t1P.find(k14) != t1P.end()) && (t1P.find(k15) != t1P.end())); // Currently operator=(this_type&& x) clears x instead of swapping with it.
+ EATEST_VERIFY((t1Q.size() == 3) && (t1Q.find(k10) != t1Q.end()) && (t1Q.find(k11) != t1Q.end()) && (t1Q.find(k12) != t1Q.end()));
+
+
+ // swap
+ t1E.swap(t1D);
+ t2E.swap(t2D);
+ EATEST_VERIFY(t1D.validate());
+ EATEST_VERIFY(t1E.validate());
+ nErrorCount += CompareContainers(t1D, t2D, "Map swap", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+ nErrorCount += CompareContainers(t1E, t2E, "Map swap", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // clear
+ t1A.clear();
+ t2A.clear();
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map clear", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+ t1B.clear();
+ t2B.clear();
+ EATEST_VERIFY(t1B.validate());
+ nErrorCount += CompareContainers(t1B, t2B, "Map clear", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // global operators (==, !=, <, etc.)
+ t1A.clear();
+ t1B.clear();
+ // Make t1A equal to t1B
+ t1A.insert(typename T1::value_type(typename T1::key_type(0), typename T1::mapped_type(0)));
+ t1A.insert(typename T1::value_type(typename T1::key_type(1), typename T1::mapped_type(1)));
+ t1A.insert(typename T1::value_type(typename T1::key_type(2), typename T1::mapped_type(2)));
+
+ t1B.insert(typename T1::value_type(typename T1::key_type(0), typename T1::mapped_type(0)));
+ t1B.insert(typename T1::value_type(typename T1::key_type(1), typename T1::mapped_type(1)));
+ t1B.insert(typename T1::value_type(typename T1::key_type(2), typename T1::mapped_type(2)));
+
+ EATEST_VERIFY( (t1A == t1B));
+ EATEST_VERIFY(!(t1A != t1B));
+ EATEST_VERIFY( (t1A <= t1B));
+ EATEST_VERIFY( (t1A >= t1B));
+ EATEST_VERIFY(!(t1A < t1B));
+ EATEST_VERIFY(!(t1A > t1B));
+ // Make t1A less than t1B
+ t1A.insert(typename T1::value_type(typename T1::key_type(3), typename T1::mapped_type(3)));
+ t1B.insert(typename T1::value_type(typename T1::key_type(4), typename T1::mapped_type(4)));
+
+ EATEST_VERIFY(!(t1A == t1B));
+ EATEST_VERIFY( (t1A != t1B));
+ EATEST_VERIFY( (t1A <= t1B));
+ EATEST_VERIFY(!(t1A >= t1B));
+ EATEST_VERIFY( (t1A < t1B));
+ EATEST_VERIFY(!(t1A > t1B));
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMapMutation
+//
+// Requires a container that can hold at least 1000 items.
+//
+template <typename T1, typename T2, bool bMultimap>
+int TestMapMutation()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ {
+ eastl::scoped_ptr<T1> pt1A(new T1); // We use a pointers instead of concrete object because it's size may be huge.
+ eastl::scoped_ptr<T2> pt2A(new T2);
+ T1& t1A = *pt1A;
+ T2& t2A = *pt2A;
+ int i, iEnd, p;
+
+ // Set up an array of values to randomize / permute.
+ eastl::vector<typename T1::key_type> valueArrayInsert;
+
+ if(gEASTL_TestLevel >= kEASTL_TestLevelLow)
+ {
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+
+ valueArrayInsert.clear();
+
+ for(i = 0; i < 1000; i++)
+ {
+ valueArrayInsert.push_back(typename T1::key_type(i));
+
+ // Occasionally attempt to duplicate an element, both for map and multimap.
+ if(((i + 1) < 1000) && (rng.RandLimit(4) == 0))
+ {
+ valueArrayInsert.push_back(typename T1::key_type(i));
+ i++;
+ }
+ }
+
+ for(p = 0; p < gEASTL_TestLevel * 100; p++) // For each permutation...
+ {
+ eastl::random_shuffle(valueArrayInsert.begin(), valueArrayInsert.end(), rng);
+
+ // insert
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::key_type& k = valueArrayInsert[i];
+
+ t1A.insert(typename T1::value_type(k, k)); // We expect that both arguments are the same.
+ t2A.insert(typename T2::value_type(k, k));
+
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+ }
+
+
+ // reverse iteration
+ typename T1::reverse_iterator r1 = t1A.rbegin();
+ typename T2::reverse_iterator r2 = t2A.rbegin();
+
+ while(r1 != t1A.rend())
+ {
+ typename T1::key_type k1 = (*r1).first;
+ typename T2::key_type k2 = (*r2).first;
+ EATEST_VERIFY(k1 == k2);
+ }
+
+
+ // erase
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::key_type& k = valueArrayInsert[i];
+
+ typename T1::size_type n1 = t1A.erase(k);
+ typename T2::size_type n2 = t2A.erase(k);
+
+ EATEST_VERIFY(n1 == n2);
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map erase", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+ }
+
+ EATEST_VERIFY((TestObject::sTOCount == 0) || (TestObject::sTOCount == (int64_t)valueArrayInsert.size())); // This test will only have meaning when T1 contains TestObject.
+ }
+ }
+
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ // Possibly do extended testing.
+ if(gEASTL_TestLevel > 6)
+ {
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+ valueArrayInsert.clear();
+
+ for(i = 0; i < 9; i++) // Much more than this count would take too long to test all permutations.
+ valueArrayInsert.push_back(typename T1::key_type(i));
+
+ // Insert these values into the map in every existing permutation.
+ for(p = 0; std::next_permutation(valueArrayInsert.begin(), valueArrayInsert.end()); p++) // For each permutation...
+ {
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::key_type& k = valueArrayInsert[i];
+
+ t1A.insert(typename T1::value_type(k, k)); // We expect that both arguments are the same.
+ t2A.insert(typename T2::value_type(k, k));
+
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+ }
+
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::key_type& k = valueArrayInsert[i];
+
+ t1A.erase(k);
+ t2A.erase(k);
+
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map erase", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+ }
+
+ EATEST_VERIFY((TestObject::sTOCount == 0) || (TestObject::sTOCount == (int64_t)valueArrayInsert.size())); // This test will only have meaning when T1 contains TestObject.
+
+ }
+
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ }
+ }
+
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ { // Other insert and erase operations
+
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ eastl::scoped_ptr<T1> pt1A(new T1); // We use a pointers instead of concrete object because it's size may be huge.
+ eastl::scoped_ptr<T2> pt2A(new T2);
+ T1& t1A = *pt1A;
+ T2& t2A = *pt2A;
+ int i;
+
+ // Set up an array of values to randomize / permute.
+ eastl::vector<eastl::pair<typename T1::key_type, typename T1::mapped_type> > valueArrayInsert1;
+ eastl::vector< std::pair<typename T2::key_type, typename T2::mapped_type> > valueArrayInsert2;
+
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+
+ for(i = 0; i < 100; i++)
+ {
+ valueArrayInsert1.push_back(typename T1::value_type(typename T1::key_type(i), typename T1::mapped_type(i)));
+ valueArrayInsert2.push_back(typename T2::value_type(typename T2::key_type(i), typename T2::mapped_type(i)));
+
+ if(rng.RandLimit(3) == 0)
+ {
+ valueArrayInsert1.push_back(typename T1::value_type(typename T1::key_type(i), typename T1::mapped_type(i)));
+ valueArrayInsert2.push_back(typename T2::value_type(typename T2::key_type(i), typename T2::mapped_type(i)));
+ }
+ }
+
+
+ // insert(InputIterator first, InputIterator last)
+ t1A.insert(valueArrayInsert1.begin(), valueArrayInsert1.end());
+ t2A.insert(valueArrayInsert2.begin(), valueArrayInsert2.end());
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // insert_return_type insert(const Key& key);
+ t1A.insert(typename T1::key_type(8888));
+ t2A.insert(typename T2::value_type(typename T2::key_type(8888), typename T2::mapped_type(0)));
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // iterator insert(iterator position, const value_type& value);
+ //
+ // If bMultimap == true, then the insertions below should fail due to the
+ // item being present. But they should return the correct iterator value.
+ typename T1::iterator it1 = t1A.insert(t1A.find(typename T1::key_type(2)), typename T1::value_type(typename T1::key_type(1), typename T1::mapped_type(1)));
+ typename T2::iterator it2 = t2A.insert(t2A.find(typename T2::key_type(2)), typename T2::value_type(typename T2::key_type(1), typename T2::mapped_type(1)));
+ EATEST_VERIFY(t1A.validate());
+ EATEST_VERIFY(it1->first == typename T1::key_type(1));
+ EATEST_VERIFY(it2->first == typename T2::key_type(1));
+ nErrorCount += CompareContainers(t1A, t2A, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+ it1 = t1A.insert(t1A.end(), typename T1::value_type(typename T1::key_type(5), typename T1::mapped_type(5)));
+ it2 = t2A.insert(t2A.end(), typename T2::value_type(typename T2::key_type(5), typename T2::mapped_type(5)));
+ EATEST_VERIFY(t1A.validate());
+ EATEST_VERIFY(it1->first == typename T1::key_type(5));
+ EATEST_VERIFY(it2->first == typename T2::key_type(5));
+ nErrorCount += CompareContainers(t1A, t2A, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+ // Now we remove these items so that the insertions above can succeed.
+ t1A.erase(t1A.find(typename T1::key_type(1)));
+ t2A.erase(t2A.find(typename T2::key_type(1)));
+ it1 = t1A.insert(t1A.find(typename T1::key_type(2)), typename T1::value_type(typename T1::key_type(1), typename T1::mapped_type(1)));
+ it2 = t2A.insert(t2A.find(typename T2::key_type(2)), typename T2::value_type(typename T2::key_type(1), typename T2::mapped_type(1)));
+ EATEST_VERIFY(t1A.validate());
+ EATEST_VERIFY(it1->first == typename T1::key_type(1));
+ EATEST_VERIFY(it2->first == typename T2::key_type(1));
+ nErrorCount += CompareContainers(t1A, t2A, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+ t1A.erase(t1A.find(typename T1::key_type(5)));
+ t2A.erase(t2A.find(typename T2::key_type(5)));
+ it1 = t1A.insert(t1A.end(), typename T1::value_type(typename T1::key_type(5), typename T1::mapped_type(5)));
+ it2 = t2A.insert(t2A.end(), typename T2::value_type(typename T2::key_type(5), typename T2::mapped_type(5)));
+ EATEST_VERIFY(t1A.validate());
+ EATEST_VERIFY(it1->first == typename T1::key_type(5));
+ EATEST_VERIFY(it2->first == typename T2::key_type(5));
+ nErrorCount += CompareContainers(t1A, t2A, "Map insert", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // iterator erase(iterator first, iterator last);
+ typename T1::iterator it11 = t1A.find(typename T1::key_type(17));
+ typename T1::iterator it12 = t1A.find(typename T2::key_type(37));
+ t1A.erase(it11, it12);
+
+ typename T2::iterator it21 = t2A.find(typename T1::key_type(17));
+ typename T2::iterator it22 = t2A.find(typename T2::key_type(37));
+ t2A.erase(it21, it22);
+
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map erase(first, last)", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // iterator erase(iterator position);
+ t1A.erase(t1A.find(typename T1::key_type(60)));
+ t2A.erase(t2A.find(typename T1::key_type(60)));
+ EATEST_VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Map erase(first, last)", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+
+ // Disabled because this function isn't exposed outside the rbtree yet.
+ // void erase(const key_type* first, const key_type* last);
+ //typename T1::key_type keyArray1[3] = { typename T1::key_type(70), typename T1::key_type(71), typename T1::key_type(72) };
+ //typename T2::key_type keyArray2[3] = { typename T2::key_type(70), typename T2::key_type(71), typename T2::key_type(72) };
+ //t1A.erase(keyArray1 + 0, keyArray1 + 3);
+ //t2A.erase(keyArray2 + 0, keyArray2 + 3);
+ //EATEST_VERIFY(t1A.validate());
+ //nErrorCount += CompareContainers(t1A, t2A, "Map erase(first, last)", eastl::use_first<typename T1::value_type>(), eastl::use_first<typename T2::value_type>());
+
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ }
+
+ {
+ // map(std::initializer_list<value_type> ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
+ // this_type& operator=(std::initializer_list<T> ilist);
+ // void insert(std::initializer_list<value_type> ilist);
+
+ // VS2013 has a known issue when dealing with std::initializer_lists
+ // https://connect.microsoft.com/VisualStudio/feedback/details/792355/compiler-confused-about-whether-to-use-a-initializer-list-assignment-operator
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !(defined(_MSC_VER) && _MSC_VER == 1800)
+ T1 myMap = { {typename T1::key_type(10),typename T1::mapped_type(0)}, {typename T1::key_type(11),typename T1::mapped_type(1)} };
+ EATEST_VERIFY(myMap.size() == 2);
+ EATEST_VERIFY(myMap.begin()->first == typename T1::key_type(10));
+ typename T1::iterator it = myMap.rbegin().base();
+ EATEST_VERIFY((--it)->first == typename T1::key_type(11));
+
+ myMap = { {typename T1::key_type(20),typename T1::mapped_type(0)}, {typename T1::key_type(21),typename T1::mapped_type(1)} };
+ EATEST_VERIFY(myMap.size() == 2);
+ EATEST_VERIFY(myMap.begin()->first == typename T1::key_type(20));
+ it = myMap.rbegin().base();
+ EATEST_VERIFY((--it)->first == typename T1::key_type(21));
+
+ myMap.insert({ {typename T1::key_type(40),typename T1::mapped_type(0)}, {typename T1::key_type(41),typename T1::mapped_type(1)} });
+ EATEST_VERIFY(myMap.size() == 4);
+ it = myMap.rbegin().base();
+ EATEST_VERIFY((--it)->first == typename T1::key_type(41));
+ #endif
+ }
+
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+
+
+
+
+template <typename T1>
+int TestMapSpecific(T1& t1A, eastl::false_type) // false_type means this is a map and not a multimap.
+{
+ int nErrorCount = 0;
+
+ // operator[] (map only)
+ typename T1::mapped_type m = t1A[typename T1::key_type(0)];
+ EATEST_VERIFY(m == typename T1::mapped_type(0));
+
+ m = t1A[typename T1::key_type(999)];
+ EATEST_VERIFY(m == typename T1::mapped_type(999));
+
+ m = t1A[typename T1::key_type(10000000)]; // Test the creation of an element that isn't present.
+ EATEST_VERIFY(m == typename T1::mapped_type(0)); // Test for 0 because the default ctor for our test objects assigns 0 to the object.
+
+ return nErrorCount;
+}
+
+
+template <typename T1>
+int TestMapSpecific(T1& t1A, eastl::true_type) // true_type means this is a multimap and not a map.
+{
+ int nErrorCount = 0;
+
+ // equal_range_small (multimap only)
+ eastl::pair<typename T1::iterator, typename T1::iterator> er = t1A.equal_range_small(typename T1::key_type(499));
+ EATEST_VERIFY(er.first->first == typename T1::key_type(499));
+ EATEST_VERIFY(er.second->first == typename T1::key_type(501));
+
+ er = t1A.equal_range_small(typename T1::key_type(-1));
+ EATEST_VERIFY(er.first == er.second);
+ EATEST_VERIFY(er.first == t1A.begin());
+
+ return nErrorCount;
+}
+
+
+// Just for the purposes of the map::find_as test below, we declare the following.
+// The map::find_as function searches a container of X for a type Y, where the user
+// defines the equality of X to Y. The purpose of TMapComparable is to be a generic type Y
+// that can be used for any X. We need to make this generic because the whole TestMapSearch
+// function below is templated on type T1 and so we don't know what T1 is ahead of time.
+
+template <typename T>
+struct TMapComparable
+{
+ T b;
+
+ TMapComparable() : b() { }
+ TMapComparable(const T& a) : b(a){ }
+ const TMapComparable& operator=(const T& a) { b = a; return *this; }
+ const TMapComparable& operator=(const TMapComparable& x) { b = x.b; return *this; }
+ operator const T&() const { return b; }
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMapSearch
+//
+// This function is designed to work with map, fixed_map (and not hash containers).
+// Requires a container that can hold at least 1000 items.
+//
+template <typename T1, bool bMultimap>
+int TestMapSearch()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ { // Test find, lower_bound, upper_bound, etc..
+ eastl::scoped_ptr<T1> pt1A(new T1); // We use a pointers instead of concrete object because it's size may be huge.
+ T1& t1A = *pt1A;
+ int i, iEnd;
+ typename T1::iterator it;
+
+ // Set up an array of values to randomize / permute.
+ eastl::vector<typename T1::key_type> valueArrayInsert;
+
+ for(i = 0; i < 1000; i++)
+ valueArrayInsert.push_back(typename T1::key_type(i));
+
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+ eastl::random_shuffle(valueArrayInsert.begin(), valueArrayInsert.end(), rng);
+
+
+ // insert
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::key_type k(i);
+ t1A.insert(typename T1::value_type(k, k));
+
+ it = t1A.find(k);
+ EATEST_VERIFY(it != t1A.end());
+ }
+
+
+ // find
+ for(i = 0; i < 1000; i++)
+ {
+ typename T1::key_type k(i);
+ it = t1A.find(k);
+
+ EATEST_VERIFY(it != t1A.end());
+ EATEST_VERIFY(it->first == k);
+ EATEST_VERIFY(it->second == k);
+ }
+
+ it = t1A.find(typename T1::key_type(-1));
+ EATEST_VERIFY(it == t1A.end());
+
+ it = t1A.find(typename T1::key_type(1001));
+ EATEST_VERIFY(it == t1A.end());
+
+
+ // find_as
+ typedef TMapComparable<typename T1::key_type> TC;
+
+ // Normally we use find_as to find via a different type, but we can test it here like this.
+ for(i = 0; i < 1000; i++)
+ {
+ TC k = typename T1::key_type(i);
+ it = t1A.find_as(k, eastl::less_2<typename T1::key_type, TC>());
+
+ EATEST_VERIFY(it != t1A.end());
+ EATEST_VERIFY(it->first == k);
+ EATEST_VERIFY(it->second == k);
+ }
+
+ it = t1A.find_as(TC(typename T1::key_type(-1)), eastl::less_2<typename T1::key_type, TC>());
+ EATEST_VERIFY(it == t1A.end());
+
+ it = t1A.find_as(TC(typename T1::key_type(1001)), eastl::less_2<typename T1::key_type, TC>());
+ EATEST_VERIFY(it == t1A.end());
+
+
+ // lower_bound
+ it = t1A.lower_bound(typename T1::key_type(0));
+ EATEST_VERIFY(it == t1A.begin());
+
+ it = t1A.lower_bound(typename T1::key_type(-1));
+ EATEST_VERIFY(it == t1A.begin());
+
+ it = t1A.lower_bound(typename T1::key_type(1001));
+ EATEST_VERIFY(it == t1A.end());
+
+ t1A.erase(typename T1::key_type(500));
+ it = t1A.lower_bound(typename T1::key_type(500));
+ EATEST_VERIFY(it->first == typename T1::key_type(501));
+
+
+ // upper_bound
+ it = t1A.upper_bound(typename T1::key_type(-1));
+ EATEST_VERIFY(it == t1A.begin());
+
+ it = t1A.upper_bound(typename T1::key_type(499));
+ EATEST_VERIFY(it->first == typename T1::key_type(501));
+
+ it = t1A.upper_bound(typename T1::key_type(-1));
+ EATEST_VERIFY(it->first == typename T1::key_type(0));
+
+ it = t1A.upper_bound(typename T1::key_type(1000));
+ EATEST_VERIFY(it == t1A.end());
+
+
+ // count
+ typename T1::size_type n = t1A.count(typename T1::key_type(-1));
+ EATEST_VERIFY(n == 0);
+
+ n = t1A.count(typename T1::key_type(0));
+ EATEST_VERIFY(n == 1);
+
+ n = t1A.count(typename T1::key_type(500)); // We removed 500 above.
+ EATEST_VERIFY(n == 0);
+
+ n = t1A.count(typename T1::key_type(1001));
+ EATEST_VERIFY(n == 0);
+
+
+ // equal_range
+ eastl::pair<typename T1::iterator, typename T1::iterator> er = t1A.equal_range(typename T1::key_type(200));
+ EATEST_VERIFY(er.first->first == typename T1::key_type(200));
+ EATEST_VERIFY(er.first->second == typename T1::key_type(200));
+
+ er = t1A.equal_range(typename T1::key_type(499));
+ EATEST_VERIFY(er.first->first == typename T1::key_type(499));
+ EATEST_VERIFY(er.second->first == typename T1::key_type(501));
+
+ er = t1A.equal_range(typename T1::key_type(-1));
+ EATEST_VERIFY(er.first == er.second);
+ EATEST_VERIFY(er.first == t1A.begin());
+
+
+ // Some tests need to be differently between map and multimap.
+ nErrorCount += TestMapSpecific(t1A, eastl::integral_constant<bool, bMultimap>());
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMapCpp11
+//
+// This function is designed to work with map, fixed_map, hash_map, fixed_hash_map.
+//
+template <typename T1>
+int TestMapCpp11()
+{
+ int nErrorCount = 0;
+
+ // template <class... Args>
+ // insert_return_type emplace(Args&&... args);
+ //
+ // template <class... Args>
+ // iterator emplace_hint(const_iterator position, Args&&... args);
+ //
+ // insert_return_type insert(value_type&& value);
+ // iterator insert(const_iterator position, value_type&& value);
+ // void insert(std::initializer_list<value_type> ilist);
+ TestObject::Reset();
+
+ typedef T1 TOMap;
+ typedef typename TOMap::value_type value_type;
+ typename TOMap::insert_return_type toMapInsertResult;
+ typename TOMap::iterator toMapIterator;
+
+ TOMap toMap;
+ TestObject to0(0);
+ TestObject to1(1);
+
+ toMapInsertResult = toMap.emplace(value_type(0, to0));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ //EATEST_VERIFY((TestObject::sTOCopyCtorCount == 2) && (TestObject::sTOMoveCtorCount == 1)); // Disabled until we can guarantee its behavior and deal with how it's different between compilers of differing C++11 support.
+
+ toMapInsertResult = toMap.emplace(value_type(1, eastl::move(to1)));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+
+ // insert_return_type t1A.emplace(value_type&& value);
+ TestObject to4(4);
+ value_type value40(4, to4);
+ EATEST_VERIFY(toMap.find(4) == toMap.end());
+ EATEST_VERIFY(value40.second.mX == 4); // It should change to 0 below during the move swap.
+ toMapInsertResult = toMap.emplace(eastl::move(value40));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(4) != toMap.end());
+ EATEST_VERIFY(value40.second.mX == 0);
+
+ value_type value41(4, TestObject(41));
+ toMapInsertResult = toMap.emplace(eastl::move(value41));
+ EATEST_VERIFY(toMapInsertResult.second == false);
+ EATEST_VERIFY(toMapInsertResult.first->second.mX == 4);
+ EATEST_VERIFY(toMap.find(4) != toMap.end());
+
+ // iterator t1A.emplace_hint(const_iterator position, value_type&& value);
+ TestObject to5(5);
+ value_type value50(5, to5);
+ EATEST_VERIFY(toMap.find(5) == toMap.end());
+ toMapInsertResult = toMap.emplace(eastl::move(value50));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(5) != toMap.end());
+
+ value_type value51(5, TestObject(51));
+ toMapIterator = toMap.emplace_hint(toMapInsertResult.first, eastl::move(value51));
+ EATEST_VERIFY(toMapIterator->first == 5);
+ EATEST_VERIFY(toMapIterator->second.mX == 5);
+ EATEST_VERIFY(toMap.find(5) != toMap.end());
+
+ TestObject to6(6);
+ value_type value6(6, to6);
+ EATEST_VERIFY(toMap.find(6) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), eastl::move(value6)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 6);
+ EATEST_VERIFY(toMap.find(6) != toMap.end());
+
+ TestObject to2(2);
+ EATEST_VERIFY(toMap.find(2) == toMap.end());
+ toMapInsertResult = toMap.emplace(value_type(2, to2));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(2) != toMap.end());
+ toMapInsertResult = toMap.emplace(value_type(2, to2));
+ EATEST_VERIFY(toMapInsertResult.second == false);
+ EATEST_VERIFY(toMap.find(2) != toMap.end());
+
+ // iterator t1A.emplace_hint(const_iterator position, const value_type& value);
+ TestObject to7(7);
+ value_type value70(7, to7);
+ EATEST_VERIFY(toMap.find(7) == toMap.end());
+ toMapInsertResult = toMap.emplace(value70);
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(7) != toMap.end());
+
+ value_type value71(7, TestObject(71));
+ toMapIterator = toMap.emplace_hint(toMapInsertResult.first, value71);
+ EATEST_VERIFY(toMapIterator->first == 7);
+ EATEST_VERIFY(toMapIterator->second.mX == 7);
+ EATEST_VERIFY(toMap.find(7) != toMap.end());
+
+ TestObject to8(8);
+ value_type value8(8, to8);
+ EATEST_VERIFY(toMap.find(8) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), value8); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 8);
+ EATEST_VERIFY(toMap.find(8) != toMap.end());
+
+ // insert_return_type t1A.insert(value_type&& value);
+ TestObject to3(3);
+ EATEST_VERIFY(toMap.find(3) == toMap.end());
+ toMapInsertResult = toMap.insert(value_type(3, to3));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(3) != toMap.end());
+ toMapInsertResult = toMap.insert(value_type(3, to3));
+ EATEST_VERIFY(toMapInsertResult.second == false);
+ EATEST_VERIFY(toMap.find(3) != toMap.end());
+
+
+ // iterator t1A.insert(const_iterator position, value_type&& value);
+ TestObject to9(9);
+ value_type value90(9, to9);
+ EATEST_VERIFY(toMap.find(9) == toMap.end());
+ toMapInsertResult = toMap.emplace(eastl::move(value90));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(9) != toMap.end());
+
+ value_type value91(9, TestObject(91));
+ toMapIterator = toMap.insert(toMapInsertResult.first, eastl::move(value91));
+ EATEST_VERIFY(toMapIterator->first == 9);
+ EATEST_VERIFY(toMapIterator->second.mX == 9);
+ EATEST_VERIFY(toMap.find(9) != toMap.end());
+
+ TestObject to10(10);
+ value_type value10(10, to10);
+ EATEST_VERIFY(toMap.find(10) == toMap.end());
+ toMapIterator = toMap.insert(toMap.begin(), eastl::move(value10)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 10);
+ EATEST_VERIFY(toMap.find(10) != toMap.end());
+
+ // insert_return_type t1A.emplace(Args&&... args);
+ TestObject to11(11);
+ EATEST_VERIFY(toMap.find(11) == toMap.end());
+ toMapInsertResult = toMap.emplace(11, to11);
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMapInsertResult.first->first == 11);
+ EATEST_VERIFY(toMap.find(11) != toMap.end());
+
+ TestObject to111(111);
+ toMapInsertResult = toMap.emplace(11, to111);
+ EATEST_VERIFY(toMapInsertResult.second == false);
+ EATEST_VERIFY(toMapInsertResult.first->first == 11);
+ EATEST_VERIFY(toMapInsertResult.first->second.mX == 11);
+ EATEST_VERIFY(toMap.find(11) != toMap.end());
+
+ TestObject to12(12);
+ EATEST_VERIFY(toMap.find(12) == toMap.end());
+ toMapInsertResult = toMap.emplace(12, eastl::move(to12));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMapInsertResult.first->first == 12);
+ EATEST_VERIFY(toMap.find(12) != toMap.end());
+
+ TestObject to121(121);
+ toMapInsertResult = toMap.emplace(12, eastl::move(to121));
+ EATEST_VERIFY(toMapInsertResult.second == false);
+ EATEST_VERIFY(toMapInsertResult.first->first == 12);
+ EATEST_VERIFY(toMapInsertResult.first->second.mX == 12);
+ EATEST_VERIFY(toMap.find(12) != toMap.end());
+
+ EATEST_VERIFY(toMap.find(13) == toMap.end());
+ toMapInsertResult = toMap.emplace(eastl::piecewise_construct, eastl::make_tuple(13), eastl::make_tuple(1, 2, 10)); // 1 + 2 + 10 = 13
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMapInsertResult.first->first == 13);
+ EATEST_VERIFY(toMap.find(13) != toMap.end());
+
+ toMapInsertResult = toMap.emplace(eastl::piecewise_construct, eastl::make_tuple(13), eastl::make_tuple(1, 30, 100)); // 1 + 30 + 100 = 131
+ EATEST_VERIFY(toMapInsertResult.second == false);
+ EATEST_VERIFY(toMapInsertResult.first->first == 13);
+ EATEST_VERIFY(toMapInsertResult.first->second.mX == 13);
+ EATEST_VERIFY(toMap.find(13) != toMap.end());
+
+ // iterator t1A.emplace_hint(const_iterator position, Args&&... args);
+ TestObject to14(14);
+ EATEST_VERIFY(toMap.find(14) == toMap.end());
+ toMapInsertResult = toMap.emplace(14, to14);
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(14) != toMap.end());
+
+ TestObject to141(141);
+ toMapIterator = toMap.emplace_hint(toMapInsertResult.first, 14, to141);
+ EATEST_VERIFY(toMapIterator->first == 14);
+ EATEST_VERIFY(toMapIterator->second.mX == 14);
+ EATEST_VERIFY(toMap.find(14) != toMap.end());
+
+ TestObject to15(15);
+ EATEST_VERIFY(toMap.find(15) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), 15, to15); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 15);
+ EATEST_VERIFY(toMap.find(15) != toMap.end());
+
+ TestObject to16(16);
+ EATEST_VERIFY(toMap.find(16) == toMap.end());
+ toMapInsertResult = toMap.emplace(16, eastl::move(to16));
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(16) != toMap.end());
+
+ TestObject to161(161);
+ toMapIterator = toMap.emplace_hint(toMapInsertResult.first, 16, eastl::move(to161));
+ EATEST_VERIFY(toMapIterator->first == 16);
+ EATEST_VERIFY(toMapIterator->second.mX == 16);
+ EATEST_VERIFY(toMap.find(16) != toMap.end());
+
+ TestObject to17(17);
+ EATEST_VERIFY(toMap.find(17) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), 17, eastl::move(to17)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 17);
+ EATEST_VERIFY(toMap.find(17) != toMap.end());
+
+ EATEST_VERIFY(toMap.find(18) == toMap.end());
+ toMapInsertResult = toMap.emplace(eastl::piecewise_construct, eastl::make_tuple(18), eastl::make_tuple(3, 5, 10)); // 3 + 5 + 10 = 18
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(18) != toMap.end());
+
+ toMapIterator = toMap.emplace_hint(toMapInsertResult.first, eastl::piecewise_construct, eastl::make_tuple(18), eastl::make_tuple(1, 80, 100)); // 1 + 80 + 100 = 181
+ EATEST_VERIFY(toMapIterator->first == 18);
+ EATEST_VERIFY(toMapIterator->second.mX == 18);
+ EATEST_VERIFY(toMap.find(18) != toMap.end());
+
+ EATEST_VERIFY(toMap.find(19) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), eastl::piecewise_construct, eastl::make_tuple(19), eastl::make_tuple(4, 5, 10)); // 4 + 5 + 10 = 19 // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 19);
+ EATEST_VERIFY(toMap.find(19) != toMap.end());
+
+ // iterator t1A.insert(const_iterator position, const value_type& value);
+ TestObject to20(20);
+ value_type value20(20, to20);
+ EATEST_VERIFY(toMap.find(20) == toMap.end());
+ toMapInsertResult = toMap.emplace(value20);
+ EATEST_VERIFY(toMapInsertResult.second == true);
+ EATEST_VERIFY(toMap.find(20) != toMap.end());
+
+ value_type value201(20, TestObject(201));
+ toMapIterator = toMap.insert(toMapInsertResult.first, value201);
+ EATEST_VERIFY(toMapIterator->first == 20);
+ EATEST_VERIFY(toMapIterator->second.mX == 20);
+ EATEST_VERIFY(toMap.find(20) != toMap.end());
+
+ TestObject to21(21);
+ value_type value21(21, to21);
+ EATEST_VERIFY(toMap.find(21) == toMap.end());
+ toMapIterator = toMap.insert(toMap.begin(), value21); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 21);
+ EATEST_VERIFY(toMap.find(21) != toMap.end());
+
+ // void insert(std::initializer_list<value_type> ilist);
+ toMap.insert({ value_type(22, TestObject(22)), value_type(23, TestObject(23)), value_type(24, TestObject(24)) });
+ EATEST_VERIFY(toMap.find(22) != toMap.end());
+ EATEST_VERIFY(toMap.find(23) != toMap.end());
+ EATEST_VERIFY(toMap.find(24) != toMap.end());
+
+ return nErrorCount;
+}
+
+struct NonCopyable
+{
+ NonCopyable() : mX(0) {}
+ NonCopyable(int x) : mX(x) {}
+
+ int mX;
+
+ EA_NON_COPYABLE(NonCopyable)
+};
+
+inline bool operator<(const NonCopyable& a, const NonCopyable& b) { return a.mX < b.mX; }
+
+template <typename T>
+int TestMapCpp11NonCopyable()
+{
+ int nErrorCount = 0;
+
+ // Verify that operator[]() can be called for a type that is default constructible but not copy constructible. C++11
+ // relaxed the requirements on operator[]() and so this should compile.
+ T ncMap;
+ ncMap[1].mX = 1;
+ EATEST_VERIFY(ncMap[1].mX == 1);
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMultimapCpp11
+//
+// This function is designed to work with multimap, fixed_multimap, hash_multimap, fixed_hash_multimap
+//
+// This is similar to the TestSetCpp11 function, with some differences related
+// to handling of duplicate entries.
+//
+template <typename T1>
+int TestMultimapCpp11()
+{
+ int nErrorCount = 0;
+
+ // template <class... Args>
+ // insert_return_type emplace(Args&&... args);
+ //
+ // template <class... Args>
+ // iterator emplace_hint(const_iterator position, Args&&... args);
+ //
+ // insert_return_type insert(value_type&& value);
+ // iterator insert(const_iterator position, value_type&& value);
+ // void insert(std::initializer_list<value_type> ilist);
+ TestObject::Reset();
+
+ typedef T1 TOMap;
+ typedef typename TOMap::value_type value_type;
+ typename TOMap::iterator toMapIterator;
+
+ TOMap toMap;
+ TestObject to0(0);
+ TestObject to1(1);
+
+ toMapIterator = toMap.emplace(value_type(0, to0));
+ EATEST_VERIFY(toMapIterator->first == 0);
+ //EATEST_VERIFY((TestObject::sTOCopyCtorCount == 2) && (TestObject::sTOMoveCtorCount == 1)); // Disabled until we can guarantee its behavior and deal with how it's different between compilers of differing C++11 support.
+
+ toMapIterator = toMap.emplace(value_type(1, eastl::move(to1)));
+ EATEST_VERIFY(toMapIterator->first == 1);
+
+ // insert_return_type t1A.emplace(value_type&& value);
+ TestObject to4(4);
+ value_type value40(4, to4);
+ EATEST_VERIFY(toMap.find(4) == toMap.end());
+ EATEST_VERIFY(value40.second.mX == 4); // It should change to 0 below during the move swap.
+ toMapIterator = toMap.emplace(eastl::move(value40));
+ EATEST_VERIFY(toMapIterator->first == 4);
+ EATEST_VERIFY(toMap.find(4) != toMap.end());
+ EATEST_VERIFY(value40.second.mX == 0);
+
+ value_type value41(4, TestObject(41));
+ toMapIterator = toMap.emplace(eastl::move(value41));
+ EATEST_VERIFY(toMapIterator->first == 4);
+ EATEST_VERIFY(toMapIterator->second.mX == 41);
+ EATEST_VERIFY(toMap.count(4) == 2);
+
+ // iterator t1A.emplace_hint(const_iterator position, value_type&& value);
+ TestObject to5(5);
+ value_type value50(5, to5);
+ EATEST_VERIFY(toMap.find(5) == toMap.end());
+ toMapIterator = toMap.emplace(eastl::move(value50));
+ EATEST_VERIFY(toMapIterator->first == 5);
+ EATEST_VERIFY(toMap.find(5) != toMap.end());
+
+ value_type value51(5, TestObject(51));
+ toMapIterator = toMap.emplace_hint(toMapIterator, eastl::move(value51));
+ EATEST_VERIFY(toMapIterator->first == 5);
+ EATEST_VERIFY(toMapIterator->second.mX == 51);
+ EATEST_VERIFY(toMap.count(5) == 2);
+
+ TestObject to6(6);
+ value_type value6(6, to6);
+ EATEST_VERIFY(toMap.find(6) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), eastl::move(value6)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 6);
+ EATEST_VERIFY(toMap.find(6) != toMap.end());
+
+ TestObject to2(2);
+ EATEST_VERIFY(toMap.find(2) == toMap.end());
+ toMapIterator = toMap.emplace(value_type(2, to2));
+ EATEST_VERIFY(toMapIterator->first == 2);
+ EATEST_VERIFY(toMap.find(2) != toMap.end());
+ toMapIterator = toMap.emplace(value_type(2, to2));
+ EATEST_VERIFY(toMapIterator->first == 2);
+ EATEST_VERIFY(toMap.find(2) != toMap.end());
+
+ // iterator t1A.emplace_hint(const_iterator position, const value_type& value);
+ TestObject to7(7);
+ value_type value70(7, to7);
+ EATEST_VERIFY(toMap.find(7) == toMap.end());
+ toMapIterator = toMap.emplace(value70);
+ EATEST_VERIFY(toMapIterator->first == 7);
+ EATEST_VERIFY(toMap.find(7) != toMap.end());
+
+ value_type value71(7, TestObject(71));
+ toMapIterator = toMap.emplace_hint(toMapIterator, value71);
+ EATEST_VERIFY(toMapIterator->first == 7);
+ EATEST_VERIFY(toMapIterator->second.mX == 71);
+ EATEST_VERIFY(toMap.count(7) == 2);
+
+ TestObject to8(8);
+ value_type value8(8, to8);
+ EATEST_VERIFY(toMap.find(8) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), value8); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 8);
+ EATEST_VERIFY(toMap.find(8) != toMap.end());
+
+ // insert_return_type t1A.insert(value_type&& value);
+ TestObject to3(3);
+ EATEST_VERIFY(toMap.find(3) == toMap.end());
+ toMapIterator = toMap.insert(value_type(3, to3));
+ EATEST_VERIFY(toMapIterator->first == 3);
+ EATEST_VERIFY(toMap.find(3) != toMap.end());
+ toMapIterator = toMap.insert(value_type(3, to3));
+ EATEST_VERIFY(toMapIterator->first == 3);
+ EATEST_VERIFY(toMap.find(3) != toMap.end());
+
+
+ // iterator t1A.insert(const_iterator position, value_type&& value);
+ TestObject to9(9);
+ value_type value90(9, to9);
+ EATEST_VERIFY(toMap.find(9) == toMap.end());
+ toMapIterator = toMap.emplace(eastl::move(value90));
+ EATEST_VERIFY(toMapIterator->first == 9);
+ EATEST_VERIFY(toMap.find(9) != toMap.end());
+
+ value_type value91(9, TestObject(91));
+ toMapIterator = toMap.insert(toMapIterator, eastl::move(value91));
+ EATEST_VERIFY(toMapIterator->first == 9);
+ EATEST_VERIFY(toMapIterator->second.mX == 91);
+ EATEST_VERIFY(toMap.count(9) == 2);
+
+ TestObject to10(10);
+ value_type value10(10, to10);
+ EATEST_VERIFY(toMap.find(10) == toMap.end());
+ toMapIterator = toMap.insert(toMap.begin(), eastl::move(value10)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 10);
+ EATEST_VERIFY(toMap.find(10) != toMap.end());
+
+ // iterator t1A.emplace(Args&&... args);
+ TestObject to11(11);
+ EATEST_VERIFY(toMap.find(11) == toMap.end());
+ toMapIterator = toMap.emplace(11, to11);
+ EATEST_VERIFY(toMapIterator->first == 11);
+ EATEST_VERIFY(toMap.find(11) != toMap.end());
+
+ TestObject to111(111);
+ toMapIterator = toMap.emplace(11, to111);
+ EATEST_VERIFY(toMapIterator->first == 11);
+ EATEST_VERIFY(toMapIterator->second.mX == 111);
+ EATEST_VERIFY(toMap.count(11) == 2);
+
+ TestObject to12(12);
+ EATEST_VERIFY(toMap.find(12) == toMap.end());
+ toMapIterator = toMap.emplace(12, eastl::move(to12));
+ EATEST_VERIFY(toMapIterator->first == 12);
+ EATEST_VERIFY(toMap.find(12) != toMap.end());
+
+ TestObject to121(121);
+ toMapIterator = toMap.emplace(12, eastl::move(to121));
+ EATEST_VERIFY(toMapIterator->first == 12);
+ EATEST_VERIFY(toMapIterator->second.mX == 121);
+ EATEST_VERIFY(toMap.count(12) == 2);
+
+ EATEST_VERIFY(toMap.find(13) == toMap.end());
+ toMapIterator = toMap.emplace(eastl::piecewise_construct, eastl::make_tuple(13), eastl::make_tuple(1, 2, 10)); // 1 + 2 + 10 = 13
+ EATEST_VERIFY(toMapIterator->first == 13);
+ EATEST_VERIFY(toMap.find(13) != toMap.end());
+
+ toMapIterator = toMap.emplace(eastl::piecewise_construct, eastl::make_tuple(13), eastl::make_tuple(1, 30, 100)); // 1 + 30 + 100 = 131
+ EATEST_VERIFY(toMapIterator->first == 13);
+ EATEST_VERIFY(toMapIterator->second.mX == 131);
+ EATEST_VERIFY(toMap.count(13) == 2);
+
+ // iterator t1A.emplace_hint(const_iterator position, Args&&... args);
+ TestObject to14(14);
+ EATEST_VERIFY(toMap.find(14) == toMap.end());
+ toMapIterator = toMap.emplace(14, to14);
+ EATEST_VERIFY(toMap.find(14) != toMap.end());
+
+ TestObject to141(141);
+ toMapIterator = toMap.emplace_hint(toMapIterator, 14, to141);
+ EATEST_VERIFY(toMapIterator->first == 14);
+ EATEST_VERIFY(toMapIterator->second.mX == 141);
+ EATEST_VERIFY(toMap.count(14) == 2);
+
+ TestObject to15(15);
+ EATEST_VERIFY(toMap.find(15) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), 15, to15); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 15);
+ EATEST_VERIFY(toMap.find(15) != toMap.end());
+
+ TestObject to16(16);
+ EATEST_VERIFY(toMap.find(16) == toMap.end());
+ toMapIterator = toMap.emplace(16, eastl::move(to16));
+ EATEST_VERIFY(toMap.find(16) != toMap.end());
+
+ TestObject to161(161);
+ toMapIterator = toMap.emplace_hint(toMapIterator, 16, eastl::move(to161));
+ EATEST_VERIFY(toMapIterator->first == 16);
+ EATEST_VERIFY(toMapIterator->second.mX == 161);
+ EATEST_VERIFY(toMap.count(16) == 2);
+
+ TestObject to17(17);
+ EATEST_VERIFY(toMap.find(17) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), 17, eastl::move(to17)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 17);
+ EATEST_VERIFY(toMap.find(17) != toMap.end());
+
+ EATEST_VERIFY(toMap.find(18) == toMap.end());
+ toMapIterator = toMap.emplace(eastl::piecewise_construct, eastl::make_tuple(18), eastl::make_tuple(3, 5, 10)); // 3 + 5 + 10 = 18
+ EATEST_VERIFY(toMap.find(18) != toMap.end());
+
+ toMapIterator = toMap.emplace_hint(toMapIterator, eastl::piecewise_construct, eastl::make_tuple(18), eastl::make_tuple(1, 80, 100)); // 1 + 80 + 100 = 181
+ EATEST_VERIFY(toMapIterator->first == 18);
+ EATEST_VERIFY(toMapIterator->second.mX == 181);
+ EATEST_VERIFY(toMap.count(18) == 2);
+
+ EATEST_VERIFY(toMap.find(19) == toMap.end());
+ toMapIterator = toMap.emplace_hint(toMap.begin(), eastl::piecewise_construct, eastl::make_tuple(19), eastl::make_tuple(4, 5, 10)); // 4 + 5 + 10 = 19 // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 19);
+ EATEST_VERIFY(toMap.find(19) != toMap.end());
+
+ // iterator t1A.insert(const_iterator position, const value_type& value);
+ TestObject to20(20);
+ value_type value20(20, to20);
+ EATEST_VERIFY(toMap.find(20) == toMap.end());
+ toMapIterator = toMap.emplace(value20);
+ EATEST_VERIFY(toMap.find(20) != toMap.end());
+
+ value_type value201(20, TestObject(201));
+ toMapIterator = toMap.insert(toMapIterator, value201);
+ EATEST_VERIFY(toMapIterator->first == 20);
+ EATEST_VERIFY(toMapIterator->second.mX == 201);
+ EATEST_VERIFY(toMap.count(20) == 2);
+
+ TestObject to21(21);
+ value_type value21(21, to21);
+ EATEST_VERIFY(toMap.find(21) == toMap.end());
+ toMapIterator = toMap.insert(toMap.begin(), value21); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(toMapIterator->first == 21);
+ EATEST_VERIFY(toMap.find(21) != toMap.end());
+
+ // void insert(std::initializer_list<value_type> ilist);
+ toMap.insert({ value_type(22, TestObject(22)), value_type(23, TestObject(23)), value_type(24, TestObject(24)), value_type(24, TestObject(241)) });
+ EATEST_VERIFY(toMap.find(22) != toMap.end());
+ EATEST_VERIFY(toMap.find(23) != toMap.end());
+ EATEST_VERIFY(toMap.count(24) == 2);
+
+ return nErrorCount;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMapCpp17
+//
+// This function is designed to work with map, fixed_map, hash_map, fixed_hash_map, unordered_map.
+//
+template <typename T1>
+int TestMapCpp17()
+{
+
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ typedef T1 TOMap;
+ typedef typename TOMap::mapped_type mapped_type;
+ typename TOMap::iterator toMapIterator;
+
+
+ {
+ // pair<iterator, bool> try_emplace (const key_type& k, Args&&... args);
+ // pair<iterator, bool> try_emplace (key_type&& k, Args&&... args);
+ // iterator try_emplace (const_iterator hint, const key_type& k, Args&&... args);
+ // iterator try_emplace (const_iterator hint, key_type&& k, Args&&... args);
+
+ TOMap toMap;
+
+ { // do initial insert
+ auto result = toMap.try_emplace(7, 7); // test fwding to conversion-ctor
+ VERIFY(result.second);
+ VERIFY(result.first->second == mapped_type(7));
+ VERIFY(toMap.size() == 1);
+ }
+
+ auto ctorCount = TestObject::sTOCtorCount;
+
+ { // verify duplicate not inserted
+ auto result = toMap.try_emplace(7, mapped_type(7)); // test fwding to copy-ctor
+ VERIFY(!result.second);
+ VERIFY(result.first->second == mapped_type(7));
+ VERIFY(toMap.size() == 1);
+
+ // we explicitly constructed an element for the parameter
+ // and one for the VERIFY check
+ ctorCount += 2;
+ VERIFY(ctorCount == TestObject::sTOCtorCount);
+ }
+
+ { // verify duplicate not inserted
+ auto hint = toMap.find(7);
+ auto result = toMap.try_emplace(hint, 7, 7); // test fwding to conversion-ctor
+ VERIFY(result->first == 7);
+ VERIFY(result->second == mapped_type(7));
+ VERIFY(toMap.size() == 1);
+ // we explicitly constructed an element for the VERIFY check
+ ++ctorCount;
+ VERIFY(ctorCount == TestObject::sTOCtorCount);
+ }
+
+ { // verify duplicate not inserted
+ auto hint = toMap.find(7);
+ auto result = toMap.try_emplace(hint, 7, mapped_type(7)); // test fwding to copy-ctor
+ VERIFY(result->first == 7);
+ VERIFY(result->second == mapped_type(7));
+ VERIFY(toMap.size() == 1);
+
+ // we explicitly constructed an element for the parameter
+ // and one for the VERIFY check
+ ctorCount += 2;
+ VERIFY(ctorCount == TestObject::sTOCtorCount);
+ }
+
+ {
+ {
+ auto result = toMap.try_emplace(8, 8);
+ // emplacing a new value should call exactly one constructor,
+ // when the value is constructed in place inside the container.
+ ++ctorCount;
+ VERIFY(result.second);
+ VERIFY(result.first->second == mapped_type(8));
+ // One more constructor for the temporary in the VERIFY
+ ++ctorCount;
+ VERIFY(toMap.size() == 2);
+ VERIFY(ctorCount == TestObject::sTOCtorCount);
+ }
+ {
+ auto result = toMap.try_emplace(9, mapped_type(9));
+ VERIFY(result.second);
+ VERIFY(result.first->second == mapped_type(9));
+ VERIFY(toMap.size() == 3);
+ // one more constructor for the temporary argument,
+ // one for moving it to the container, and one for the VERIFY
+ ctorCount += 3;
+ VERIFY(ctorCount == TestObject::sTOCtorCount);
+
+ }
+ }
+ }
+
+ {
+ // eastl::pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj);
+ // eastl::pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj);
+ // iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj);
+ // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj);
+
+ TOMap toMap;
+
+ {
+ // initial rvalue insert
+ auto result = toMap.insert_or_assign(3, mapped_type(3));
+ VERIFY(result.second);
+ VERIFY(toMap.size() == 1);
+ VERIFY(result.first->first == 3);
+ VERIFY(result.first->second == mapped_type(3));
+
+ // verify rvalue assign occurred
+ result = toMap.insert_or_assign(3, mapped_type(9));
+ VERIFY(!result.second);
+ VERIFY(toMap.size() == 1);
+ VERIFY(result.first->first == 3);
+ VERIFY(result.first->second == mapped_type(9));
+ }
+
+ {
+ mapped_type mt5(5);
+ mapped_type mt6(6);
+ mapped_type mt7(7);
+
+ {
+ // initial lvalue insert
+ auto result = toMap.insert_or_assign(5, mt5);
+ VERIFY(result.second);
+ VERIFY(toMap.size() == 2);
+ VERIFY(result.first->first == 5);
+ VERIFY(result.first->second == mt5);
+ }
+
+ {
+ // verify lvalue assign occurred
+ auto result = toMap.insert_or_assign(5, mt7);
+ VERIFY(!result.second);
+ VERIFY(toMap.size() == 2);
+ VERIFY(result.first->first == 5);
+ VERIFY(result.first->second == mt7);
+ }
+
+ {
+ // verify lvalue hints
+ auto hint = toMap.find(5);
+ auto result = toMap.insert_or_assign(hint, 6, mt6);
+ VERIFY(result != toMap.end());
+ VERIFY(toMap.size() == 3);
+ VERIFY(result->first == 6);
+ VERIFY(result->second == mt6);
+ }
+
+ {
+ // verify rvalue hints
+ auto hint = toMap.find(6);
+ auto result = toMap.insert_or_assign(hint, 7, mapped_type(7));
+ VERIFY(result != toMap.end());
+ VERIFY(toMap.size() == 4);
+ VERIFY(result->first == 7);
+ VERIFY(result->second == mapped_type(7));
+ }
+ }
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+
+
+template<typename HashContainer>
+struct HashContainerReserveTest
+{
+ int operator()()
+ {
+ int nErrorCount = 0;
+
+ HashContainer hashContainer;
+
+ const typename HashContainer::size_type reserve_sizes[] = {16, 128, 4096, 32768};
+ for (auto& reserve_size : reserve_sizes)
+ {
+ hashContainer.reserve(reserve_size);
+
+ // verify bucket count and hashtable load_factor requirements
+ VERIFY(hashContainer.bucket_count() >= reserve_size);
+ VERIFY(hashContainer.load_factor() <= ceilf(reserve_size / hashContainer.get_max_load_factor()));
+ }
+
+ return nErrorCount;
+ }
+};
+
+
+
+
+
diff --git a/EASTL/test/source/TestMemory.cpp b/EASTL/test/source/TestMemory.cpp
new file mode 100644
index 0000000..77caf9f
--- /dev/null
+++ b/EASTL/test/source/TestMemory.cpp
@@ -0,0 +1,775 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/memory.h>
+#include <EASTL/utility.h>
+#include <EASTL/vector.h>
+#include <EAStdC/EAMemory.h>
+#include <EAStdC/EAAlignment.h>
+
+
+// Regression for user reported operator new problem (12/8/2009):
+class AssetHandler
+{
+public:
+ inline static void* operator new(size_t size, const char* /*text*/, unsigned int /*flags*/)
+ {
+ return ::operator new(size);
+ }
+ inline static void operator delete(void* p)
+ {
+ return ::operator delete(p);
+ }
+};
+typedef eastl::vector<AssetHandler> AssetHandlerArray;
+
+// Regression test for a default memory fill optimization that defers to memset instead of explicitly
+// value-initialization each element in a vector individually. This test ensures that the value of the memset is
+// consistent with an explicitly value-initialized element (namely when the container holds a scalar value that is
+// memset to zero).
+template <typename T>
+int TestValueInitOptimization()
+{
+ int nErrorCount = 0;
+ const int ELEM_COUNT = 100;
+
+ {
+ eastl::vector<T> v1;
+ eastl::vector<ValueInitOf<T>> v2;
+
+ v1.resize(ELEM_COUNT);
+ v2.resize(ELEM_COUNT);
+
+ for (int i = 0; i < ELEM_COUNT; i++)
+ { EATEST_VERIFY(v1[i] == v2[i].get()); }
+ }
+
+ {
+ eastl::vector<T> v1(ELEM_COUNT);
+ eastl::vector<ValueInitOf<T>> v2(ELEM_COUNT);
+
+ for (int i = 0; i < ELEM_COUNT; i++)
+ { EATEST_VERIFY(v1[i] == v2[i].get()); }
+ }
+
+ EATEST_VERIFY(nErrorCount == 0);
+ return nErrorCount;
+}
+
+
+// LCTestObject
+//
+// Helps test the late_constructed utility.
+// Has an unusual alignment so we can test that aspect of late_constructed.
+//
+struct EA_ALIGN(64) LCTestObject
+{
+ int mX; //
+ static int64_t sTOCount; // Count of all current existing objects.
+ static int64_t sTOCtorCount; // Count of times any ctor was called.
+ static int64_t sTODtorCount; // Count of times dtor was called.
+
+ explicit LCTestObject(int x = 0)
+ : mX(x)
+ {
+ ++sTOCount;
+ ++sTOCtorCount;
+ }
+
+ LCTestObject(int x0, int x1, int x2)
+ : mX(x0 + x1 + x2)
+ {
+ ++sTOCount;
+ ++sTOCtorCount;
+ }
+
+ LCTestObject(const LCTestObject& testObject)
+ : mX(testObject.mX)
+ {
+ ++sTOCount;
+ ++sTOCtorCount;
+ }
+
+ #if !defined(EA_COMPILER_NO_RVALUE_REFERENCES)
+ LCTestObject(TestObject&& testObject)
+ : mX(testObject.mX)
+ {
+ ++sTOCount;
+ ++sTOCtorCount;
+ }
+ #endif
+
+ LCTestObject& operator=(const LCTestObject& testObject)
+ {
+ mX = testObject.mX;
+ return *this;
+ }
+
+ #if !defined(EA_COMPILER_NO_RVALUE_REFERENCES)
+ LCTestObject& operator=(LCTestObject&& testObject)
+ {
+ eastl::swap(mX, testObject.mX);
+ return *this;
+ }
+ #endif
+
+ ~LCTestObject()
+ {
+ --sTOCount;
+ ++sTODtorCount;
+ }
+};
+
+int64_t LCTestObject::sTOCount = 0;
+int64_t LCTestObject::sTOCtorCount = 0;
+int64_t LCTestObject::sTODtorCount = 0;
+
+
+eastl::late_constructed<LCTestObject, true, true> gLCTestObjectTrueTrue;
+eastl::late_constructed<LCTestObject, false, true> gLCTestObjectFalseTrue;
+eastl::late_constructed<LCTestObject, false, false> gLCTestObjectFalseFalse;
+eastl::late_constructed<LCTestObject, true, false> gLCTestObjectTrueFalse;
+
+struct TypeWithPointerTraits {};
+
+namespace eastl
+{
+ template <>
+ struct pointer_traits<TypeWithPointerTraits>
+ {
+ // Note: only parts of the traits we are interested to test are defined here.
+ static const int* to_address(TypeWithPointerTraits)
+ {
+ return &a;
+ }
+
+ inline static constexpr int a = 42;
+ };
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMemory
+//
+int TestMemory()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ {
+ // get_temporary_buffer(ptrdiff_t n, size_t alignment, size_t alignmentOffset, char* pName);
+
+ pair<int*, ptrdiff_t> pr1 = get_temporary_buffer<int>(100, 1, 0, EASTL_NAME_VAL("Temp int array"));
+ memset(pr1.first, 0, 100 * sizeof(int));
+ return_temporary_buffer(pr1.first);
+
+ // Note that
+ pair<TestObject*, ptrdiff_t> pr2 = get_temporary_buffer<TestObject>(300);
+ memset(pr2.first, 0, 300 * sizeof(TestObject));
+ return_temporary_buffer(pr2.first, pr2.second);
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ {
+ LCTestObject* pLCTO;
+
+ LCTestObject::sTOCount = 0;
+ LCTestObject::sTOCtorCount = 0;
+ LCTestObject::sTODtorCount = 0;
+
+ // Verify alignment requirements.
+ // We don't verify that gLCTestObjectTrueTrue.get() is aligned for all platforms because some platforms can't do that with global memory.
+ static_assert(eastl::alignment_of<typename late_constructed<LCTestObject>::value_type>::value == 64, "late_constructed alignment failure.");
+ static_assert(eastl::alignment_of<typename late_constructed<LCTestObject>::storage_type>::value == 64, "late_constructed alignment failure.");
+ static_assert(eastl::alignment_of<late_constructed<LCTestObject> >::value >= 64, "late_constructed alignment failure.");
+
+
+ // late_constructed / gLCTestObjectTrueTrue
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 0) && (LCTestObject::sTODtorCount == 0));
+ EATEST_VERIFY(!gLCTestObjectTrueTrue.is_constructed());
+
+ pLCTO = gLCTestObjectTrueTrue.get(); // This will auto-construct LCTestObject.
+ EATEST_VERIFY(pLCTO != NULL);
+ EATEST_VERIFY(gLCTestObjectTrueTrue.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectTrueTrue->mX = 17;
+ EATEST_VERIFY(gLCTestObjectTrueTrue->mX == 17);
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectTrueTrue.destruct();
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 1));
+ EATEST_VERIFY(!gLCTestObjectTrueTrue.is_constructed());
+
+ gLCTestObjectTrueTrue->mX = 18;
+ EATEST_VERIFY(gLCTestObjectTrueTrue->mX == 18);
+ EATEST_VERIFY(gLCTestObjectTrueTrue.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 2) && (LCTestObject::sTODtorCount == 1));
+
+ gLCTestObjectTrueTrue.destruct();
+ (*gLCTestObjectTrueTrue).mX = 19;
+ EATEST_VERIFY(gLCTestObjectTrueTrue->mX == 19);
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 3) && (LCTestObject::sTODtorCount == 2));
+
+ gLCTestObjectTrueTrue.destruct();
+ LCTestObject::sTOCount = 0;
+ LCTestObject::sTOCtorCount = 0;
+ LCTestObject::sTODtorCount = 0;
+
+ // late_constructed / gLCTestObjectFalseTrue
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 0) && (LCTestObject::sTODtorCount == 0));
+ EATEST_VERIFY(!gLCTestObjectFalseTrue.is_constructed());
+
+ pLCTO = gLCTestObjectFalseTrue.get(); // This will not auto-construct LCTestObject.
+ EATEST_VERIFY(pLCTO == NULL);
+ EATEST_VERIFY(!gLCTestObjectFalseTrue.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 0) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectFalseTrue.construct();
+ pLCTO = gLCTestObjectFalseTrue.get();
+ EATEST_VERIFY(pLCTO != NULL);
+ EATEST_VERIFY(gLCTestObjectFalseTrue.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectFalseTrue->mX = 17;
+ EATEST_VERIFY(gLCTestObjectFalseTrue->mX == 17);
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectFalseTrue.destruct();
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 1));
+ EATEST_VERIFY(!gLCTestObjectFalseTrue.is_constructed());
+
+ gLCTestObjectFalseTrue.construct(14);
+ EATEST_VERIFY(gLCTestObjectFalseTrue->mX == 14);
+ gLCTestObjectFalseTrue->mX = 18;
+ EATEST_VERIFY(gLCTestObjectFalseTrue->mX == 18);
+ EATEST_VERIFY(gLCTestObjectFalseTrue.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 2) && (LCTestObject::sTODtorCount == 1));
+
+ gLCTestObjectFalseTrue.destruct();
+ gLCTestObjectFalseTrue.construct(10, 20, 30);
+ EATEST_VERIFY(gLCTestObjectFalseTrue->mX == 10+20+30);
+ (*gLCTestObjectFalseTrue).mX = 19;
+ EATEST_VERIFY(gLCTestObjectFalseTrue->mX == 19);
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 3) && (LCTestObject::sTODtorCount == 2));
+
+ gLCTestObjectFalseTrue.destruct();
+ }
+
+ {
+ LCTestObject* pLCTO;
+
+ LCTestObject::sTOCount = 0;
+ LCTestObject::sTOCtorCount = 0;
+ LCTestObject::sTODtorCount = 0;
+
+ // Verify alignment requirements.
+ // We don't verify that gLCTestObjectTrueTrue.get() is aligned for all platforms because some platforms can't do that with global memory.
+ static_assert(eastl::alignment_of<typename late_constructed<LCTestObject>::value_type>::value == 64, "late_constructed alignment failure.");
+ static_assert(eastl::alignment_of<typename late_constructed<LCTestObject>::storage_type>::value == 64, "late_constructed alignment failure.");
+ static_assert(eastl::alignment_of<late_constructed<LCTestObject> >::value >= 64, "late_constructed alignment failure.");
+
+
+ // late_constructed / gLCTestObjectTrueFalse
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 0) && (LCTestObject::sTODtorCount == 0));
+ EATEST_VERIFY(!gLCTestObjectTrueFalse.is_constructed());
+
+ pLCTO = gLCTestObjectTrueFalse.get(); // This will auto-construct LCTestObject.
+ EATEST_VERIFY(pLCTO != NULL);
+ EATEST_VERIFY(gLCTestObjectTrueFalse.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectTrueFalse->mX = 17;
+ EATEST_VERIFY(gLCTestObjectTrueFalse->mX == 17);
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectTrueFalse.destruct();
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 1));
+ EATEST_VERIFY(!gLCTestObjectTrueFalse.is_constructed());
+
+ gLCTestObjectTrueFalse->mX = 18;
+ EATEST_VERIFY(gLCTestObjectTrueFalse->mX == 18);
+ EATEST_VERIFY(gLCTestObjectTrueFalse.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 2) && (LCTestObject::sTODtorCount == 1));
+
+ gLCTestObjectTrueFalse.destruct();
+ (*gLCTestObjectTrueFalse).mX = 19;
+ EATEST_VERIFY(gLCTestObjectTrueFalse->mX == 19);
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 3) && (LCTestObject::sTODtorCount == 2));
+
+ gLCTestObjectTrueFalse.destruct();
+ LCTestObject::sTOCount = 0;
+ LCTestObject::sTOCtorCount = 0;
+ LCTestObject::sTODtorCount = 0;
+
+ // late_constructed / gLCTestObjectFalseFalse
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 0) && (LCTestObject::sTODtorCount == 0));
+ EATEST_VERIFY(!gLCTestObjectFalseFalse.is_constructed());
+
+ pLCTO = gLCTestObjectFalseFalse.get(); // This will not auto-construct LCTestObject.
+ EATEST_VERIFY(pLCTO == NULL);
+ EATEST_VERIFY(!gLCTestObjectFalseFalse.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 0) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectFalseFalse.construct();
+ pLCTO = gLCTestObjectFalseFalse.get();
+ EATEST_VERIFY(pLCTO != NULL);
+ EATEST_VERIFY(gLCTestObjectFalseFalse.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectFalseFalse->mX = 17;
+ EATEST_VERIFY(gLCTestObjectFalseFalse->mX == 17);
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ gLCTestObjectFalseFalse.destruct();
+ EATEST_VERIFY((LCTestObject::sTOCount == 0) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 1));
+ EATEST_VERIFY(!gLCTestObjectFalseFalse.is_constructed());
+
+ gLCTestObjectFalseFalse.construct(14);
+ EATEST_VERIFY(gLCTestObjectFalseFalse->mX == 14);
+ gLCTestObjectFalseFalse->mX = 18;
+ EATEST_VERIFY(gLCTestObjectFalseFalse->mX == 18);
+ EATEST_VERIFY(gLCTestObjectFalseFalse.is_constructed());
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 2) && (LCTestObject::sTODtorCount == 1));
+
+ gLCTestObjectFalseFalse.destruct();
+ gLCTestObjectFalseFalse.construct(10, 20, 30);
+ EATEST_VERIFY(gLCTestObjectFalseFalse->mX == 10+20+30);
+ (*gLCTestObjectFalseFalse).mX = 19;
+ EATEST_VERIFY(gLCTestObjectFalseFalse->mX == 19);
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 3) && (LCTestObject::sTODtorCount == 2));
+
+ gLCTestObjectFalseFalse.destruct();
+ }
+
+ LCTestObject::sTOCount = 0;
+ LCTestObject::sTOCtorCount = 0;
+ LCTestObject::sTODtorCount = 0;
+ {
+ eastl::late_constructed<LCTestObject, true, false> lc;
+ lc.construct();
+ }
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+ LCTestObject::sTOCount = 0;
+ LCTestObject::sTOCtorCount = 0;
+ LCTestObject::sTODtorCount = 0;
+ {
+ eastl::late_constructed<LCTestObject, false, false> lc;
+ lc.construct();
+ }
+ EATEST_VERIFY((LCTestObject::sTOCount == 1) && (LCTestObject::sTOCtorCount == 1) && (LCTestObject::sTODtorCount == 0));
+
+
+ // We use the vector container to supply a RandomAccessIterator.
+ // We use the list container to supply a BidirectionalIterator.
+ // We use the slist container to supply a ForwardIterator.
+ // We use our generic_input_iterator adapter to supply an InputIterator.
+
+ // eastl::vector<int> intVector;
+ // eastl::list<int> intList;
+ // eastl::slist<int> intSlist;
+
+ // template <typename ForwardIterator, typename ForwardIteratorDest>
+ // inline ForwardIteratorDest uninitialized_relocate_start(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+
+ // template <typename ForwardIterator, typename ForwardIteratorDest>
+ // inline ForwardIteratorDest uninitialized_relocate_commit(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+
+ // template <typename ForwardIterator, typename ForwardIteratorDest>
+ // inline ForwardIteratorDest uninitialized_relocate_abort(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+
+ // template <typename ForwardIterator, typename ForwardIteratorDest>
+ // inline ForwardIteratorDest uninitialized_relocate(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+
+ // This test does little more than verify that the code compiles.
+ int* pEnd = eastl::uninitialized_relocate_start<int*, int*>((int*)NULL, (int*)NULL, (int*)NULL);
+ EATEST_VERIFY(pEnd == NULL);
+
+ pEnd = eastl::uninitialized_relocate_commit<int*, int*>((int*)NULL, (int*)NULL, (int*)NULL);
+ EATEST_VERIFY(pEnd == NULL);
+
+ pEnd = eastl::uninitialized_relocate_abort<int*, int*>((int*)NULL, (int*)NULL, (int*)NULL);
+ EATEST_VERIFY(pEnd == NULL);
+
+ pEnd = eastl::uninitialized_relocate<int*, int*>((int*)NULL, (int*)NULL, (int*)NULL);
+ EATEST_VERIFY(pEnd == NULL);
+
+
+
+ // template <typename InputIterator, typename ForwardIterator>
+ // ForwardIterator uninitialized_copy(InputIterator sourceFirst, InputIterator sourceLast, ForwardIterator destination);
+
+ pEnd = eastl::uninitialized_copy<int*, int*>((int*)NULL, (int*)NULL, (int*)NULL);
+ EATEST_VERIFY(pEnd == NULL);
+
+
+
+ // template <typename First, typename Last, typename Result>
+ // Result uninitialized_copy_ptr(First first, Last last, Result result)
+
+ pEnd = eastl::uninitialized_copy_ptr<int*, int*, int*>((int*)NULL, (int*)NULL, (int*)NULL);
+ EATEST_VERIFY(pEnd == NULL);
+
+
+
+ // template <typename ForwardIterator, typename T>
+ // void uninitialized_fill(ForwardIterator first, ForwardIterator last, const T& value)
+
+ eastl::uninitialized_fill<int*, int>((int*)NULL, (int*)NULL, (int)0);
+
+
+
+ // template <typename T>
+ // void uninitialized_fill_ptr(T* first, T* last, const T& value)
+
+ eastl::uninitialized_fill_ptr<int>((int*)NULL, (int*)NULL, (int)0);
+
+
+
+ // template <typename ForwardIterator, typename Count, typename T>
+ // void uninitialized_fill_n(ForwardIterator first, Count n, const T& value)
+
+ eastl::uninitialized_fill_n<int*, int, int>((int*)NULL, (int)0, (int)0);
+
+
+
+ // template <typename T, typename Count>
+ // void uninitialized_fill_n_ptr(T* first, Count n, const T& value)
+
+ eastl::uninitialized_fill_n_ptr<int, int>((int*)NULL, (int)0, (int)0);
+
+
+
+
+ // template <typename InputIterator, typename ForwardIterator, typename T>
+ // void uninitialized_copy_fill(InputIterator first1, InputIterator last1,
+ // ForwardIterator first2, ForwardIterator last2, const T& value)
+
+ eastl::uninitialized_copy_fill<int*, int*, int>((int*)NULL, (int*)NULL, (int*)NULL, (int*)NULL, (int)0);
+
+
+
+ // template <typename ForwardIterator, typename T, typename InputIterator>
+ // ForwardIterator uninitialized_fill_copy(ForwardIterator result, ForwardIterator mid, const T& value, InputIterator first, InputIterator last)
+
+ eastl::uninitialized_fill_copy<int*, int, int*>((int*)NULL, (int*)NULL, (int)0, (int*)NULL, (int*)NULL);
+
+
+
+ // template <typename InputIterator1, typename InputIterator2, typename ForwardIterator>
+ // ForwardIterator uninitialized_copy_copy(InputIterator1 first1, InputIterator1 last1,
+ // InputIterator2 first2, InputIterator2 last2,
+ // ForwardIterator result)
+
+ eastl::uninitialized_copy_copy<int*, int*, int*>((int*)NULL, (int*)NULL, (int*)NULL, (int*)NULL, (int*)NULL);
+
+ // uninitialized_default_construct
+ {
+ TestObject::Reset();
+ char testCharArray[sizeof(TestObject) * 10];
+ TestObject* pTestMemory = (TestObject*)(testCharArray);
+
+ eastl::uninitialized_default_construct(pTestMemory, pTestMemory + 10);
+ EATEST_VERIFY(TestObject::sTODefaultCtorCount == 10);
+ }
+
+ // uninitialized_default_construct_n
+ {
+ TestObject::Reset();
+ char testCharArray[sizeof(TestObject) * 10];
+ TestObject* pTestMemory = (TestObject*)(testCharArray);
+
+ auto endIter = eastl::uninitialized_default_construct_n(pTestMemory, 5);
+ EATEST_VERIFY(TestObject::sTODefaultCtorCount == 5);
+ EATEST_VERIFY(endIter == (pTestMemory + 5));
+ }
+
+ // uninitialized_value_construct
+ {
+ TestObject::Reset();
+ char testCharArray[sizeof(TestObject) * 10];
+ TestObject* pTestMemory = (TestObject*)(testCharArray);
+
+ eastl::uninitialized_value_construct(pTestMemory, pTestMemory + 10);
+ EATEST_VERIFY(TestObject::sTODefaultCtorCount == 10);
+ }
+
+ // uninitialized_value_construct_n
+ {
+ TestObject::Reset();
+ char testCharArray[sizeof(TestObject) * 10];
+ TestObject* pTestMemory = (TestObject*)(testCharArray);
+
+ auto endIter = eastl::uninitialized_value_construct_n(pTestMemory, 5);
+ EATEST_VERIFY(TestObject::sTODefaultCtorCount == 5);
+ EATEST_VERIFY(endIter == (pTestMemory + 5));
+ }
+
+ // Verify that uninitialized_value_construct does not do any additional initialization besides zero-initialization.
+ //
+ /// Value-Initialization:
+ // If T is a class, the object is default-initialized (after being zero-initialized if T's default
+ // constructor is not user-provided/deleted); otherwise, the object is zero-initialized.
+ {
+ struct foo
+ {
+ // foo() = default; // intentionally removed to force zero-initialization behavior
+ char mV;
+ };
+
+ static const int ARRAY_SIZE_IN_BYTES = sizeof(foo) * 10;
+
+ char testCharArray[ARRAY_SIZE_IN_BYTES];
+ EA::StdC::Memfill8(testCharArray, 42, ARRAY_SIZE_IN_BYTES);
+ foo* pTestMemory = (foo*)testCharArray;
+
+ eastl::uninitialized_value_construct(pTestMemory, pTestMemory + 10);
+
+ for (int i = 0; i < 10; i++)
+ {
+ EATEST_VERIFY(pTestMemory[i].mV == 0); // verify that memory is zero-initialized
+ }
+ }
+
+ // Verify that uninitialized_default_construct does not do any additional initialization besides the calling of a empty
+ // constructor.
+ //
+ // Default-initialization:
+ // If T is a class, the default constructor is called; otherwise, no initialization is done, resulting in
+ // indeterminate values.
+ {
+ struct foo
+ {
+ foo() {} // default ctor intentionally a no-op
+ char mV;
+ };
+
+ static const int ARRAY_SIZE_IN_BYTES = sizeof(foo) * 10;
+
+ char testCharArray[ARRAY_SIZE_IN_BYTES];
+ EA::StdC::Memfill8(testCharArray, 42, ARRAY_SIZE_IN_BYTES);
+ foo* pTestMemory = (foo*)testCharArray;
+
+ eastl::uninitialized_default_construct(pTestMemory, pTestMemory + 10);
+
+ for (int i = 0; i < 10; i++)
+ {
+ EATEST_VERIFY(pTestMemory[i].mV == 42); // verify original memset value is intact
+ }
+ }
+
+ // template <typename T>
+ // void destruct(T* p)
+ {
+ TestObject::Reset();
+ uint64_t testObjectMemory[((sizeof(TestObject) / sizeof(uint64_t)) + 1) * 2];
+
+ TestObject* pTestObject = new(testObjectMemory) TestObject;
+ destruct(pTestObject);
+ EATEST_VERIFY(TestObject::IsClear());
+ }
+
+ // template <typename T>
+ // void destroy_at(T* p)
+ {
+ TestObject::Reset();
+ uint64_t testObjectMemory[((sizeof(TestObject) / sizeof(uint64_t)) + 1) * 2];
+ TestObject* pTestObject = new(testObjectMemory) TestObject;
+ destroy_at(pTestObject);
+
+ EATEST_VERIFY(TestObject::IsClear());
+ }
+
+
+ // template <typename ForwardIterator>
+ // void destruct(ForwardIterator first, ForwardIterator last)
+ {
+ TestObject::Reset();
+ char testObjectMemory[sizeof(TestObject) * 3];
+ TestObject* pTestObject = new(testObjectMemory) TestObject[2];
+ destruct(pTestObject, pTestObject + 2);
+
+ EATEST_VERIFY(TestObject::IsClear());
+ }
+
+ // template <typename ForwardIterator>
+ // void destroy(ForwardIterator first, ForwardIterator last)
+ {
+ TestObject::Reset();
+ char testObjectMemory[sizeof(TestObject) * 3];
+ TestObject* pTestObject = new(testObjectMemory) TestObject[2];
+ destroy(pTestObject, pTestObject + 2);
+
+ EATEST_VERIFY(TestObject::IsClear());
+ }
+
+ // template <typename ForwardIterator, typename Size>
+ // void destroy_n(ForwardIterator first, Size n)
+ {
+ TestObject::Reset();
+ char testObjectMemory[sizeof(TestObject) * 3];
+ TestObject* pTestObject = new (testObjectMemory) TestObject[2];
+
+ destroy_n(pTestObject, 1); // destroy TestObject[0]
+ destroy_n(pTestObject + 1, 1); // destroy TestObject[1]
+
+ EATEST_VERIFY(TestObject::IsClear());
+ }
+
+
+ {
+ // Regression for user reported operator new problem (12/8/2009):
+ eastl::vector<AssetHandler> ahArray;
+ ahArray.push_back(AssetHandler());
+ }
+
+
+ // void* align(size_t alignment, size_t size, void*& ptr, size_t& space);
+ // void* align_advance(size_t alignment, size_t size, void*& ptr, size_t& space);
+ {
+ const size_t kBufferSize = 256;
+ char buffer[kBufferSize * 2];
+ size_t space = sizeof(buffer);
+ void* ptr = buffer;
+ void* ptrSaved;
+ void* ptrAligned;
+ size_t i;
+
+ // First get 256 bytes of space aligned to 256.
+ // It's a coincidence that we are using eastl::align to set up a buffer for testing eastl::align below.
+ ptrSaved = eastl::align(256, 256, ptr, space);
+
+ // At this point we have 256 bytes of memory aligned on 256 bytes, within buffer.
+ // We test allocating multiple blocks from this space at various alignments values.
+ // We also test that the function sets ptr to the next available location after the
+ // returned allocated block.
+ EA::StdC::Memset8(buffer, 0x00, sizeof(buffer));
+ EATEST_VERIFY(EA::StdC::IsAligned(ptr, 256));
+
+ // align test
+ // Try a number of allocation sizes.
+ for(size_t a = 1; a < 64; a *= 2)
+ {
+ // Do multiple sequental allocations from the storage.
+ for(i = 0, space = 256, ptr = ptrSaved; i < kBufferSize; i += a)
+ {
+ ptrAligned = eastl::align(a, a, ptr, space);
+
+ EATEST_VERIFY((uintptr_t)ptrAligned == ((uintptr_t)ptrSaved + i));
+ EATEST_VERIFY(ptr == ptrAligned);
+ EATEST_VERIFY(space == (kBufferSize - i));
+ EATEST_VERIFY(EA::StdC::IsAligned(ptrAligned, a));
+ EATEST_VERIFY(EA::StdC::Memcheck8(ptrAligned, 0x00, a) == NULL);
+
+ ptr = (char*)ptr + a;
+ space -= a;
+ memset(ptrAligned, 0xff, a); // Do this so that next time around we can verify this memory isn't returned.
+ }
+
+ EA::StdC::Memset8(buffer, 0x00, sizeof(buffer));
+ }
+
+ // align_advance test (similar to but not identical to the align test)
+ // Try a number of allocation sizes.
+ for(size_t a = 1; a < 64; a *= 2)
+ {
+ // Do multiple sequental allocations from the storage.
+ for(i = 0, space = 256, ptr = ptrSaved; i < kBufferSize; i += a)
+ {
+ ptrAligned = eastl::align_advance(a, a, ptr, space, &ptr, &space);
+
+ EATEST_VERIFY((uintptr_t)ptrAligned == ((uintptr_t)ptrSaved + i));
+ EATEST_VERIFY((uintptr_t)ptr == (uintptr_t)ptrAligned + a);
+ EATEST_VERIFY(space == (kBufferSize - i) - a);
+ EATEST_VERIFY(EA::StdC::IsAligned(ptrAligned, a));
+ EATEST_VERIFY(EA::StdC::Memcheck8(ptrAligned, 0x00, a) == NULL);
+
+ memset(ptrAligned, 0xff, a); // Do this so that next time around we can verify this memory isn't returned.
+ }
+
+ EA::StdC::Memset8(buffer, 0x00, sizeof(buffer));
+ }
+ }
+
+ // to_address
+ {
+ // Normal pointers.
+ int a;
+ int* ptrA = &a;
+ EATEST_VERIFY(ptrA == to_address(ptrA));
+
+ // Smart pointer.
+ struct MockSmartPointer
+ {
+ const int* operator->() const
+ {
+ return &a;
+ }
+
+ int a = 42;
+ };
+
+ MockSmartPointer sp;
+ EATEST_VERIFY(&sp.a == to_address(sp));
+
+ // Type with specialized pointer_traits.
+ TypeWithPointerTraits t;
+ const int* result = to_address(t);
+ EATEST_VERIFY(result != nullptr && *result == 42);
+ }
+
+ {
+ // Test that align handles integral overflow correctly and returns NULL.
+ void* ptr;
+ void* ptrSaved;
+ size_t space;
+ void* pResult;
+
+ space = 64;
+ ptr = 0;
+ ptr = (char*)ptr - space;
+ ptrSaved = ptr;
+ pResult = eastl::align(1, space + 1, ptr, space); // Possible alignment, impossible size due to wraparound.
+ EATEST_VERIFY((pResult == NULL) && (ptr == ptrSaved));
+
+ space = 64;
+ ptr = 0;
+ ptr = (char*)ptr - space;
+ ptrSaved = ptr;
+ pResult = eastl::align(space * 2, 32, ptr, space); // Impossible alignment due to wraparound, possible size.
+ EATEST_VERIFY((pResult == NULL) && (ptr == ptrSaved));
+ }
+
+ {
+ nErrorCount += TestValueInitOptimization<int>();
+ nErrorCount += TestValueInitOptimization<char>();
+ nErrorCount += TestValueInitOptimization<short>();
+ nErrorCount += TestValueInitOptimization<float>();
+ nErrorCount += TestValueInitOptimization<double>();
+ nErrorCount += TestValueInitOptimization<void*>();
+ }
+
+ EATEST_VERIFY(nErrorCount == 0);
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestMeta.cpp b/EASTL/test/source/TestMeta.cpp
new file mode 100644
index 0000000..8d2e9d1
--- /dev/null
+++ b/EASTL/test/source/TestMeta.cpp
@@ -0,0 +1,120 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+
+#ifdef EA_COMPILER_CPP14_ENABLED
+#include "ConceptImpls.h"
+#include <EASTL/meta.h>
+
+
+int TestGetTypeIndex()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ static_assert(meta::get_type_index_v<short, short, char, int> == 0, "error");
+ static_assert(meta::get_type_index_v<char, short, char, int> == 1, "error");
+ static_assert(meta::get_type_index_v<int, short, char, int> == 2, "error");
+ static_assert(meta::get_type_index_v<int, int, int, int> == 0, "error");
+
+ return nErrorCount;
+}
+
+int TestGetType()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ static_assert(is_same_v<meta::get_type_at_t<2, short, short, char, int>, char>, "error");
+ static_assert(is_same_v<meta::get_type_at_t<3, char, short, char, int>, int>, "error");
+ // static_assert(is_same_v<meta::get_type_at_t<4, int, short, char, int>, int>, "error");
+ static_assert(is_same_v<meta::get_type_at_t<1, int, int, int, int>, int>, "error");
+
+ return nErrorCount;
+}
+
+int TestTypeCount()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ static_assert(meta::type_count_v<short, short, char, int> == 1, "error");
+ static_assert(meta::type_count_v<char, short, char, int> == 1, "error");
+ static_assert(meta::type_count_v<int, short, char, int> == 1, "error");
+ static_assert(meta::type_count_v<int, int, int, int> == 3, "error");
+ static_assert(meta::type_count_v<int, int, int, int, int, int, int, int, int> == 8, "error");
+ static_assert(meta::type_count_v<int, int, int, int, char, int, int, int, int> == 7, "error");
+ static_assert(meta::type_count_v<int, int, char, int, char, int, int, int, int> == 6, "error");
+ static_assert(meta::type_count_v<int, int, char, int, char, int, int, int, char> == 5, "error");
+ static_assert(meta::type_count_v<int, int, char, int, char, int, const int, int, char> == 4, "error");
+ static_assert(meta::type_count_v<int, volatile int, char, int, char, int, const int, const volatile int, char> == 2, "error");
+
+ return nErrorCount;
+}
+
+int TestDuplicateTypeCheck()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ static_assert( meta::duplicate_type_check_v<short, short, char, int>, "error");
+ static_assert( meta::duplicate_type_check_v<short, short, char, int, long, unsigned, long long>, "error");
+ static_assert( meta::duplicate_type_check_v<int, const int, volatile int, const volatile int, int>, "error");
+ static_assert(!meta::duplicate_type_check_v<short, short, char, int, long, unsigned, short, long long>, "error");
+
+ return nErrorCount;
+}
+
+int TestOverloadResolution()
+{
+ using namespace eastl;
+ using namespace eastl::meta;
+
+ int nErrorCount = 0;
+
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<int>>, int>, "error");
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<short>>, short>, "error");
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<long>>, long>, "error");
+ static_assert(is_same_v<overload_resolution_t<short, overload_set<int>>, int>, "error");
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<int, short, long>>, int>, "error");
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<short, int, long, float>>, int>, "error");
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<short, long, int, float, char>>, int>, "error");
+
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<int>>, int>, "error");
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<int, short>>, int>, "error");
+ static_assert(is_same_v<overload_resolution_t<int, overload_set<int, short, long>>, int>, "error");
+
+ return nErrorCount;
+}
+
+
+int TestMeta()
+{
+ int nErrorCount = 0;
+
+ nErrorCount += TestGetTypeIndex();
+ nErrorCount += TestGetType();
+ nErrorCount += TestTypeCount();
+ nErrorCount += TestDuplicateTypeCheck();
+ nErrorCount += TestOverloadResolution();
+
+ return nErrorCount;
+}
+
+#endif // EA_COMPILER_CPP14_ENABLED
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestNumericLimits.cpp b/EASTL/test/source/TestNumericLimits.cpp
new file mode 100644
index 0000000..1964442
--- /dev/null
+++ b/EASTL/test/source/TestNumericLimits.cpp
@@ -0,0 +1,159 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/numeric_limits.h>
+
+
+struct NonNumericType
+{
+ NonNumericType(int value) : mValue(value){}
+ bool operator==(int value) const { return mValue == value; }
+ int mValue; // This exists for the purpose of allowing the type to act like a number and allow the test logic below to work.
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestNumericLimits
+//
+int TestNumericLimits()
+{
+ int nErrorCount = 0;
+
+ // To consider: Some day when we get more time, make a big table-driven set of
+ // expected results to all member variables and function calls.
+
+ // Test a type that is not numeric,.
+ EATEST_VERIFY(!eastl::numeric_limits<NonNumericType>::is_bounded);
+ EATEST_VERIFY( eastl::numeric_limits<NonNumericType>::max() == 0);
+
+ EATEST_VERIFY(!eastl::numeric_limits<const NonNumericType>::is_bounded);
+ EATEST_VERIFY( eastl::numeric_limits<const NonNumericType>::max() == 0);
+
+ EATEST_VERIFY(!eastl::numeric_limits<volatile NonNumericType>::is_bounded);
+ EATEST_VERIFY( eastl::numeric_limits<volatile NonNumericType>::max() == 0);
+
+ EATEST_VERIFY(!eastl::numeric_limits<const volatile NonNumericType>::is_bounded);
+ EATEST_VERIFY( eastl::numeric_limits<const volatile NonNumericType>::max() == 0);
+
+ // Test bool in all const-volatile variants.
+ EATEST_VERIFY(eastl::numeric_limits<bool>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<bool>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<const bool>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<const bool>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<volatile bool>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<volatile bool>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<const volatile bool>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<const volatile bool>::max() != 0);
+
+ // Do basic tests of the remaining types.
+ EATEST_VERIFY(eastl::numeric_limits<char>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<char>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<unsigned char>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<unsigned char>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<signed char>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<signed char>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<wchar_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<wchar_t>::max() != 0);
+
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ EATEST_VERIFY(eastl::numeric_limits<char8_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<char8_t>::max() != 0);
+ #endif
+
+ EATEST_VERIFY(eastl::numeric_limits<char16_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<char16_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<char32_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<char32_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<unsigned short>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<unsigned short>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<signed short>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<signed short>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<unsigned int>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<unsigned int>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<signed int>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<signed int>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<unsigned long>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<unsigned long>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<signed long>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<signed long>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<unsigned long long>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<unsigned long long>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<signed long long>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<signed long long>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<float>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<float>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<double>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<double>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<long double>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<long double>::max() != 0);
+
+ // We don't yet have a generic global way to identify what the name of the supported 128 bit type is.
+ // We just happen to know that for gcc/clang it is __int128.
+ #if (EA_COMPILER_INTMAX_SIZE >= 16) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) // If __int128_t/__uint128_t is supported...
+ EATEST_VERIFY(eastl::numeric_limits<__uint128_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<__uint128_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<__int128_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<__int128_t>::max() != 0);
+ #endif
+
+ // Test sized types.
+ EATEST_VERIFY(eastl::numeric_limits<uint8_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<uint8_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<int8_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<int8_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<uint16_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<uint16_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<int16_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<int16_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<uint32_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<uint32_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<int32_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<int32_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<uint64_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<uint64_t>::max() != 0);
+
+ EATEST_VERIFY(eastl::numeric_limits<int64_t>::is_bounded);
+ EATEST_VERIFY(eastl::numeric_limits<int64_t>::max() != 0);
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestOptional.cpp b/EASTL/test/source/TestOptional.cpp
new file mode 100644
index 0000000..36307ad
--- /dev/null
+++ b/EASTL/test/source/TestOptional.cpp
@@ -0,0 +1,695 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EASTL/type_traits.h>
+#include <EASTL/sort.h>
+#include <EASTL/vector.h>
+#include <EASTL/string.h>
+#include <EASTL/optional.h>
+#include <EASTL/unique_ptr.h>
+
+
+/////////////////////////////////////////////////////////////////////////////
+struct IntStruct
+{
+ IntStruct(int in) : data(in) {}
+ int data;
+};
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+auto operator<=>(const IntStruct& lhs, const IntStruct& rhs) { return lhs.data <=> rhs.data; }
+#else
+bool operator<(const IntStruct& lhs, const IntStruct& rhs)
+ { return lhs.data < rhs.data; }
+#endif
+bool operator==(const IntStruct& lhs, const IntStruct& rhs)
+ { return lhs.data == rhs.data; }
+
+
+
+/////////////////////////////////////////////////////////////////////////////
+struct destructor_test
+{
+ ~destructor_test() { destructor_ran = true; }
+ static bool destructor_ran;
+ static void reset() { destructor_ran = false; }
+};
+bool destructor_test::destructor_ran = false;
+
+/////////////////////////////////////////////////////////////////////////////
+struct move_test
+{
+ move_test() = default;
+ move_test(move_test&&) { was_moved = true; }
+ move_test& operator=(move_test&&) { was_moved = true; return *this;}
+
+ // issue a compiler error is container tries to copy
+ move_test(move_test const&) = delete;
+ move_test& operator=(const move_test&) = delete;
+
+ static bool was_moved;
+};
+
+bool move_test::was_moved = false;
+
+/////////////////////////////////////////////////////////////////////////////
+template <typename T>
+class forwarding_test
+{
+ eastl::optional<T> m_optional;
+
+public:
+ forwarding_test() : m_optional() {}
+ forwarding_test(T&& t) : m_optional(t) {}
+ ~forwarding_test() { m_optional.reset(); }
+
+ template <typename U>
+ T GetValueOrDefault(U&& def) const
+ {
+ return m_optional.value_or(eastl::forward<U>(def));
+ }
+};
+
+/////////////////////////////////////////////////////////////////////////////
+struct assignment_test
+{
+ assignment_test() { ++num_objects_inited; }
+ assignment_test(assignment_test&&) { ++num_objects_inited; }
+ assignment_test(const assignment_test&) { ++num_objects_inited; }
+ assignment_test& operator=(assignment_test&&) { return *this; }
+ assignment_test& operator=(const assignment_test&) { return *this; }
+ ~assignment_test() { --num_objects_inited; }
+
+ static int num_objects_inited;
+};
+
+int assignment_test::num_objects_inited = 0;
+
+
+/////////////////////////////////////////////////////////////////////////////
+// TestOptional
+//
+int TestOptional()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+ #if defined(EASTL_OPTIONAL_ENABLED) && EASTL_OPTIONAL_ENABLED
+ {
+ {
+ VERIFY( (is_same<optional<int>::value_type, int>::value));
+ VERIFY( (is_same<optional<short>::value_type, short>::value));
+ VERIFY(!(is_same<optional<short>::value_type, long>::value));
+ VERIFY( (is_same<optional<const short>::value_type, const short>::value));
+ VERIFY( (is_same<optional<volatile short>::value_type, volatile short>::value));
+ VERIFY( (is_same<optional<const volatile short>::value_type, const volatile short>::value));
+
+ VERIFY(is_empty<nullopt_t>::value);
+ #if EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE
+ VERIFY(is_literal_type<nullopt_t>::value);
+ #endif
+
+ #if EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE
+ VERIFY(is_trivially_destructible<int>::value);
+ VERIFY(is_trivially_destructible<Internal::optional_storage<int>>::value);
+ VERIFY(is_trivially_destructible<optional<int>>::value);
+ VERIFY(is_trivially_destructible<optional<int>>::value == is_trivially_destructible<int>::value);
+ #endif
+
+ {
+ struct NotTrivialDestructible { ~NotTrivialDestructible() {} };
+ VERIFY(!is_trivially_destructible<NotTrivialDestructible>::value);
+ VERIFY(!is_trivially_destructible<optional<NotTrivialDestructible>>::value);
+ VERIFY(!is_trivially_destructible<Internal::optional_storage<NotTrivialDestructible>>::value);
+ VERIFY(is_trivially_destructible<optional<NotTrivialDestructible>>::value == is_trivially_destructible<NotTrivialDestructible>::value);
+ }
+ }
+
+ {
+ optional<int> o;
+ VERIFY(!o);
+ VERIFY(o.value_or(0x8BADF00D) == (int)0x8BADF00D);
+ o = 1024;
+ VERIFY(static_cast<bool>(o));
+ VERIFY(o.value_or(0x8BADF00D) == 1024);
+ VERIFY(o.value() == 1024);
+
+ // Test reset
+ o.reset();
+ VERIFY(!o);
+ VERIFY(o.value_or(0x8BADF00D) == (int)0x8BADF00D);
+ }
+
+ {
+ optional<int> o(nullopt);
+ VERIFY(!o);
+ VERIFY(o.value_or(0x8BADF00D) == (int)0x8BADF00D);
+ }
+
+ {
+ optional<int> o = {};
+ VERIFY(!o);
+ VERIFY(o.value_or(0x8BADF00D) == (int)0x8BADF00D);
+ }
+
+ {
+ optional<int> o(42);
+ VERIFY(bool(o));
+ VERIFY(o.value_or(0x8BADF00D) == 42);
+ o = nullopt;
+ VERIFY(!o);
+ VERIFY(o.value_or(0x8BADF00D) == (int)0x8BADF00D);
+ }
+
+ {
+ optional<int> o(42);
+ VERIFY(static_cast<bool>(o));
+ VERIFY(o.value_or(0x8BADF00D) == 42);
+ VERIFY(o.value() == 42);
+ }
+
+ {
+ auto o = make_optional(42);
+ VERIFY((is_same<decltype(o), optional<int>>::value));
+ VERIFY(static_cast<bool>(o));
+ VERIFY(o.value_or(0x8BADF00D) == 42);
+ VERIFY(o.value() == 42);
+ }
+
+ {
+ int a = 42;
+ auto o = make_optional(a);
+ VERIFY((is_same<decltype(o)::value_type, int>::value));
+ VERIFY(o.value() == 42);
+ }
+
+ {
+ // test make_optional stripping refs/cv-qualifers
+ int a = 42;
+ const volatile int& intRef = a;
+ auto o = make_optional(intRef);
+ VERIFY((is_same<decltype(o)::value_type, int>::value));
+ VERIFY(o.value() == 42);
+ }
+
+ {
+ int a = 10;
+ const volatile int& aRef = a;
+ auto o = eastl::make_optional(aRef);
+ VERIFY(o.value() == 10);
+ }
+
+ {
+ {
+ struct local { int payload1; };
+ auto o = eastl::make_optional<local>(42);
+ VERIFY(o.value().payload1 == 42);
+ }
+ {
+ struct local { int payload1; int payload2; };
+ auto o = eastl::make_optional<local>(42, 43);
+ VERIFY(o.value().payload1 == 42);
+ VERIFY(o.value().payload2 == 43);
+ }
+
+ {
+ struct local
+ {
+ local(std::initializer_list<int> ilist)
+ {
+ payload1 = ilist.begin()[0];
+ payload2 = ilist.begin()[1];
+ }
+
+ int payload1;
+ int payload2;
+ };
+
+ auto o = eastl::make_optional<local>({42, 43});
+ VERIFY(o.value().payload1 == 42);
+ VERIFY(o.value().payload2 == 43);
+ }
+ }
+
+ {
+ optional<int> o1(42), o2(24);
+ VERIFY(o1.value() == 42);
+ VERIFY(o2.value() == 24);
+ VERIFY(*o1 == 42);
+ VERIFY(*o2 == 24);
+ o1 = eastl::move(o2);
+ VERIFY(*o2 == 24);
+ VERIFY(*o1 == 24);
+ VERIFY(o2.value() == 24);
+ VERIFY(o1.value() == 24);
+ VERIFY(bool(o1));
+ VERIFY(bool(o2));
+ }
+
+ {
+ struct local { int payload; };
+ optional<local> o = local{ 42 };
+ VERIFY(o->payload == 42);
+ }
+
+ {
+ struct local
+ {
+ int test() const { return 42; }
+ };
+
+ {
+ const optional<local> o = local{};
+ VERIFY(o->test() == 42);
+ VERIFY((*o).test() == 42);
+ VERIFY(o.value().test() == 42);
+ VERIFY(bool(o));
+ }
+
+ {
+ optional<local> o = local{};
+ VERIFY(bool(o));
+ o = nullopt;
+ VERIFY(!bool(o));
+
+ VERIFY(o.value_or(local{}).test() == 42);
+ VERIFY(!bool(o));
+ }
+ }
+ }
+
+ {
+ move_test t;
+ optional<move_test> o(eastl::move(t));
+ VERIFY(move_test::was_moved);
+ }
+
+ {
+ forwarding_test<float>ft(1.f);
+ float val = ft.GetValueOrDefault(0.f);
+ VERIFY(val == 1.f);
+ }
+
+ {
+ assignment_test::num_objects_inited = 0;
+ {
+ optional<assignment_test> o1;
+ optional<assignment_test> o2 = assignment_test();
+ optional<assignment_test> o3(o2);
+ VERIFY(assignment_test::num_objects_inited == 2);
+ o1 = nullopt;
+ VERIFY(assignment_test::num_objects_inited == 2);
+ o1 = o2;
+ VERIFY(assignment_test::num_objects_inited == 3);
+ o1 = o2;
+ VERIFY(assignment_test::num_objects_inited == 3);
+ o1 = nullopt;
+ VERIFY(assignment_test::num_objects_inited == 2);
+ o2 = o1;
+ VERIFY(assignment_test::num_objects_inited == 1);
+ o1 = o2;
+ VERIFY(assignment_test::num_objects_inited == 1);
+ }
+ VERIFY(assignment_test::num_objects_inited == 0);
+
+ {
+ optional<assignment_test> o1;
+ VERIFY(assignment_test::num_objects_inited == 0);
+ o1 = nullopt;
+ VERIFY(assignment_test::num_objects_inited == 0);
+ o1 = optional<assignment_test>(assignment_test());
+ VERIFY(assignment_test::num_objects_inited == 1);
+ o1 = optional<assignment_test>(assignment_test());
+ VERIFY(assignment_test::num_objects_inited == 1);
+ optional<assignment_test> o2(eastl::move(o1));
+ VERIFY(assignment_test::num_objects_inited == 2);
+ o1 = nullopt;
+ VERIFY(assignment_test::num_objects_inited == 1);
+ }
+ VERIFY(assignment_test::num_objects_inited == 0);
+ }
+
+ #if EASTL_VARIADIC_TEMPLATES_ENABLED
+ {
+ struct vec3
+ {
+ vec3(std::initializer_list<float> ilist) { auto* p = ilist.begin(); x = *p++; y = *p++; z = *p++; }
+ vec3(float _x, float _y, float _z) : x(_x), y(_y), z(_z) {} // testing variadic template constructor overload
+ float x = 0, y = 0, z = 0;
+ };
+
+ {
+ optional<vec3> o{ in_place, 4.f, 5.f, 6.f };
+ VERIFY(o->x == 4 && o->y == 5 && o->z == 6);
+ }
+
+ {
+ optional<vec3> o{ in_place, {4.f, 5.f, 6.f} };
+ VERIFY(o->x == 4 && o->y == 5 && o->z == 6);
+ }
+
+ {
+ optional<string> o(in_place, {'a', 'b', 'c'});
+ VERIFY(o == string("abc"));
+ }
+
+ // http://en.cppreference.com/w/cpp/utility/optional/emplace
+ {
+ optional<vec3> o;
+ o.emplace(42.f, 42.f, 42.f);
+ VERIFY(o->x == 42.f && o->y == 42.f && o->z == 42.f);
+ }
+
+ {
+ optional<vec3> o;
+ o.emplace({42.f, 42.f, 42.f});
+ VERIFY(o->x == 42.f && o->y == 42.f && o->z == 42.f);
+ }
+
+ {
+ optional<int> o;
+ o.emplace(42);
+ VERIFY(*o == 42);
+ }
+
+ struct nonCopyableNonMovable
+ {
+ nonCopyableNonMovable(int v) : val(v) {}
+
+ nonCopyableNonMovable(const nonCopyableNonMovable&) = delete;
+ nonCopyableNonMovable(nonCopyableNonMovable&&) = delete;
+ nonCopyableNonMovable& operator=(const nonCopyableNonMovable&) = delete;
+
+ int val = 0;
+ };
+
+ {
+ optional<nonCopyableNonMovable> o;
+ o.emplace(42);
+ VERIFY(o->val == 42);
+ }
+
+ {
+ // Verify emplace will destruct object if it has been engaged.
+ destructor_test::reset();
+ optional<destructor_test> o;
+ o.emplace();
+ VERIFY(!destructor_test::destructor_ran);
+
+ destructor_test::reset();
+ o.emplace();
+ VERIFY(destructor_test::destructor_ran);
+ }
+ }
+ #endif
+
+
+ // swap
+ {
+ {
+ optional<int> o1 = 42, o2 = 24;
+ VERIFY(*o1 == 42);
+ VERIFY(*o2 == 24);
+ o1.swap(o2);
+ VERIFY(*o1 == 24);
+ VERIFY(*o2 == 42);
+ }
+
+ {
+ optional<int> o1 = 42, o2 = 24;
+ VERIFY(*o1 == 42);
+ VERIFY(*o2 == 24);
+ swap(o1, o2);
+ VERIFY(*o1 == 24);
+ VERIFY(*o2 == 42);
+ }
+
+ {
+ optional<int> o1 = 42, o2;
+ VERIFY(*o1 == 42);
+ VERIFY(o2.has_value() == false);
+ swap(o1, o2);
+ VERIFY(o1.has_value() == false);
+ VERIFY(*o2 == 42);
+ }
+
+ {
+ optional<int> o1 = nullopt, o2 = 42;
+ VERIFY(o1.has_value() == false);
+ VERIFY(*o2 == 42);
+ swap(o1, o2);
+ VERIFY(*o1 == 42);
+ VERIFY(o2.has_value() == false);
+ }
+ }
+
+ {
+ optional<IntStruct> o(in_place, 10);
+ optional<IntStruct> e;
+
+ VERIFY(o < IntStruct(42));
+ VERIFY(!(o < IntStruct(2)));
+ VERIFY(!(o < IntStruct(10)));
+ VERIFY(e < o);
+ VERIFY(e < IntStruct(10));
+
+ VERIFY(o > IntStruct(4));
+ VERIFY(!(o > IntStruct(42)));
+
+ VERIFY(o >= IntStruct(4));
+ VERIFY(o >= IntStruct(10));
+ VERIFY(IntStruct(4) <= o);
+ VERIFY(IntStruct(10) <= o);
+
+ VERIFY(o == IntStruct(10));
+ VERIFY(o->data == IntStruct(10).data);
+
+ VERIFY(o != IntStruct(11));
+ VERIFY(o->data != IntStruct(11).data);
+
+ VERIFY(e == nullopt);
+ VERIFY(nullopt == e);
+
+ VERIFY(o != nullopt);
+ VERIFY(nullopt != o);
+ VERIFY(nullopt < o);
+ VERIFY(o > nullopt);
+ VERIFY(!(nullopt > o));
+ VERIFY(!(o < nullopt));
+ VERIFY(nullopt <= o);
+ VERIFY(o >= nullopt);
+ }
+
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ optional<IntStruct> o(in_place, 10);
+ optional<IntStruct> e;
+
+ VERIFY((o <=> IntStruct(42)) < 0);
+ VERIFY((o <=> IntStruct(2)) >= 0);
+ VERIFY((o <=> IntStruct(10)) >= 0);
+ VERIFY((e <=> o) < 0);
+ VERIFY((e <=> IntStruct(10)) < 0);
+
+ VERIFY((o <=> IntStruct(4)) > 0);
+ VERIFY(o <=> IntStruct(42) <= 0);
+
+ VERIFY((o <=> IntStruct(4)) >= 0);
+ VERIFY((o <=> IntStruct(10)) >= 0);
+ VERIFY((IntStruct(4) <=> o) <= 0);
+ VERIFY((IntStruct(10) <=> o) <= 0);
+
+ VERIFY((o <=> IntStruct(10)) == 0);
+ VERIFY((o->data <=> IntStruct(10).data) == 0);
+
+ VERIFY((o <=> IntStruct(11)) != 0);
+ VERIFY((o->data <=> IntStruct(11).data) != 0);
+
+ VERIFY((e <=> nullopt) == 0);
+ VERIFY((nullopt <=> e) == 0);
+
+ VERIFY((o <=> nullopt) != 0);
+ VERIFY((nullopt <=> o) != 0);
+ VERIFY((nullopt <=> o) < 0);
+ VERIFY((o <=> nullopt) > 0);
+ VERIFY((nullopt <=> o) <= 0);
+ VERIFY((o <=> nullopt) >= 0);
+ }
+ #endif
+
+ // hash
+ {
+ {
+ // verify that the hash an empty eastl::optional object is zero.
+ typedef hash<optional<int>> hash_optional_t;
+ optional<int> e;
+ VERIFY(hash_optional_t{}(e) == 0);
+ }
+
+ {
+ // verify that the hash is the same as the hash of the underlying type
+ const char* const pMessage = "Electronic Arts Canada";
+ typedef hash<optional<string>> hash_optional_t;
+ optional<string> o = string(pMessage);
+ VERIFY(hash_optional_t{}(o) == hash<string>{}(pMessage));
+ }
+ }
+
+ // sorting
+ {
+ vector<optional<int>> v = {{122}, {115}, nullopt, {223}};
+ sort(begin(v), end(v));
+ vector<optional<int>> sorted = {nullopt, 115, 122, 223};
+
+ VERIFY(v == sorted);
+ }
+
+ // test destructors being called.
+ {
+ destructor_test::reset();
+ {
+ optional<destructor_test> o = destructor_test{};
+ }
+ VERIFY(destructor_test::destructor_ran);
+
+ destructor_test::reset();
+ {
+ optional<destructor_test> o;
+ }
+ // destructor shouldn't be called as object wasn't constructed.
+ VERIFY(!destructor_test::destructor_ran);
+
+
+ destructor_test::reset();
+ {
+ optional<destructor_test> o = {};
+ }
+ // destructor shouldn't be called as object wasn't constructed.
+ VERIFY(!destructor_test::destructor_ran);
+
+ destructor_test::reset();
+ {
+ optional<destructor_test> o = nullopt;
+ }
+ // destructor shouldn't be called as object wasn't constructed.
+ VERIFY(!destructor_test::destructor_ran);
+ }
+
+ // optional rvalue tests
+ {
+ VERIFY(*optional<uint32_t>(1u) == 1u);
+ VERIFY(optional<uint32_t>(1u).value() == 1u);
+ VERIFY(optional<uint32_t>(1u).value_or(0xdeadf00d) == 1u);
+ VERIFY(optional<uint32_t>().value_or(0xdeadf00d) == 0xdeadf00d);
+ VERIFY(optional<uint32_t>(1u).has_value() == true);
+ VERIFY(optional<uint32_t>().has_value() == false);
+ VERIFY( optional<IntStruct>(in_place, 10)->data == 10);
+
+ }
+
+ // alignment type tests
+ {
+ static_assert(alignof(optional<Align16>) == alignof(Align16), "optional alignment failure");
+ static_assert(alignof(optional<Align32>) == alignof(Align32), "optional alignment failure");
+ static_assert(alignof(optional<Align64>) == alignof(Align64), "optional alignment failure");
+ }
+
+ {
+ // user reported regression that failed to compile
+ struct local_struct
+ {
+ local_struct() {}
+ ~local_struct() {}
+ };
+ static_assert(!eastl::is_trivially_destructible_v<local_struct>, "");
+
+ {
+ local_struct ls;
+ eastl::optional<local_struct> o{ls};
+ }
+ {
+ const local_struct ls;
+ eastl::optional<local_struct> o{ls};
+ }
+ }
+
+ {
+ {
+ // user regression
+ eastl::optional<eastl::string> o = eastl::string("Hello World");
+ eastl::optional<eastl::string> co;
+
+ co = o; // force copy-assignment
+
+ VERIFY( o.value().data() != co.value().data());
+ VERIFY( o.value().data() == eastl::string("Hello World"));
+ VERIFY(co.value().data() == eastl::string("Hello World"));
+ }
+ {
+ // user regression
+ EA_DISABLE_VC_WARNING(4625 4626) // copy/assignment operator constructor was implicitly defined as deleted
+ struct local
+ {
+ eastl::unique_ptr<int> ptr;
+ };
+ EA_RESTORE_VC_WARNING()
+
+ eastl::optional<local> o1 = local{eastl::make_unique<int>(42)};
+ eastl::optional<local> o2;
+
+ o2 = eastl::move(o1);
+
+ VERIFY(!!o1 == true);
+ VERIFY(!!o2 == true);
+ VERIFY(!!o1->ptr == false);
+ VERIFY(!!o2->ptr == true);
+ VERIFY(o2->ptr.get() != nullptr);
+ }
+ {
+ // user regression
+ static bool copyCtorCalledWithUninitializedValue;
+ static bool moveCtorCalledWithUninitializedValue;
+ copyCtorCalledWithUninitializedValue = moveCtorCalledWithUninitializedValue = false;
+ struct local
+ {
+ uint32_t val;
+ local()
+ : val(0xabcdabcd)
+ {}
+ local(const local& other)
+ : val(other.val)
+ {
+ if (other.val != 0xabcdabcd)
+ copyCtorCalledWithUninitializedValue = true;
+ }
+ local(local&& other)
+ : val(eastl::move(other.val))
+ {
+ if (other.val != 0xabcdabcd)
+ moveCtorCalledWithUninitializedValue = true;
+ }
+ local& operator=(const local&) = delete;
+ };
+ eastl::optional<local> n;
+ eastl::optional<local> o1(n);
+ VERIFY(!copyCtorCalledWithUninitializedValue);
+ eastl::optional<local> o2(eastl::move(n));
+ VERIFY(!moveCtorCalledWithUninitializedValue);
+ }
+ }
+
+ {
+ auto testFn = []() -> optional<int>
+ {
+ return eastl::nullopt;
+ };
+
+ auto o = testFn();
+ VERIFY(!!o == false);
+ }
+
+ #endif // EASTL_OPTIONAL_ENABLED
+ return nErrorCount;
+}
+
diff --git a/EASTL/test/source/TestRandom.cpp b/EASTL/test/source/TestRandom.cpp
new file mode 100644
index 0000000..cefd7a5
--- /dev/null
+++ b/EASTL/test/source/TestRandom.cpp
@@ -0,0 +1,168 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(_MSC_VER)
+ //#pragma warning(disable: 4267) // 'argument' : conversion from 'size_t' to 'uint32_t', possible loss of data.
+#endif
+
+
+#include "EASTLTest.h"
+#include <EASTL/numeric_limits.h>
+#include <EASTL/set.h>
+#include <EASTL/random.h>
+
+
+struct GeneratorUint8
+{
+ uint8_t mValue;
+ GeneratorUint8() : mValue(0) {}
+ uint8_t operator()(){ return mValue++; } // This is a pretty bad random number generator, but works for our tests.
+};
+
+struct GeneratorUint16
+{
+ uint16_t mValue;
+ GeneratorUint16() : mValue(0) {}
+ uint16_t operator()(){ return mValue++; }
+};
+
+struct GeneratorUint32
+{
+ uint32_t mValue;
+ GeneratorUint32() : mValue(0) {}
+ uint32_t operator()(){ return mValue++; }
+};
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestRandom
+//
+int TestRandom()
+{
+ int nErrorCount = 0;
+
+ {
+ // template<class IntType = int>
+ // class uniform_int_distribution
+
+ // The C++11 Standard defines a number of formal Generators, such as std::mersenne_twister_engine,
+ // linear_congruential_engine, discard_block_engine, etc.
+
+ using namespace eastl;
+
+ {
+ eastl::uniform_int_distribution<uint8_t> uid(1, 6);
+ GeneratorUint8 g;
+
+ for(uint32_t i = 0; i < UINT8_MAX; i += 1)
+ {
+ uint8_t value = uid(g);
+ EATEST_VERIFY((value >= 1) && (value <= 6));
+ // To do: Validate the randomness of the value.
+ }
+
+ eastl::uniform_int_distribution<uint8_t> uid2(1, 6);
+ EATEST_VERIFY(uid == uid2);
+ }
+
+ {
+ eastl::uniform_int_distribution<uint16_t> uid(1, 6);
+ GeneratorUint16 g;
+
+ for(uint32_t i = 0; i < (UINT16_MAX - (UINT16_MAX / 50)); i += (UINT16_MAX / 50))
+ {
+ uint16_t value = uid(g);
+ EATEST_VERIFY((value >= 1) && (value <= 6));
+ // To do: Validate the randomness of the value.
+ }
+
+ eastl::uniform_int_distribution<uint16_t> uid2(1, 6);
+ EATEST_VERIFY(uid == uid2);
+ }
+
+ {
+ eastl::uniform_int_distribution<uint32_t> uid(1, 6);
+ GeneratorUint32 g;
+
+ for(uint32_t i = 0; i < (UINT32_MAX - (UINT32_MAX / 500)); i += (UINT32_MAX / 500))
+ {
+ uint32_t value = uid(g);
+ EATEST_VERIFY((value >= 1) && (value <= 6));
+ // To do: Validate the randomness of the value.
+ }
+
+ eastl::uniform_int_distribution<uint32_t> uid2(1, 6);
+ EATEST_VERIFY(uid == uid2);
+ }
+ }
+
+
+
+ /// Example usage:
+ /// eastl_size_t Rand(eastl_size_t n) { return (eastl_size_t)(rand() % n); } // Note: The C rand function is poor and slow.
+ /// pointer_to_unary_function<eastl_size_t, eastl_size_t> randInstance(Rand);
+ /// random_shuffle(pArrayBegin, pArrayEnd, randInstance);
+ ///
+ /// Example usage:
+ /// struct Rand{ eastl_size_t operator()(eastl_size_t n) { return (eastl_size_t)(rand() % n); } }; // Note: The C rand function is poor and slow.
+ /// Rand randInstance;
+ /// random_shuffle(pArrayBegin, pArrayEnd, randInstance);
+
+
+ {
+ // void random_shuffle(RandomAccessIterator first, RandomAccessIterator last, RandomNumberGenerator& rng)
+ using namespace eastl;
+
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+ int intArray[] = { 3, 2, 6, 5, 4, 1 };
+
+ random_shuffle(intArray, intArray + 0, rng);
+ EATEST_VERIFY(VerifySequence(intArray, intArray + 6, int(), "random_shuffle", 3, 2, 6, 5, 4, 1, -1));
+
+ random_shuffle(intArray, intArray + (sizeof(intArray) / sizeof(intArray[0])), rng);
+ bool changed = false;
+ for(int i = 0; (i < 5) && !changed; i++)
+ {
+ changed = (intArray[0] != 3) || (intArray[1] != 2) || (intArray[2] != 6) ||
+ (intArray[3] != 5) || (intArray[4] != 4) || (intArray[5] != 1);
+ }
+ EATEST_VERIFY(changed);
+
+ // Test of possible bug report by user John Chin.
+ // The report is that shuffling an ordered array 0, 1, 2, 3, 4 ... results in duplicates, such as 5, 2, 2, 4 ...
+ eastl::vector<eastl_size_t> rngArray;
+
+ for(eastl_size_t i = 0; i < 200; ++i)
+ rngArray.push_back(i);
+
+ random_shuffle(rngArray.begin(), rngArray.end(), rng);
+ EATEST_VERIFY(rngArray.validate());
+
+ eastl::set<eastl_size_t> intSet;
+
+ for(eastl_size_t s = 0, sEnd = rngArray.size(); s < sEnd; ++s)
+ intSet.insert(rngArray[s]);
+
+ // If the shuffled array is unique, then a set of its values should be the same size as the array.
+ EATEST_VERIFY(intSet.size() == rngArray.size());
+ }
+
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestRatio.cpp b/EASTL/test/source/TestRatio.cpp
new file mode 100644
index 0000000..9f30fc1
--- /dev/null
+++ b/EASTL/test/source/TestRatio.cpp
@@ -0,0 +1,107 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EABase/eabase.h>
+#include <EASTL/ratio.h>
+
+
+int TestRatio()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+ {
+ using namespace eastl::Internal;
+
+ // lcm (least common multiple)
+ static_assert(lcm<0,0>::value == 0, "lcm failure");
+ static_assert(lcm<10,6>::value == 30, "lcm failure");
+ static_assert(lcm<21,6>::value == 42, "lcm failure");
+ static_assert(lcm<21,6>::value == lcm<6,21>::value, "lcm failure");
+
+ // gcd (greatest common divisor)
+ static_assert(gcd<6, 4>::value == 2, "gcd failure");
+ static_assert(gcd<54, 24>::value == 6, "gcd failure");
+ static_assert(gcd<42, 56>::value == 14, "gcd failure");
+ static_assert(gcd<48, 18>::value == 6, "gcd failure");
+ static_assert(gcd<50, 40>::value == 10, "gcd failure");
+ static_assert(gcd<6, 4>::value != 9, "gcd failure");
+ static_assert(gcd<0, 0>::value == 1, "gcd failure");
+ static_assert(gcd<1, 0>::value == 1, "gcd failure");
+ static_assert(gcd<0, 1>::value == 1, "gcd failure");
+ static_assert(gcd<34,7>::value == gcd<7, 34>::value, "gcd failure");
+ static_assert(gcd<9223372036854775807, 9223372036854775807>::value == 9223372036854775807, "gcd failure");
+
+ // simplify
+ typedef ct_simplify<ratio<50, 40>>::ratio_type smp_rt;
+ typedef ct_simplify<ratio<50, 40>>::this_type smp_tt;
+ static_assert(smp_rt::num == 5 && smp_rt::den == 4, "simplify failure");
+ static_assert(smp_tt::divisor == 10, "simplify failure0");
+ static_assert(smp_rt::num == 5, "simplify failure1");
+ static_assert(smp_rt::den == 4, "simplify failure2");
+ }
+
+ {
+ // ratio_add
+ typedef ratio_add<ratio<2, 3>, ratio<1, 6>> sum;
+ static_assert(sum::num == 5 && sum::den == 6, "ratio_add failure");
+ typedef ratio_add<ratio<3,4>, ratio<5,10>> sum2;
+ static_assert(sum2::num == 5 && sum2::den == 4, "ratio_add failure");
+
+ // ratio_subtract
+ typedef ratio_subtract<ratio<10,10>, ratio<1,2>> sum3;
+ static_assert(sum3::num == 1 && sum3::den == 2, "ratio_subtract failure");
+
+ // ratio_multiply
+ typedef ratio_multiply<ratio<10,10>, ratio<1,2>> sum4;
+ static_assert(sum4::num == 1 && sum4::den == 2, "ratio_multiply failure");
+ typedef ratio_multiply<ratio<2,5>, ratio<1,2>> sum5;
+ static_assert(sum5::num == 1 && sum5::den == 5, "ratio_multiply failure");
+ typedef ratio_multiply<ratio<1,3>, ratio<9,16>> sum6;
+ static_assert(sum6::num == 3 && sum6::den == 16, "ratio_multiply failure");
+
+ // ratio_divide
+ typedef ratio_divide<ratio<1,8>, ratio<1,4>> sum8;
+ static_assert(sum8::num == 1 && sum8::den == 2, "ratio_divide failure");
+ typedef ratio_divide<ratio<2,3>, ratio<5>> sum9;
+ static_assert(sum9::num == 2 && sum9::den == 15, "ratio_divide failure");
+
+ // ratio_equal
+ static_assert(ratio_equal<ratio<1>, ratio<1>>::value, "ratio_equal failure");
+ static_assert(ratio_equal<ratio<1,1>, ratio<4,4>>::value, "ratio_equal failure");
+ static_assert(ratio_equal<ratio<5,10>, ratio<1,2>>::value, "ratio_equal failure");
+ static_assert(ratio_equal<ratio<2,3>, ratio<4,6>>::value, "ratio_equal failure");
+
+ // ratio_not_equal
+ static_assert(!ratio_not_equal<ratio<5,10>, ratio<1,2>>::value, "ratio_not_equal failure");
+
+ // ratio_less
+ static_assert(ratio_less<ratio<2,10>, ratio<1,2>>::value, "ratio_less failure");
+ static_assert(ratio_less<ratio<23,37>, ratio<57,90>>::value, "ratio_less failure");
+
+ // ratio_less_equal
+ static_assert(ratio_less_equal<ratio<2,10>, ratio<1,2>>::value, "ratio_less_equal failure");
+ static_assert(ratio_less_equal<ratio<2,10>, ratio<1,5>>::value, "ratio_less_equal failure");
+ static_assert(ratio_less_equal<ratio<1,100>, ratio<1,5>>::value, "ratio_less_equal failure");
+
+ // ratio_greater
+ static_assert(ratio_greater<ratio<1,2>, ratio<1,4>>::value, "ratio_greater failure");
+
+ // ratio_greater_equal
+ static_assert(ratio_greater_equal<ratio<3,4>, ratio<1,2>>::value, "ratio_greater_equal failure");
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestRingBuffer.cpp b/EASTL/test/source/TestRingBuffer.cpp
new file mode 100644
index 0000000..d640380
--- /dev/null
+++ b/EASTL/test/source/TestRingBuffer.cpp
@@ -0,0 +1,1139 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/bonus/ring_buffer.h>
+#include <EASTL/bonus/fixed_ring_buffer.h>
+#include <EASTL/vector.h>
+#include <EASTL/deque.h>
+#include <EASTL/string.h>
+#include <EASTL/list.h>
+#include <EASTL/fixed_vector.h>
+#include <EASTL/fixed_string.h>
+
+
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::ring_buffer< int, eastl::vector<int> >;
+template class eastl::ring_buffer< Align64, eastl::vector<Align64> >;
+template class eastl::ring_buffer< TestObject, eastl::vector<TestObject> >;
+
+template class eastl::ring_buffer< int, eastl::deque<int> >;
+template class eastl::ring_buffer< Align64, eastl::deque<Align64> >;
+template class eastl::ring_buffer< TestObject, eastl::deque<TestObject> >;
+
+template class eastl::ring_buffer< int, eastl::list<int> >;
+template class eastl::ring_buffer< Align64, eastl::list<Align64> >;
+template class eastl::ring_buffer< TestObject, eastl::list<TestObject> >;
+
+// TODO(rparolin): To consider adding support for eastl::array.
+// template class eastl::ring_buffer< int, eastl::array<int, 64>>;
+
+typedef eastl::fixed_string<char, 256, false> RBFixedString;
+typedef eastl::fixed_vector<RBFixedString, 100, false> RBFixedStringVector;
+typedef RBFixedStringVector::overflow_allocator_type RBFixedStringVectorOverflowAllocator;
+template class eastl::ring_buffer<RBFixedString, RBFixedStringVector, RBFixedStringVectorOverflowAllocator>;
+
+typedef eastl::fixed_vector<int, 100, false> RBFixedIntVector;
+template class eastl::ring_buffer<int, RBFixedIntVector, RBFixedIntVector::overflow_allocator_type>;
+// template class eastl::ring_buffer<int, RBFixedIntVector>; // currently fails to compile
+
+typedef eastl::fixed_vector<int, 100> RBFixedIntVectorWithOverFlow;
+template class eastl::ring_buffer<int, RBFixedIntVectorWithOverFlow, RBFixedIntVectorWithOverFlow::overflow_allocator_type>;
+// template class eastl::ring_buffer<int, RBFixedIntVectorWithOverFlow>; // currently fails to compile
+
+
+
+int TestRingBuffer()
+{
+ int nErrorCount = 0;
+
+ // GCC prior to 4.1 has a fatal code generation bug in string arrays, which we use below.
+ #if !defined(EA_DEBUG) && defined(__GNUC__) && !defined(__EDG__) && (((__GNUC__ * 100) + __GNUC_MINOR__) < 401)
+ return nErrorCount;
+ #endif
+
+ { // regression for bug in the capacity() function for the case of capacity == 0.
+
+ vector<int> emptyIntArray;
+ ring_buffer<int, vector<int> > intRingBuffer(emptyIntArray);
+
+ EATEST_VERIFY(intRingBuffer.validate());
+ EATEST_VERIFY(intRingBuffer.capacity() == 0);
+
+ intRingBuffer.resize(0);
+ EATEST_VERIFY(intRingBuffer.validate());
+ EATEST_VERIFY(intRingBuffer.size() == 0);
+
+ intRingBuffer.resize(1);
+ EATEST_VERIFY(intRingBuffer.validate());
+ EATEST_VERIFY(intRingBuffer.size() == 1);
+ }
+
+ {
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+
+ typedef ring_buffer< string, vector<string> > RBVectorString;
+
+ int counter = 0;
+ char counterBuffer[32];
+
+ // explicit ring_buffer(size_type size = 0);
+ const int kOriginalCapacity = 50;
+ RBVectorString rbVectorString(50);
+
+ // bool empty() const;
+ // size_type size() const;
+ // bool validate() const;
+ EATEST_VERIFY(rbVectorString.validate());
+ EATEST_VERIFY(rbVectorString.empty());
+ EATEST_VERIFY(rbVectorString.size() == 0);
+ EATEST_VERIFY(rbVectorString.capacity() == 50);
+
+ // void clear();
+ rbVectorString.clear();
+ EATEST_VERIFY(rbVectorString.validate());
+ EATEST_VERIFY(rbVectorString.empty());
+ EATEST_VERIFY(rbVectorString.size() == 0);
+ EATEST_VERIFY(rbVectorString.capacity() == 50);
+
+ // container_type& get_container();
+ RBVectorString::container_type& c = rbVectorString.get_container();
+ EATEST_VERIFY(c.size() == (kOriginalCapacity + 1)); // We need to add one because the ring_buffer mEnd is necessarily an unused element.
+
+ // iterator begin();
+ // iterator end();
+ // int validate_iterator(const_iterator i) const;
+ RBVectorString::iterator it = rbVectorString.begin();
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current));
+
+ while(it != rbVectorString.end()) // This loop should do nothing.
+ {
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current));
+ ++it;
+ }
+
+ // void push_back(const value_type& value);
+ sprintf(counterBuffer, "%d", counter++);
+ rbVectorString.push_back(string(counterBuffer));
+ EATEST_VERIFY(rbVectorString.validate());
+ EATEST_VERIFY(!rbVectorString.empty());
+ EATEST_VERIFY(rbVectorString.size() == 1);
+ EATEST_VERIFY(rbVectorString.capacity() == 50);
+
+ it = rbVectorString.begin();
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ EATEST_VERIFY(*it == "0");
+
+ // reference front();
+ // reference back();
+ string& sFront = rbVectorString.front();
+ string& sBack = rbVectorString.back();
+ EATEST_VERIFY(&sFront == &sBack);
+
+ // void push_back();
+ string& ref = rbVectorString.push_back();
+ EATEST_VERIFY(rbVectorString.validate());
+ EATEST_VERIFY(rbVectorString.size() == 2);
+ EATEST_VERIFY(rbVectorString.capacity() == 50);
+ EATEST_VERIFY(&ref == &rbVectorString.back());
+
+ it = rbVectorString.begin();
+ ++it;
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ EATEST_VERIFY(it->empty());
+
+ sprintf(counterBuffer, "%d", counter++);
+ *it = counterBuffer;
+ EATEST_VERIFY(*it == "1");
+
+ ++it;
+ EATEST_VERIFY(it == rbVectorString.end());
+
+ it = rbVectorString.begin();
+ while(it != rbVectorString.end())
+ {
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ ++it;
+ }
+
+ // reference operator[](size_type n);
+ string& s0 = rbVectorString[0];
+ EATEST_VERIFY(s0 == "0");
+
+ string& s1 = rbVectorString[1];
+ EATEST_VERIFY(s1 == "1");
+
+ // Now we start hammering the ring buffer with push_back.
+ for(eastl_size_t i = 0, iEnd = rbVectorString.capacity() * 5; i != iEnd; i++)
+ {
+ sprintf(counterBuffer, "%d", counter++);
+ rbVectorString.push_back(string(counterBuffer));
+ EATEST_VERIFY(rbVectorString.validate());
+ }
+
+ int counterCheck = counter - 1;
+ char counterCheckBuffer[32];
+ sprintf(counterCheckBuffer, "%d", counterCheck);
+ EATEST_VERIFY(rbVectorString.back() == counterCheckBuffer);
+
+ // reverse_iterator rbegin();
+ // reverse_iterator rend();
+ for(RBVectorString::reverse_iterator ri = rbVectorString.rbegin(); ri != rbVectorString.rend(); ++ri)
+ {
+ sprintf(counterCheckBuffer, "%d", counterCheck--);
+ EATEST_VERIFY(*ri == counterCheckBuffer);
+ }
+
+ ++counterCheck;
+
+ // iterator begin();
+ // iterator end();
+ for(RBVectorString::iterator i = rbVectorString.begin(); i != rbVectorString.end(); ++i)
+ {
+ EATEST_VERIFY(rbVectorString.validate_iterator(i) == (isf_valid | isf_current | isf_can_dereference));
+ EATEST_VERIFY(*i == counterCheckBuffer);
+ sprintf(counterCheckBuffer, "%d", ++counterCheck);
+ }
+
+ // void clear();
+ rbVectorString.clear();
+ EATEST_VERIFY(rbVectorString.validate());
+ EATEST_VERIFY(rbVectorString.empty());
+ EATEST_VERIFY(rbVectorString.size() == 0);
+ EATEST_VERIFY(rbVectorString.capacity() == 50);
+
+ // Random operations
+ // Not easy to test the expected values without some tedium.
+ for(int j = 0; j < 10000 + (gEASTL_TestLevel * 10000); j++)
+ {
+ sprintf(counterBuffer, "%d", counter++);
+
+ const eastl_size_t op = rng.RandLimit(12);
+ const eastl_size_t s = rbVectorString.size();
+
+ if(op == 0)
+ {
+ // void push_back(const value_type& value);
+
+ rbVectorString.push_back(string(counterBuffer));
+ EATEST_VERIFY(rbVectorString.size() == eastl::min(s + 1, rbVectorString.capacity()));
+ }
+ else if(op == 1)
+ {
+ // void push_back();
+
+ string& ref2 = rbVectorString.push_back();
+ rbVectorString.back() = string(counterBuffer);
+ EATEST_VERIFY(rbVectorString.size() == eastl::min(s + 1, rbVectorString.capacity()));
+ EATEST_VERIFY(&ref2 == &rbVectorString.back());
+ }
+ else if(op == 2)
+ {
+ // void pop_back();
+
+ if(!rbVectorString.empty())
+ {
+ rbVectorString.pop_back();
+ EATEST_VERIFY(rbVectorString.size() == (s - 1));
+ }
+ }
+ else if(op == 3)
+ {
+ // void push_front(const value_type& value);
+
+ rbVectorString.push_front(string(counterBuffer));
+ EATEST_VERIFY(rbVectorString.size() == eastl::min(s + 1, rbVectorString.capacity()));
+ }
+ else if(op == 4)
+ {
+ // void push_front();
+
+ string& ref2 = rbVectorString.push_front();
+ rbVectorString.front() = string(counterBuffer);
+ EATEST_VERIFY(rbVectorString.size() == eastl::min(s + 1, rbVectorString.capacity()));
+ EATEST_VERIFY(&ref2 == &rbVectorString.front());
+ }
+ else if(op == 5)
+ {
+ // void pop_front();
+
+ if(!rbVectorString.empty())
+ {
+ rbVectorString.pop_front();
+ EATEST_VERIFY(rbVectorString.size() == (s - 1));
+ }
+ }
+ else if(op == 6)
+ {
+ // iterator insert(iterator position, const value_type& value);
+
+ it = rbVectorString.begin();
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s + 1);
+ eastl::advance(it, dist);
+
+ if(it == rbVectorString.end())
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current));
+ else
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+
+ rbVectorString.insert(it, string(counterBuffer));
+ EATEST_VERIFY(rbVectorString.size() == eastl::min(s + 1, rbVectorString.capacity()));
+ }
+ else if(op == 7)
+ {
+ // void insert(iterator position, size_type n, const value_type& value);
+
+ it = rbVectorString.begin();
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s + 1);
+ eastl::advance(it, dist);
+
+ if(it == rbVectorString.end())
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current));
+ else
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+
+ const eastl_size_t count = (eastl_size_t)rng.RandLimit(10);
+
+ rbVectorString.insert(it, count, string(counterBuffer));
+ EATEST_VERIFY(rbVectorString.size() == eastl::min(s + count, rbVectorString.capacity()));
+ }
+ else if(op == 8)
+ {
+ // template <typename InputIterator>
+ // void insert(iterator position, InputIterator first, InputIterator last);
+
+ string stringArray[10];
+
+ it = rbVectorString.begin();
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s + 1);
+ eastl::advance(it, dist);
+
+ if(it == rbVectorString.end())
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current));
+ else
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+
+ const eastl_size_t count = (eastl_size_t)rng.RandLimit(10);
+
+ rbVectorString.insert(it, stringArray, stringArray + count);
+ EATEST_VERIFY(rbVectorString.size() == eastl::min(s + count, rbVectorString.capacity()));
+ }
+ else if(op == 9)
+ {
+ // iterator erase(iterator position);
+
+ if(!rbVectorString.empty())
+ {
+ it = rbVectorString.begin();
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s);
+ eastl::advance(it, dist);
+ EATEST_VERIFY(rbVectorString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ rbVectorString.erase(it);
+
+ EATEST_VERIFY(rbVectorString.size() == (s - 1));
+ }
+ }
+ else if(op == 10)
+ {
+ // iterator erase(iterator first, iterator last);
+
+ if(!rbVectorString.empty())
+ {
+ RBVectorString::iterator it1 = rbVectorString.begin();
+ const eastl_size_t pos = rng.RandLimit((uint32_t)s / 4);
+ eastl::advance(it1, pos);
+ EATEST_VERIFY(rbVectorString.validate_iterator(it1) == (isf_valid | isf_current | isf_can_dereference));
+
+ RBVectorString::iterator it2 = it1;
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s / 4);
+ eastl::advance(it2, dist);
+ EATEST_VERIFY(rbVectorString.validate_iterator(it2) == (isf_valid | isf_current | isf_can_dereference));
+
+ EATEST_VERIFY(s > (pos + dist));
+ rbVectorString.erase(it1, it2);
+ EATEST_VERIFY(rbVectorString.size() == (s - dist));
+ }
+ }
+ else if(op == 11)
+ {
+ // void resize(size_type n);
+ const eastl_size_t nSubOp = rng.RandLimit(100);
+
+ if(nSubOp == 1)
+ {
+ rbVectorString.resize(kOriginalCapacity);
+ EATEST_VERIFY(rbVectorString.size() == (RBVectorString::size_type)kOriginalCapacity);
+ }
+ else if(nSubOp == 2)
+ {
+ const eastl_size_t newSize = rng.RandLimit((uint32_t)s * 2) + 2;
+
+ rbVectorString.resize(newSize);
+ EATEST_VERIFY(rbVectorString.size() == newSize);
+ }
+ else if(nSubOp == 3)
+ {
+ rbVectorString.clear();
+ EATEST_VERIFY(rbVectorString.size() == 0);
+ }
+ }
+
+ EATEST_VERIFY(rbVectorString.validate());
+ }
+
+ // We make sure that after the above we still have some contents.
+ if(rbVectorString.size() < 8)
+ rbVectorString.resize(8);
+
+ EATEST_VERIFY(rbVectorString.validate());
+
+ // Test const functions
+ // const_iterator begin() const;
+ // const_iterator end() const;
+ // const_reverse_iterator rbegin() const;
+ // const_reverse_iterator rend() const;
+ // const_reference front() const;
+ // const_reference back() const;
+ // const_reference operator[](size_type n) const;
+ // const container_type& get_container() const;
+ const RBVectorString& rbVSConst = rbVectorString;
+
+ for(RBVectorString::const_iterator ic = rbVSConst.begin(); ic != rbVSConst.end(); ++ic)
+ {
+ EATEST_VERIFY(rbVectorString.validate_iterator(ic) == (isf_valid | isf_current | isf_can_dereference));
+ }
+
+ for(RBVectorString::const_reverse_iterator ric = rbVSConst.rbegin(); ric != rbVSConst.rend(); ++ric)
+ {
+ if(ric == rbVSConst.rbegin())
+ EATEST_VERIFY(rbVectorString.validate_iterator(ric.base()) == (isf_valid | isf_current));
+ else
+ EATEST_VERIFY(rbVectorString.validate_iterator(ric.base()) == (isf_valid | isf_current | isf_can_dereference));
+ }
+
+ EATEST_VERIFY(rbVSConst.front() == rbVectorString.front());
+ EATEST_VERIFY(rbVSConst.back() == rbVectorString.back());
+ EATEST_VERIFY(rbVSConst[0] == rbVectorString[0]);
+ EATEST_VERIFY(&rbVSConst.get_container() == &rbVectorString.get_container());
+
+
+ // Test additional constructors.
+ // ring_buffer(const this_type& x);
+ // explicit ring_buffer(const Container& x);
+ // this_type& operator=(const this_type& x);
+ // void swap(this_type& x);
+ RBVectorString rbVectorString2(rbVectorString);
+ RBVectorString rbVectorString3(rbVectorString.get_container());
+ RBVectorString rbVectorString4(rbVectorString.capacity() / 2);
+ RBVectorString rbVectorString5(rbVectorString.capacity() * 2);
+
+ EATEST_VERIFY(rbVectorString.validate());
+ EATEST_VERIFY(rbVectorString2.validate());
+ EATEST_VERIFY(rbVectorString3.validate());
+ EATEST_VERIFY(rbVectorString4.validate());
+ EATEST_VERIFY(rbVectorString5.validate());
+
+ EATEST_VERIFY(rbVectorString == rbVectorString2);
+ EATEST_VERIFY(rbVectorString3.get_container() == rbVectorString2.get_container());
+
+ rbVectorString3 = rbVectorString4;
+ EATEST_VERIFY(rbVectorString3.validate());
+
+ eastl::swap(rbVectorString2, rbVectorString4);
+ EATEST_VERIFY(rbVectorString2.validate());
+ EATEST_VERIFY(rbVectorString3.validate());
+ EATEST_VERIFY(rbVectorString4.validate());
+ EATEST_VERIFY(rbVectorString == rbVectorString4);
+ EATEST_VERIFY(rbVectorString2 == rbVectorString3);
+
+ // void ring_buffer<T, Container>::reserve(size_type n)
+ eastl_size_t cap = rbVectorString2.capacity();
+ rbVectorString2.reserve(cap += 2);
+ EATEST_VERIFY(rbVectorString2.validate());
+ EATEST_VERIFY(rbVectorString2.capacity() == cap);
+ rbVectorString2.reserve(cap -= 4); // This should act as a no-op if we are following convention.
+ EATEST_VERIFY(rbVectorString2.validate());
+
+ // void ring_buffer<T, Container>::set_capacity(size_type n)
+ cap = rbVectorString2.capacity();
+ rbVectorString2.resize(cap);
+ EATEST_VERIFY(rbVectorString2.size() == cap);
+ rbVectorString2.set_capacity(cap += 2);
+ EATEST_VERIFY(rbVectorString2.validate());
+ EATEST_VERIFY(rbVectorString2.capacity() == cap);
+ rbVectorString2.set_capacity(cap -= 4);
+ EATEST_VERIFY(rbVectorString2.capacity() == cap);
+ EATEST_VERIFY(rbVectorString2.validate());
+
+ // template <typename InputIterator>
+ // void assign(InputIterator first, InputIterator last);
+ string stringArray[10];
+ for(int q = 0; q < 10; q++)
+ stringArray[q] = (char)('0' + (char)q);
+
+ rbVectorString5.assign(stringArray, stringArray + 10);
+ EATEST_VERIFY(rbVectorString5.validate());
+ EATEST_VERIFY(rbVectorString5.size() == 10);
+ EATEST_VERIFY(rbVectorString5.front() == "0");
+ EATEST_VERIFY(rbVectorString5.back() == "9");
+ }
+
+
+ {
+ // Additional testing
+ typedef ring_buffer< int, vector<int> > RBVectorInt;
+
+ RBVectorInt rbVectorInt(6);
+
+ rbVectorInt.push_back(0);
+ rbVectorInt.push_back(1);
+ rbVectorInt.push_back(2);
+ rbVectorInt.push_back(3);
+ rbVectorInt.push_back(4);
+ rbVectorInt.push_back(5);
+ EATEST_VERIFY(rbVectorInt[0] == 0);
+ EATEST_VERIFY(rbVectorInt[5] == 5);
+
+ // iterator insert(iterator position, const value_type& value);
+ rbVectorInt.insert(rbVectorInt.begin(), 999);
+ EATEST_VERIFY(rbVectorInt[0] == 999);
+ EATEST_VERIFY(rbVectorInt[1] == 0);
+ EATEST_VERIFY(rbVectorInt[5] == 4);
+
+ rbVectorInt.clear();
+ rbVectorInt.push_back(0);
+ rbVectorInt.push_back(1);
+ rbVectorInt.push_back(2);
+ rbVectorInt.push_back(3);
+ rbVectorInt.push_back(4);
+
+ // iterator insert(iterator position, const value_type& value);
+ rbVectorInt.insert(rbVectorInt.begin(), 999);
+ EATEST_VERIFY(rbVectorInt[0] == 999);
+ EATEST_VERIFY(rbVectorInt[1] == 0);
+ EATEST_VERIFY(rbVectorInt[5] == 4);
+
+ rbVectorInt.clear();
+ rbVectorInt.push_back(0);
+ rbVectorInt.push_back(1);
+ rbVectorInt.push_back(2);
+ rbVectorInt.push_back(3);
+ rbVectorInt.push_back(4);
+ rbVectorInt.push_back(5);
+ rbVectorInt.push_back(6);
+ EATEST_VERIFY(rbVectorInt[0] == 1);
+ EATEST_VERIFY(rbVectorInt[5] == 6);
+
+ // iterator insert(iterator position, const value_type& value);
+ rbVectorInt.insert(rbVectorInt.begin(), 999);
+ EATEST_VERIFY(rbVectorInt[0] == 999);
+ EATEST_VERIFY(rbVectorInt[1] == 1);
+ EATEST_VERIFY(rbVectorInt[5] == 5);
+
+ // iterator insert(iterator position, const value_type& value);
+ RBVectorInt::iterator it = rbVectorInt.begin();
+ eastl::advance(it, 3);
+ rbVectorInt.insert(it, 888);
+ EATEST_VERIFY(rbVectorInt[0] == 999);
+ EATEST_VERIFY(rbVectorInt[1] == 1);
+ EATEST_VERIFY(rbVectorInt[2] == 2);
+ EATEST_VERIFY(rbVectorInt[3] == 888);
+ EATEST_VERIFY(rbVectorInt[4] == 3);
+ EATEST_VERIFY(rbVectorInt[5] == 4);
+ }
+
+
+ {
+ EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed());
+
+ typedef ring_buffer< string, list<string> > RBListString;
+
+ int counter = 0;
+ char counterBuffer[32];
+
+ // explicit ring_buffer(size_type size = 0);
+ const int kOriginalCapacity = 50;
+ RBListString rbListString(50);
+
+ // bool empty() const;
+ // size_type size() const;
+ // bool validate() const;
+ EATEST_VERIFY(rbListString.validate());
+ EATEST_VERIFY(rbListString.empty());
+ EATEST_VERIFY(rbListString.size() == 0);
+ EATEST_VERIFY(rbListString.capacity() == 50);
+
+ // void clear();
+ rbListString.clear();
+ EATEST_VERIFY(rbListString.validate());
+ EATEST_VERIFY(rbListString.empty());
+ EATEST_VERIFY(rbListString.size() == 0);
+ EATEST_VERIFY(rbListString.capacity() == 50);
+
+ // container_type& get_container();
+ RBListString::container_type& c = rbListString.get_container();
+ EATEST_VERIFY(c.size() == (kOriginalCapacity + 1)); // We need to add one because the ring_buffer mEnd is necessarily an unused element.
+
+ // iterator begin();
+ // iterator end();
+ // int validate_iterator(const_iterator i) const;
+ RBListString::iterator it = rbListString.begin();
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current));
+
+ while(it != rbListString.end()) // This loop should do nothing.
+ {
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current));
+ ++it;
+ }
+
+ // void push_back(const value_type& value);
+ sprintf(counterBuffer, "%d", counter++);
+ rbListString.push_back(string(counterBuffer));
+ EATEST_VERIFY(rbListString.validate());
+ EATEST_VERIFY(!rbListString.empty());
+ EATEST_VERIFY(rbListString.size() == 1);
+ EATEST_VERIFY(rbListString.capacity() == 50);
+
+ it = rbListString.begin();
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ EATEST_VERIFY(*it == "0");
+
+ // reference front();
+ // reference back();
+ string& sFront = rbListString.front();
+ string& sBack = rbListString.back();
+ EATEST_VERIFY(&sFront == &sBack);
+
+ // void push_back();
+ string& ref = rbListString.push_back();
+ EATEST_VERIFY(rbListString.validate());
+ EATEST_VERIFY(rbListString.size() == 2);
+ EATEST_VERIFY(rbListString.capacity() == 50);
+ EATEST_VERIFY(&ref == &rbListString.back());
+
+ it = rbListString.begin();
+ ++it;
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ EATEST_VERIFY(it->empty());
+
+ sprintf(counterBuffer, "%d", counter++);
+ *it = counterBuffer;
+ EATEST_VERIFY(*it == "1");
+
+ ++it;
+ EATEST_VERIFY(it == rbListString.end());
+
+ it = rbListString.begin();
+ while(it != rbListString.end())
+ {
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ ++it;
+ }
+
+ // reference operator[](size_type n);
+ string& s0 = rbListString[0];
+ EATEST_VERIFY(s0 == "0");
+
+ string& s1 = rbListString[1];
+ EATEST_VERIFY(s1 == "1");
+
+ // Now we start hammering the ring buffer with push_back.
+ for(eastl_size_t i = 0, iEnd = rbListString.capacity() * 5; i != iEnd; i++)
+ {
+ sprintf(counterBuffer, "%d", counter++);
+ rbListString.push_back(string(counterBuffer));
+ EATEST_VERIFY(rbListString.validate());
+ }
+
+ int counterCheck = counter - 1;
+ char counterCheckBuffer[32];
+ sprintf(counterCheckBuffer, "%d", counterCheck);
+ EATEST_VERIFY(rbListString.back() == counterCheckBuffer);
+
+ // reverse_iterator rbegin();
+ // reverse_iterator rend();
+ for(RBListString::reverse_iterator ri = rbListString.rbegin(); ri != rbListString.rend(); ++ri)
+ {
+ sprintf(counterCheckBuffer, "%d", counterCheck--);
+ EATEST_VERIFY(*ri == counterCheckBuffer);
+ }
+
+ ++counterCheck;
+
+ // iterator begin();
+ // iterator end();
+ for(RBListString::iterator i = rbListString.begin(); i != rbListString.end(); ++i)
+ {
+ EATEST_VERIFY(rbListString.validate_iterator(i) == (isf_valid | isf_current | isf_can_dereference));
+ EATEST_VERIFY(*i == counterCheckBuffer);
+ sprintf(counterCheckBuffer, "%d", ++counterCheck);
+ }
+
+ // void clear();
+ rbListString.clear();
+ EATEST_VERIFY(rbListString.validate());
+ EATEST_VERIFY(rbListString.empty());
+ EATEST_VERIFY(rbListString.size() == 0);
+ EATEST_VERIFY(rbListString.capacity() == 50);
+
+ // Random operations
+ // Not easy to test the expected values without some tedium.
+ for(int j = 0; j < 10000 + (gEASTL_TestLevel * 10000); j++)
+ {
+ sprintf(counterBuffer, "%d", counter++);
+
+ const eastl_size_t op = rng.RandLimit(12);
+ const eastl_size_t s = rbListString.size();
+
+ if(op == 0)
+ {
+ // void push_back(const value_type& value);
+
+ rbListString.push_back(string(counterBuffer));
+ EATEST_VERIFY(rbListString.size() == eastl::min(s + 1, rbListString.capacity()));
+ }
+ else if(op == 1)
+ {
+ // void push_back();
+
+ string& ref2 = rbListString.push_back();
+ rbListString.back() = string(counterBuffer);
+ EATEST_VERIFY(rbListString.size() == eastl::min(s + 1, rbListString.capacity()));
+ EATEST_VERIFY(&ref2 == &rbListString.back());
+ }
+ else if(op == 2)
+ {
+ // void pop_back();
+
+ if(!rbListString.empty())
+ {
+ rbListString.pop_back();
+ EATEST_VERIFY(rbListString.size() == (s - 1));
+ }
+ }
+ else if(op == 3)
+ {
+ // void push_front(const value_type& value);
+
+ rbListString.push_front(string(counterBuffer));
+ EATEST_VERIFY(rbListString.size() == eastl::min(s + 1, rbListString.capacity()));
+ }
+ else if(op == 4)
+ {
+ // void push_front();
+
+ string& ref2 = rbListString.push_front();
+ rbListString.front() = string(counterBuffer);
+ EATEST_VERIFY(rbListString.size() == eastl::min(s + 1, rbListString.capacity()));
+ EATEST_VERIFY(&ref2 == &rbListString.front());
+ }
+ else if(op == 5)
+ {
+ // void pop_front();
+
+ if(!rbListString.empty())
+ {
+ rbListString.pop_front();
+ EATEST_VERIFY(rbListString.size() == (s - 1));
+ }
+ }
+ else if(op == 6)
+ {
+ // iterator insert(iterator position, const value_type& value);
+
+ it = rbListString.begin();
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s + 1);
+ eastl::advance(it, dist);
+
+ if(it == rbListString.end())
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current));
+ else
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+
+ rbListString.insert(it, string(counterBuffer));
+ EATEST_VERIFY(rbListString.size() == eastl::min(s + 1, rbListString.capacity()));
+ }
+ else if(op == 7)
+ {
+ // void insert(iterator position, size_type n, const value_type& value);
+
+ it = rbListString.begin();
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s + 1);
+ eastl::advance(it, dist);
+
+ if(it == rbListString.end())
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current));
+ else
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+
+ const eastl_size_t count = (eastl_size_t)rng.RandLimit(10);
+
+ rbListString.insert(it, count, string(counterBuffer));
+ EATEST_VERIFY(rbListString.size() == eastl::min(s + count, rbListString.capacity()));
+ }
+ else if(op == 8)
+ {
+ // template <typename InputIterator>
+ // void insert(iterator position, InputIterator first, InputIterator last);
+
+ string stringArray[10];
+
+ it = rbListString.begin();
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s + 1);
+ eastl::advance(it, dist);
+
+ if(it == rbListString.end())
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current));
+ else
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+
+ const eastl_size_t count = (eastl_size_t)rng.RandLimit(10);
+
+ rbListString.insert(it, stringArray, stringArray + count);
+ EATEST_VERIFY(rbListString.size() == eastl::min(s + count, rbListString.capacity()));
+ }
+ else if(op == 9)
+ {
+ // iterator erase(iterator position);
+
+ if(!rbListString.empty())
+ {
+ it = rbListString.begin();
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s);
+ eastl::advance(it, dist);
+ EATEST_VERIFY(rbListString.validate_iterator(it) == (isf_valid | isf_current | isf_can_dereference));
+ rbListString.erase(it);
+
+ EATEST_VERIFY(rbListString.size() == (s - 1));
+ }
+ }
+ else if(op == 10)
+ {
+ // iterator erase(iterator first, iterator last);
+
+ if(!rbListString.empty())
+ {
+ RBListString::iterator it1 = rbListString.begin();
+ const eastl_size_t pos = rng.RandLimit((uint32_t)s / 4);
+ eastl::advance(it1, pos);
+ EATEST_VERIFY(rbListString.validate_iterator(it1) == (isf_valid | isf_current | isf_can_dereference));
+
+ RBListString::iterator it2 = it1;
+ const eastl_size_t dist = rng.RandLimit((uint32_t)s / 4);
+ eastl::advance(it2, dist);
+ EATEST_VERIFY(rbListString.validate_iterator(it2) == (isf_valid | isf_current | isf_can_dereference));
+
+ EATEST_VERIFY(s > (pos + dist));
+ rbListString.erase(it1, it2);
+ EATEST_VERIFY(rbListString.size() == (s - dist));
+ }
+ }
+ else if(op == 11)
+ {
+ // void resize(size_type n);
+ const eastl_size_t nSubOp = rng.RandLimit(100);
+
+ if(nSubOp == 1)
+ {
+ rbListString.resize(kOriginalCapacity);
+ EATEST_VERIFY(rbListString.size() == (RBListString::size_type)kOriginalCapacity);
+ }
+ else if(nSubOp == 2)
+ {
+ const eastl_size_t newSize = rng.RandLimit((uint32_t)s * 2) + 2;
+
+ rbListString.resize(newSize);
+ EATEST_VERIFY(rbListString.size() == newSize);
+ }
+ else if(nSubOp == 3)
+ {
+ rbListString.clear();
+ EATEST_VERIFY(rbListString.size() == 0);
+ }
+ }
+
+ EATEST_VERIFY(rbListString.validate());
+ }
+
+ // We make sure that after the above we still have some contents.
+ if(rbListString.size() < 8)
+ rbListString.resize(8);
+
+ EATEST_VERIFY(rbListString.validate());
+
+ // Test const functions
+ // const_iterator begin() const;
+ // const_iterator end() const;
+ // const_reverse_iterator rbegin() const;
+ // const_reverse_iterator rend() const;
+ // const_reference front() const;
+ // const_reference back() const;
+ // const_reference operator[](size_type n) const;
+ // const container_type& get_container() const;
+ const RBListString& rbVSConst = rbListString;
+
+ for(RBListString::const_iterator ic = rbVSConst.begin(); ic != rbVSConst.end(); ++ic)
+ {
+ EATEST_VERIFY(rbListString.validate_iterator(ic) == (isf_valid | isf_current | isf_can_dereference));
+ }
+
+ for(RBListString::const_reverse_iterator ric = rbVSConst.rbegin(); ric != rbVSConst.rend(); ++ric)
+ {
+ if(ric == rbVSConst.rbegin())
+ EATEST_VERIFY(rbListString.validate_iterator(ric.base()) == (isf_valid | isf_current));
+ else
+ EATEST_VERIFY(rbListString.validate_iterator(ric.base()) == (isf_valid | isf_current | isf_can_dereference));
+ }
+
+ EATEST_VERIFY(rbVSConst.front() == rbListString.front());
+ EATEST_VERIFY(rbVSConst.back() == rbListString.back());
+ EATEST_VERIFY(rbVSConst[0] == rbListString[0]);
+ EATEST_VERIFY(&rbVSConst.get_container() == &rbListString.get_container());
+
+
+ // Test additional constructors.
+ // ring_buffer(const this_type& x);
+ // explicit ring_buffer(const Container& x);
+ // this_type& operator=(const this_type& x);
+ // void swap(this_type& x);
+ RBListString rbListString2(rbListString);
+ RBListString rbListString3(rbListString.get_container());
+ RBListString rbListString4(rbListString.capacity() / 2);
+ RBListString rbListString5(rbListString.capacity() * 2);
+
+ EATEST_VERIFY(rbListString.validate());
+ EATEST_VERIFY(rbListString2.validate());
+ EATEST_VERIFY(rbListString3.validate());
+ EATEST_VERIFY(rbListString4.validate());
+ EATEST_VERIFY(rbListString5.validate());
+
+ EATEST_VERIFY(rbListString == rbListString2);
+ EATEST_VERIFY(rbListString3.get_container() == rbListString2.get_container());
+
+ rbListString3 = rbListString4;
+ EATEST_VERIFY(rbListString3.validate());
+
+ eastl::swap(rbListString2, rbListString4);
+ EATEST_VERIFY(rbListString2.validate());
+ EATEST_VERIFY(rbListString3.validate());
+ EATEST_VERIFY(rbListString4.validate());
+ EATEST_VERIFY(rbListString == rbListString4);
+ EATEST_VERIFY(rbListString2 == rbListString3);
+
+ // void ring_buffer<T, Container>::reserve(size_type n)
+ eastl_size_t cap = rbListString2.capacity();
+ rbListString2.reserve(cap += 2);
+ EATEST_VERIFY(rbListString2.validate());
+ EATEST_VERIFY(rbListString2.capacity() == cap);
+ rbListString2.reserve(cap -= 4); // This should act as a no-op if we are following convention.
+ EATEST_VERIFY(rbListString2.validate());
+
+
+ // template <typename InputIterator>
+ // void assign(InputIterator first, InputIterator last);
+ string stringArray[10];
+ for(int q = 0; q < 10; q++)
+ stringArray[q] = '0' + (char)q;
+
+ rbListString5.assign(stringArray, stringArray + 10);
+ EATEST_VERIFY(rbListString5.validate());
+ EATEST_VERIFY(rbListString5.size() == 10);
+ EATEST_VERIFY(rbListString5.front() == "0");
+ EATEST_VERIFY(rbListString5.back() == "9");
+
+
+ // ring_buffer(this_type&& x);
+ // ring_buffer(this_type&& x, const allocator_type& allocator);
+ // this_type& operator=(this_type&& x);
+
+ RBListString rbListStringM1(eastl::move(rbListString5));
+ EATEST_VERIFY(rbListStringM1.validate() && rbListString5.validate());
+ EATEST_VERIFY((rbListStringM1.size() == 10) && (rbListString5.size() == 0));
+
+ RBListString rbListStringM2(eastl::move(rbListStringM1), RBListString::allocator_type());
+ EATEST_VERIFY(rbListStringM2.validate() && rbListStringM1.validate());
+ EATEST_VERIFY((rbListStringM2.size() == 10) && (rbListStringM1.size() == 0));
+
+ rbListStringM1 = eastl::move(rbListStringM2);
+ EATEST_VERIFY(rbListStringM1.validate() && rbListStringM2.validate());
+ EATEST_VERIFY((rbListStringM1.size() == 10) && (rbListStringM2.size() == 0));
+ }
+
+
+
+ {
+ // ring_buffer(std::initializer_list<value_type> ilist, const allocator_type& allocator = allocator_type());
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ // void insert(iterator position, std::initializer_list<value_type> ilist);
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ ring_buffer<int> intBuffer = { 0, 1, 2 };
+ EATEST_VERIFY(VerifySequence(intBuffer.begin(), intBuffer.end(), int(), "ring_buffer std::initializer_list", 0, 1, 2, -1));
+
+ intBuffer = { 16, 17, 18 };
+ EATEST_VERIFY(VerifySequence(intBuffer.begin(), intBuffer.end(), int(), "ring_buffer std::initializer_list", 16, 17, 18, -1));
+
+ // We need to increase the capacity in order to insert new items because the ctor above set the capacity to be only enough to hold the initial list.
+ intBuffer.reserve(intBuffer.capacity() + 2);
+ intBuffer.insert(intBuffer.begin(), { 14, 15 });
+ EATEST_VERIFY(VerifySequence(intBuffer.begin(), intBuffer.end(), int(), "ring_buffer std::initializer_list", 14, 15, 16, 17, 18, -1));
+ #endif
+ }
+
+ {
+ // Regression for user-reported problem.
+ typedef eastl::fixed_vector<float, 8> GamepadData_t;
+ typedef eastl::ring_buffer<GamepadData_t> GamepadDataDelayBuffer_t;
+ typedef eastl::fixed_vector<GamepadDataDelayBuffer_t, 32> GamepadDataDelayBufferTable_t;
+
+ GamepadDataDelayBufferTable_t mDelayTable;
+ mDelayTable.resize(32);
+ for(eastl_size_t i = 0; i < mDelayTable.size(); i++)
+ mDelayTable[i].reserve(16);
+
+ GamepadData_t data(8, 1.f);
+ mDelayTable[0].push_back(data);
+ mDelayTable[0].push_back(data);
+ mDelayTable[0].push_back(data);
+ mDelayTable[0].push_back(data);
+
+ EATEST_VERIFY(mDelayTable[0].size() == 4);
+ GamepadData_t dataFront = mDelayTable[0].front();
+ EATEST_VERIFY((dataFront.size() == 8) && (dataFront[0] == 1.f));
+ mDelayTable[0].pop_front();
+ }
+
+ {
+ // Regression for bug with iterator subtraction
+ typedef eastl::ring_buffer<int> IntBuffer_t;
+ IntBuffer_t intBuffer = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ IntBuffer_t::iterator it = intBuffer.begin();
+
+ EATEST_VERIFY(*it == 0);
+ it += 4;
+ EATEST_VERIFY(*it == 4);
+ it--;
+ EATEST_VERIFY(*it == 3);
+ it -= 2;
+ EATEST_VERIFY(*it == 1);
+
+ intBuffer.push_back(8);
+ intBuffer.push_back(9);
+ intBuffer.push_back(10);
+ intBuffer.push_back(11);
+
+ EATEST_VERIFY(*it == 10);
+ it -= 3;
+ EATEST_VERIFY(*it == 7); // Test looping around the end of the underlying container
+ it -= 5;
+ EATEST_VERIFY(*it == 11); // Test wrapping around begin to end of the ring_buffer
+ it -= 2;
+ EATEST_VERIFY(*it == 9); // It is important to test going back to the beginning of the underlying container.
+
+ }
+
+ // fixed_ring_buffer<T,N> tests
+ // ring_buffer<T, fixed_vector<T,N>> tests
+ {
+ {
+ // (MAX_ELEMENTS - 1) accommodates the ring_buffer sentinel
+ const int MAX_ELEMENTS = 8;
+ eastl::ring_buffer<int, eastl::fixed_vector<int, MAX_ELEMENTS, false>> rb(MAX_ELEMENTS - 1);
+
+ for (int i = 0; i < MAX_ELEMENTS - 1; i++)
+ rb.push_back(i);
+
+ auto it = rb.begin();
+ for (int i = 0; i < MAX_ELEMENTS - 1; i++)
+ { EATEST_VERIFY(*it++ == i); }
+ }
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ {
+ const int MAX_ELEMENTS = 25;
+ eastl::fixed_ring_buffer<int, MAX_ELEMENTS> rb(MAX_ELEMENTS);
+
+ for(int i = 0; i < MAX_ELEMENTS; i++)
+ rb.push_back(i);
+
+ auto it = rb.begin();
+ for(int i = 0; i < MAX_ELEMENTS; i++)
+ { EATEST_VERIFY(*it++ == i); }
+ }
+ #endif
+
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ {
+ const int MAX_ELEMENTS = 8;
+ eastl::fixed_ring_buffer<int, MAX_ELEMENTS> rb = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ auto it = rb.begin();
+ for(int i = 0; i < MAX_ELEMENTS; i++)
+ { EATEST_VERIFY(*it++ == i); }
+ }
+
+ {
+ struct LocalStruct {};
+ fixed_ring_buffer<LocalStruct, 8> rb = {{{}, {}, {}}};
+ EATEST_VERIFY(rb.size() == 3);
+ }
+ #endif
+ }
+
+ {
+ const auto MAX_ELEMENTS = EASTL_MAX_STACK_USAGE;
+
+ // create a container simulating LARGE state that exceeds
+ // our maximum stack size macro. This forces our ring_buffer implementation
+ // to allocate the container in the heap instead of holding it on the stack.
+ // This test ensures that allocation is NOT serviced by the default global heap.
+ // Instead it is serviced by the allocator of the ring_buffers underlying container.
+ struct PaddedVector : public eastl::vector<int, MallocAllocator>
+ {
+ char mPadding[EASTL_MAX_STACK_USAGE];
+ };
+
+ MallocAllocator::reset_all();
+ CountingAllocator::resetCount();
+
+ {
+ CountingAllocator countingAlloc;
+ AutoDefaultAllocator _(&countingAlloc);
+
+ eastl::ring_buffer<int, PaddedVector> intBuffer(1);
+ for (int i = 0; i < MAX_ELEMENTS; i++)
+ intBuffer.push_back(i);
+
+ #if !EASTL_OPENSOURCE
+ const auto cacheAllocationCount = gEASTLTest_TotalAllocationCount;
+ #endif
+ const auto cacheMallocatorCount = MallocAllocator::mAllocCountAll;
+ const auto forceReAllocSize = intBuffer.size() * 2;
+
+ intBuffer.resize(forceReAllocSize);
+
+ #if !EASTL_OPENSOURCE
+ VERIFY(cacheAllocationCount == gEASTLTest_TotalAllocationCount);
+ #endif
+ VERIFY(cacheMallocatorCount < MallocAllocator::mAllocCountAll);
+ VERIFY(CountingAllocator::neverUsed());
+ }
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestSList.cpp b/EASTL/test/source/TestSList.cpp
new file mode 100644
index 0000000..94a4d3a
--- /dev/null
+++ b/EASTL/test/source/TestSList.cpp
@@ -0,0 +1,928 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EASTL/slist.h>
+#include <EABase/eabase.h>
+#include <EASTL/fixed_allocator.h>
+
+using namespace eastl;
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+struct TestObj
+{
+ TestObj() : mI(0), mMoveCtor(0), mCopyCtor(0) {}
+ explicit TestObj(int i) : mI(i), mMoveCtor(0), mCopyCtor(0) {}
+ explicit TestObj(int a, int b, int c, int d) : mI(a+b+c+d), mMoveCtor(0), mCopyCtor(0) {}
+
+ TestObj(TestObj&& other)
+ {
+ mI = other.mI;
+ mMoveCtor = other.mMoveCtor;
+ mCopyCtor = other.mCopyCtor;
+ mMoveCtor++;
+ }
+
+ TestObj(const TestObj& other)
+ {
+ mI = other.mI;
+ mMoveCtor = other.mMoveCtor;
+ mCopyCtor = other.mCopyCtor;
+ mCopyCtor++;
+ }
+
+ TestObj& operator=(const TestObj& other)
+ {
+ mI = other.mI;
+ mMoveCtor = other.mMoveCtor;
+ mCopyCtor = other.mCopyCtor;
+ return *this;
+ }
+
+ int mI;
+ int mMoveCtor;
+ int mCopyCtor;
+};
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// TestSList
+int TestSList()
+{
+ int nErrorCount = 0;
+
+ // slist();
+ {
+ slist<int> list;
+ VERIFY(list.empty());
+ VERIFY(list.size() == 0);
+ }
+
+ // slist(const allocator_type& allocator);
+ {
+ MallocAllocator::reset_all();
+
+ VERIFY(MallocAllocator::mAllocCountAll == 0);
+ slist<int, MallocAllocator> list;
+ list.resize(100, 42);
+ VERIFY(MallocAllocator::mAllocCountAll == 100);
+ }
+
+ // explicit slist(size_type n, const allocator_type& allocator = EASTL_SLIST_DEFAULT_ALLOCATOR);
+ {
+ slist<int> list(100);
+ VERIFY(list.size() == 100);
+ VERIFY(!list.empty());
+ }
+
+ // slist(size_type n, const value_type& value, const allocator_type& allocator = EASTL_SLIST_DEFAULT_ALLOCATOR);
+ {
+ slist<int> list(32, 42);
+ VERIFY(list.size() == 32);
+ VERIFY(list.front() == 42);
+ VERIFY(!list.empty());
+ }
+
+ // slist(const this_type& x);
+ {
+ slist<int> list1;
+ list1.resize(100, 42);
+
+ VERIFY(!list1.empty());
+ slist<int> list2(list1);
+ VERIFY(!list2.empty());
+ VERIFY(list1 == list2);
+ }
+
+ // slist(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_SLIST_DEFAULT_ALLOCATOR);
+ {
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ slist<int> list1({1,2,3,4,5,6,7,8});
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 8);
+ #endif
+ }
+
+ // slist(this_type&& x);
+ {
+ slist<int> list1;
+ list1.resize(100,42);
+
+ slist<int> list2(eastl::move(list1));
+
+ VERIFY(list1.empty());
+ VERIFY(!list2.empty());
+ VERIFY(list1 != list2);
+ }
+
+ // slist(this_type&& x, const allocator_type& allocator);
+ { }
+
+ // slist(InputIterator first, InputIterator last);
+ {
+ slist<int> list1;
+ list1.resize(100, 42);
+ VERIFY(!list1.empty());
+
+ slist<int> list2(list1.begin(), list1.end());
+ VERIFY(!list2.empty());
+ VERIFY(list1 == list2);
+ }
+
+ // this_type& operator=(const this_type& x);
+ {
+ slist<int> list1;
+ list1.resize(100, 42);
+ VERIFY(!list1.empty());
+
+ slist<int> list2 = list1;
+ VERIFY(!list2.empty());
+ VERIFY(list1 == list2);
+ }
+
+ // this_type& operator=(std::initializer_list<value_type>);
+ {
+ slist<int> list1 = {1,2,3,4,5,6,7,8};
+ VERIFY(!list1.empty());
+ }
+
+ // this_type& operator=(this_type&& x);
+ {
+ slist<int> list1;
+ list1.resize(100, 42);
+ slist<int> list2 = eastl::move(list1);
+
+ VERIFY(list1.empty());
+ VERIFY(!list2.empty());
+ VERIFY(list1 != list2);
+ }
+
+ // void swap(this_type& x);
+ {
+ slist<int> list1;
+ list1.resize(8, 37);
+
+ slist<int> list2;
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 8);
+ VERIFY(list2.empty());
+
+ list2.swap(list1);
+
+ VERIFY(list1.empty());
+ VERIFY(!list2.empty());
+ }
+
+ // void assign(size_type n, const value_type& value);
+ {
+ slist<int> list1;
+ list1.assign(100, 42);
+
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 100);
+
+ for(auto& e : list1)
+ VERIFY(e == 42);
+ }
+
+ // void assign(std::initializer_list<value_type> ilist);
+ {
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ slist<int> list1;
+ list1.assign({1,2,3,4,5,6,7,8});
+
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 8);
+
+ auto i = eastl::begin(list1);
+ VERIFY(*i == 1); i++;
+ VERIFY(*i == 2); i++;
+ VERIFY(*i == 3); i++;
+ VERIFY(*i == 4); i++;
+ VERIFY(*i == 5); i++;
+ VERIFY(*i == 6); i++;
+ VERIFY(*i == 7); i++;
+ VERIFY(*i == 8); i++;
+ VERIFY(i == eastl::end(list1));
+ #endif
+ }
+
+ // void assign(InputIterator first, InputIterator last);
+ {
+ slist<int> list1;
+ list1.resize(100, 42);
+ VERIFY(!list1.empty());
+
+ slist<int> list2;
+ list2.assign(list1.begin(), list1.end());
+ VERIFY(!list2.empty());
+ VERIFY(list1 == list2);
+ }
+
+ // iterator begin() EA_NOEXCEPT;
+ // const_iterator begin() const EA_NOEXCEPT;
+ // const_iterator cbegin() const EA_NOEXCEPT;
+ {
+ slist<int> list1;
+ list1.resize(100, 1);
+ VERIFY(!list1.empty());
+
+ const auto ci = list1.begin();
+ auto i = list1.begin();
+ auto ci2 = list1.cbegin();
+
+ VERIFY(*i == 1);
+ VERIFY(*ci == 1);
+ VERIFY(*ci2 == 1);
+ }
+
+ // iterator end() EA_NOEXCEPT;
+ // const_iterator end() const EA_NOEXCEPT;
+ // const_iterator cend() const EA_NOEXCEPT;
+ {
+ slist<int> list1;
+ list1.resize(100, 42);
+ VERIFY(!list1.empty());
+
+ const auto ci = list1.end();
+ auto i = list1.end();
+ auto ci2 = list1.cend();
+
+ VERIFY(i == eastl::end(list1));
+ VERIFY(ci == eastl::end(list1));
+ VERIFY(ci2 == eastl::end(list1));
+ }
+
+ // iterator before_begin() EA_NOEXCEPT;
+ // const_iterator before_begin() const EA_NOEXCEPT;
+ // const_iterator cbefore_begin() const EA_NOEXCEPT;
+ // iterator previous(const_iterator position);
+ // const_iterator previous(const_iterator position) const;
+ {
+ slist<int> list1;
+
+ auto b = list1.begin();
+ auto prev = list1.previous(b);
+
+ VERIFY(prev == list1.before_begin());
+ }
+
+ // reference front();
+ // const_reference front() const;
+ {
+ slist<int> list1;
+ list1.resize(100, 1);
+
+ VERIFY(list1.begin() == eastl::begin(list1));
+ VERIFY(list1.front() == 1);
+
+ const slist<int> clist1(list1);
+ VERIFY(clist1.front() == 1);
+ VERIFY(list1.validate());
+ VERIFY(clist1.validate());
+ }
+
+
+ // void emplace_front(Args&&... args);
+ // void emplace_front(value_type&& value);
+ // void emplace_front(const value_type& value);
+ {
+ slist<TestObj> list1;
+ list1.emplace_front(42);
+ VERIFY(list1.front().mI == 42);
+ VERIFY(list1.front().mCopyCtor == 0);
+ VERIFY(list1.front().mMoveCtor == 0);
+ VERIFY(list1.size() == 1);
+ VERIFY(list1.validate());
+
+ list1.emplace_front(1,2,3,4);
+ VERIFY(list1.front().mCopyCtor == 0);
+ VERIFY(list1.front().mMoveCtor == 0);
+ VERIFY(list1.front().mI == (1+2+3+4));
+ VERIFY(list1.size() == 2);
+ VERIFY(list1.validate());
+ }
+
+ // void push_front(const value_type& value);
+ // reference push_front();
+ // void push_front(value_type&& value);
+ {
+ slist<TestObj> list1;
+ list1.push_front(TestObj(42));
+ VERIFY(list1.front().mI == 42);
+ VERIFY(list1.front().mCopyCtor == 0);
+ VERIFY(list1.front().mMoveCtor == 1);
+ VERIFY(list1.size() == 1);
+
+ list1.push_front();
+ VERIFY(list1.front().mCopyCtor == 0);
+ VERIFY(list1.front().mMoveCtor == 0);
+ VERIFY(list1.front().mI == 0);
+ VERIFY(list1.size() == 2);
+
+ list1.push_front().mI = 1492;
+ VERIFY(list1.front().mI == 1492);
+ VERIFY(list1.validate());
+ }
+
+ // void pop_front();
+ {
+ slist<int> list1;
+ list1.push_front(4);
+ list1.push_front(3);
+ list1.push_front(2);
+ list1.push_front(1);
+
+ list1.pop_front();
+ VERIFY(list1.front() == 2);
+ VERIFY(list1.size() == 3);
+ VERIFY(list1.validate());
+
+ list1.pop_front();
+ VERIFY(list1.front() == 3);
+ VERIFY(list1.size() == 2);
+ VERIFY(list1.validate());
+
+ list1.pop_front();
+ VERIFY(list1.front() == 4);
+ VERIFY(list1.size() == 1);
+ VERIFY(list1.validate());
+ }
+
+ // bool empty() const EA_NOEXCEPT;
+ // size_type size() const EA_NOEXCEPT;
+ {
+ slist<int> list1;
+ VERIFY(list1.empty());
+ VERIFY(list1.size() == 0);
+ VERIFY(list1.validate());
+
+ list1.push_front(42);
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 1);
+ VERIFY(list1.validate());
+
+ list1.pop_front();
+ VERIFY(list1.empty());
+ VERIFY(list1.size() == 0);
+ VERIFY(list1.validate());
+ }
+
+
+ // void resize(size_type n, const value_type& value);
+ // void resize(size_type n);
+ {
+ slist<int> list1;
+ VERIFY(list1.empty());
+ list1.resize(100, 42);
+ VERIFY(list1.front() == 42);
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 100);
+ VERIFY(list1.validate());
+
+ slist<int> list2;
+ VERIFY(list2.empty());
+ list2.resize(100);
+ VERIFY(!list2.empty());
+ VERIFY(list2.size() == 100);
+ VERIFY(list2.validate());
+ }
+
+ // iterator insert(const_iterator position);
+ // iterator insert(const_iterator position, const value_type& value);
+ // void insert(const_iterator position, size_type n, const value_type& value);
+ {
+ static const int MAGIC_VALUE = 4242;
+ struct TestVal
+ {
+ TestVal() : mV(MAGIC_VALUE) {}
+ TestVal(int v) : mV(v) {}
+ operator int() { return mV; }
+ int mV;
+ };
+
+ slist<TestVal> list1;
+ VERIFY(list1.empty());
+
+ auto insert_iter = eastl::begin(list1);
+ list1.insert(insert_iter);
+ VERIFY(list1.size() == 1);
+ VERIFY(!list1.empty());
+ VERIFY(list1.validate());
+
+ list1.insert(insert_iter, 42);
+ VERIFY(list1.size() == 2);
+ VERIFY(!list1.empty());
+ VERIFY(list1.front() == MAGIC_VALUE);
+ VERIFY(list1.validate());
+
+
+ list1.insert(insert_iter, 43);
+ VERIFY(list1.size() == 3);
+ VERIFY(!list1.empty());
+ VERIFY(list1.front() == MAGIC_VALUE);
+ VERIFY(list1.validate());
+ }
+
+ // template <typename InputIterator>
+ // void insert(const_iterator position, InputIterator first, InputIterator last);
+ {
+ slist<int> list1;
+ VERIFY(list1.empty());
+ list1.resize(100, 42);
+ VERIFY(list1.size() == 100);
+ VERIFY(!list1.empty());
+ VERIFY(list1.validate());
+
+ slist<int> list2;
+ list2.resize(400, 24);
+ VERIFY(list2.size() == 400);
+ VERIFY(!list2.empty());
+ VERIFY(list1.validate());
+
+ list1.insert(eastl::end(list1), eastl::begin(list2), eastl::end(list2)); // [42,42,42,...,42, | 24,24,24,24...]
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 500);
+ VERIFY(list1.front() == 42);
+ VERIFY(list1.validate());
+
+ auto boundary_iter = list1.begin();
+ eastl::advance(boundary_iter, 100); // move to insertation point
+ VERIFY(*boundary_iter == 24);
+ VERIFY(list1.validate());
+ }
+
+
+ // Returns an iterator pointing to the last inserted element, or position if insertion count is zero.
+ // iterator insert_after(const_iterator position);
+ // iterator insert_after(const_iterator position, const value_type& value);
+ // iterator insert_after(const_iterator position, size_type n, const value_type& value);
+ // iterator insert_after(const_iterator position, std::initializer_list<value_type> ilist);
+ {
+ slist<int> list1;
+ VERIFY(list1.empty());
+ list1.push_front();
+
+ list1.insert_after(list1.begin());
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 2);
+ VERIFY(list1.validate());
+
+ list1.insert_after(list1.begin(), 43);
+ VERIFY(list1.size() == 3);
+ VERIFY(list1.validate());
+
+ list1.insert_after(list1.begin(), 10, 42);
+ VERIFY(list1.size() == 13);
+ VERIFY(eastl::count_if(list1.begin(), list1.end(), [](int i) { return i == 42; }) == 10);
+ VERIFY(list1.validate());
+
+ list1.insert_after(list1.begin(), {1,2,3,4,5,6,7,8,9,0});
+ VERIFY(list1.size() == 23);
+ VERIFY(list1.validate());
+ }
+
+ // iterator insert_after(const_iterator position, value_type&& value);
+ {
+ slist<TestObj> list1;
+ VERIFY(list1.empty());
+ list1.push_front();
+
+ auto inserted = list1.insert_after(list1.begin(), TestObj(42));
+ VERIFY(!list1.empty());
+ VERIFY((*inserted).mCopyCtor == 0);
+ VERIFY((*inserted).mMoveCtor == 1);
+ }
+
+ // iterator insert_after(const_iterator position, InputIterator first, InputIterator last);
+ {
+ slist<int> list1 = {0,1,2,3,4};
+ slist<int> list2 = {9,8,7,6,5};
+ list1.insert_after(list1.begin(), list2.begin(), list2.end());
+ VERIFY(list1 == slist<int>({0,9,8,7,6,5,1,2,3,4}));
+ }
+
+ // iterator emplace_after(const_iterator position, Args&&... args);
+ // iterator emplace_after(const_iterator position, value_type&& value);
+ // iterator emplace_after(const_iterator position, const value_type& value);
+ {
+ slist<TestObj> list1;
+ list1.emplace_after(list1.before_begin(), 42);
+ VERIFY(list1.front().mI == 42);
+ VERIFY(list1.front().mCopyCtor == 0);
+ VERIFY(list1.front().mMoveCtor == 0);
+ VERIFY(list1.size() == 1);
+ VERIFY(list1.validate());
+
+ list1.emplace_after(list1.before_begin(),1,2,3,4);
+ VERIFY(list1.front().mCopyCtor == 0);
+ VERIFY(list1.front().mMoveCtor == 0);
+ VERIFY(list1.front().mI == (1+2+3+4));
+ VERIFY(list1.size() == 2);
+ VERIFY(list1.validate());
+ }
+
+ // iterator erase(const_iterator position);
+ // iterator erase(const_iterator first, const_iterator last);
+ {
+ slist<int> list1 = {0,1,2,3,4,5,6,7};
+
+ auto p = list1.begin();
+ p++; p++; p++;
+
+ list1.erase(p);
+ VERIFY(list1 == slist<int>({0,1,2,4,5,6,7}));
+
+ list1.erase(list1.begin(), list1.end());
+ VERIFY(list1 == slist<int>({}));
+ VERIFY(list1.size() == 0);
+ VERIFY(list1.empty());
+ }
+
+ // iterator erase_after(const_iterator position);
+ // iterator erase_after(const_iterator before_first, const_iterator last);
+ {
+ slist<int> list1 = {0,1,2,3,4,5,6,7};
+ auto p = list1.begin();
+
+ list1.erase_after(p);
+ VERIFY(list1 == slist<int>({0,2,3,4,5,6,7}));
+ VERIFY(list1.validate());
+
+ list1.erase_after(p);
+ VERIFY(list1 == slist<int>({0,3,4,5,6,7}));
+ VERIFY(list1.validate());
+
+ list1.erase_after(p);
+ VERIFY(list1 == slist<int>({0,4,5,6,7}));
+ VERIFY(list1.validate());
+
+ list1.erase_after(p, list1.end());
+ VERIFY(list1 == slist<int>({0}));
+ VERIFY(list1.validate());
+ }
+
+ // void clear();
+ {
+ slist<int> list1;
+ list1.resize(100, 42);
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == 100);
+ VERIFY(list1.validate());
+
+ list1.clear();
+ VERIFY(list1.empty());
+ VERIFY(list1.size() == 0);
+ VERIFY(list1.validate());
+ }
+
+ // void reset_lose_memory();
+ {
+ typedef eastl::slist<int, fixed_allocator> SIntList;
+ typedef SIntList::node_type SIntListNode;
+ const size_t kBufferCount = 100;
+ SIntListNode buffer1[kBufferCount];
+ SIntList list1;
+ const size_t kAlignOfSIntListNode = EA_ALIGN_OF(SIntListNode);
+ list1.get_allocator().init(buffer1, sizeof(buffer1), sizeof(SIntListNode), kAlignOfSIntListNode);
+
+ VERIFY(list1.empty());
+ VERIFY(list1.size() == 0);
+ VERIFY(list1.validate());
+
+ list1.resize(kBufferCount, 42);
+ VERIFY(!list1.empty());
+ VERIFY(list1.size() == kBufferCount);
+ VERIFY(list1.validate());
+
+ list1.reset_lose_memory();
+ VERIFY(list1.empty());
+ VERIFY(list1.size() == 0);
+ VERIFY(list1.validate());
+ }
+
+ // void remove(const value_type& value);
+ {
+ slist<int> list1 = {0,1,2,3,4};
+ slist<int> list2 = {0,1,3,4};
+
+ list1.remove(2);
+
+ VERIFY(list1 == list2);
+ VERIFY(list1.validate());
+ VERIFY(list2.validate());
+ }
+
+ // void remove_if(Predicate predicate);
+ {
+ slist<int> list1;
+ list1.resize(100, 42);
+ VERIFY(list1.size() == 100);
+ VERIFY(list1.validate());
+
+ list1.remove_if([](int i) { return i == 1234; }); // intentionally remove nothing.
+ VERIFY(list1.size() == 100);
+ VERIFY(list1.validate());
+
+ list1.remove_if([](int i) { return i == 42; });
+ VERIFY(list1.size() == 0);
+ VERIFY(list1.validate());
+ }
+
+ // void reverse() EA_NOEXCEPT;
+ {
+ slist<int> list1 = {0,1,2,3,4};
+ slist<int> list2 = {4,3,2,1,0};
+ VERIFY(list1 != list2);
+
+ list1.reverse();
+ VERIFY(list1 == list2);
+ }
+
+ // void splice(const_iterator position, this_type& x);
+ // void splice(const_iterator position, this_type& x, const_iterator i);
+ // void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last);
+ {
+ slist<int> valid = {0,1,2,3,4,5,6,7};
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+ list1.splice(list1.end(), list2);
+
+ VERIFY(list1 == valid);
+ VERIFY(list1.validate());
+ }
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+
+ list1.splice(list1.begin(), list2, list2.begin());
+ VERIFY(list1 == slist<int>({4,0,1,2,3}));
+ VERIFY(list2 == slist<int>({5,6,7}));
+
+ list1.splice(list1.begin(), list2, list2.begin());
+ VERIFY(list1 == slist<int>({5,4,0,1,2,3}));
+ VERIFY(list2 == slist<int>({6,7}));
+
+ list1.splice(list1.begin(), list2, list2.begin());
+ VERIFY(list1 == slist<int>({6,5,4,0,1,2,3}));
+ VERIFY(list2 == slist<int>({7}));
+
+ list1.splice(list1.begin(), list2, list2.begin());
+ VERIFY(list1 == slist<int>({7,6,5,4,0,1,2,3}));
+ VERIFY(list2 == slist<int>({}));
+
+ VERIFY(list1.validate());
+ VERIFY(list2.validate());
+ }
+ }
+
+ // void splice(const_iterator position, this_type&& x);
+ // void splice(const_iterator position, this_type&& x, const_iterator i);
+ // void splice(const_iterator position, this_type&& x, const_iterator first, const_iterator last);
+ {
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+
+ list1.splice(list1.begin(), eastl::move(list2));
+ VERIFY(list1 == slist<int>({4,5,6,7,0,1,2,3}));
+ }
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+
+ list1.splice(list1.begin(), eastl::move(list2), list2.begin());
+ VERIFY(list1 == slist<int>({4,0,1,2,3}));
+ }
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+
+ auto b = list2.begin();
+ auto e = list2.end();
+ e = list2.previous(e);
+ e = list2.previous(e);
+
+ list1.splice(list1.begin(), eastl::move(list2), b, e);
+ VERIFY(list1 == slist<int>({4,5,0,1,2,3}));
+ }
+ }
+
+ // void splice_after(const_iterator position, this_type& x);
+ // void splice_after(const_iterator position, this_type& x, const_iterator i);
+ // void splice_after(const_iterator position, this_type& x, const_iterator first, const_iterator last);
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+
+ list1.splice_after(list1.begin(), list2);
+ VERIFY(list1 == slist<int>({0,4,5,6,7,1,2,3}));
+ VERIFY(list1.validate());
+ VERIFY(list2.validate());
+ }
+
+ // void splice_after(const_iterator position, this_type&& x);
+ // void splice_after(const_iterator position, this_type&& x, const_iterator i);
+ // void splice_after(const_iterator position, this_type&& x, const_iterator first, const_iterator last);
+ {
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+
+ list1.splice_after(list1.begin(), eastl::move(list2));
+ VERIFY(list1 == slist<int>({0,4,5,6,7,1,2,3}));
+ }
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+
+ list1.splice_after(list1.begin(), eastl::move(list2), list2.begin());
+ VERIFY(list1 == slist<int>({0,5,6,7,1,2,3}));
+ }
+ {
+ slist<int> list1 = {0,1,2,3};
+ slist<int> list2 = {4,5,6,7};
+
+ auto b = list2.begin();
+ auto e = list2.end();
+ e = list2.previous(e);
+ e = list2.previous(e);
+
+ list1.splice_after(list1.begin(), eastl::move(list2), b, e);
+ VERIFY(list1 == slist<int>({0,5,6,1,2,3}));
+ }
+ }
+
+ // void sort();
+ {
+ slist<int> list1 = {0, 1, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 2, 2, 1, 0};
+ VERIFY(!eastl::is_sorted(eastl::begin(list1), eastl::end(list1)));
+ VERIFY(list1.validate());
+
+ list1.sort();
+
+ VERIFY(eastl::is_sorted(eastl::begin(list1), eastl::end(list1)));
+ VERIFY(list1.validate());
+ }
+
+ // template <class Compare>
+ // void sort(Compare compare);
+ {
+ auto compare = [](int a, int b) { return a > b;};
+
+ slist<int> list1 = {0, 1, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 2, 2, 1, 0};
+ VERIFY(!eastl::is_sorted(eastl::begin(list1), eastl::end(list1), compare));
+ list1.sort(compare);
+ VERIFY(eastl::is_sorted(eastl::begin(list1), eastl::end(list1), compare));
+ }
+
+ { // Test empty base-class optimization
+ struct UnemptyDummyAllocator : eastl::dummy_allocator
+ {
+ int foo;
+ };
+
+ typedef eastl::slist<int, eastl::dummy_allocator> list1;
+ typedef eastl::slist<int, UnemptyDummyAllocator> list2;
+
+ EATEST_VERIFY(sizeof(list1) < sizeof(list2));
+ }
+
+ { // Test erase / erase_if
+ {
+ slist<int> l = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto numErased = eastl::erase(l, 5);
+ VERIFY((l == slist<int>{0, 1, 2, 3, 4, 6, 7, 8, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(l, 7);
+ VERIFY((l == slist<int>{0, 1, 2, 3, 4, 6, 8, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(l, 2);
+ VERIFY((l == slist<int>{0, 1, 3, 4, 6, 8, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(l, 0);
+ VERIFY((l == slist<int>{1, 3, 4, 6, 8, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(l, 4);
+ VERIFY((l == slist<int>{1, 3, 6, 8, 9}));
+ VERIFY(numErased == 1);
+ }
+
+ {
+ slist<int> l = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto numErased = eastl::erase_if(l, [](auto e) { return e % 2 == 0; });
+ VERIFY((l == slist<int>{1, 3, 5, 7, 9}));
+ VERIFY(numErased == 5);
+
+ numErased = eastl::erase_if(l, [](auto e) { return e == 5; });
+ VERIFY((l == slist<int>{1, 3, 7, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase_if(l, [](auto e) { return e % 3 == 0; });
+ VERIFY((l == slist<int>{1, 7}));
+ VERIFY(numErased == 2);
+ }
+ }
+
+ { // Test global operators
+ {
+ slist<int> list1 = {0, 1, 2, 3, 4, 5};
+ slist<int> list2 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ slist<int> list3 = {5, 6, 7, 8};
+
+ VERIFY(list1 == list1);
+ VERIFY(!(list1 != list1));
+
+ VERIFY(list1 != list2);
+ VERIFY(list2 != list3);
+ VERIFY(list1 != list3);
+
+ VERIFY(list1 < list2);
+ VERIFY(list1 <= list2);
+
+ VERIFY(list2 > list1);
+ VERIFY(list2 >= list1);
+
+ VERIFY(list3 > list1);
+ VERIFY(list3 > list2);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ slist<int> list1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ slist<int> list2 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ slist<int> list3 = {-1, 0, 1, 2, 3, 4, 5};
+
+ // Verify equality between list1 and list2
+ VERIFY((list1 <=> list2) == 0);
+ VERIFY(!((list1 <=> list2) != 0));
+ VERIFY((list1 <=> list2) <= 0);
+ VERIFY((list1 <=> list2) >= 0);
+ VERIFY(!((list1 <=> list2) < 0));
+ VERIFY(!((list1 <=> list2) > 0));
+
+ list1.push_front(-2); // Make list1 less than list2.
+ list2.push_front(-1);
+
+ // Verify list1 < list2
+ VERIFY(!((list1 <=> list2) == 0));
+ VERIFY((list1 <=> list2) != 0);
+ VERIFY((list1 <=> list2) <= 0);
+ VERIFY(!((list1 <=> list2) >= 0));
+ VERIFY(((list1 <=> list2) < 0));
+ VERIFY(!((list1 <=> list2) > 0));
+
+
+ // Verify list3.size() < list2.size() and list3 is a subset of list2
+ VERIFY(!((list3 <=> list2) == 0));
+ VERIFY((list3 <=> list2) != 0);
+ VERIFY((list3 <=> list2) <= 0);
+ VERIFY(!((list3 <=> list2) >= 0));
+ VERIFY(((list3 <=> list2) < 0));
+ VERIFY(!((list3 <=> list2) > 0));
+ }
+
+ {
+ slist<int> list1 = {1, 2, 3, 4, 5, 6, 7};
+ slist<int> list2 = {7, 6, 5, 4, 3, 2, 1};
+ slist<int> list3 = {1, 2, 3, 4};
+
+ struct weak_ordering_slist
+ {
+ slist<int> slist;
+ inline std::weak_ordering operator<=>(const weak_ordering_slist& b) const { return slist <=> b.slist; }
+ };
+
+ VERIFY(synth_three_way{}(weak_ordering_slist{list1}, weak_ordering_slist{list2}) == std::weak_ordering::less);
+ VERIFY(synth_three_way{}(weak_ordering_slist{list3}, weak_ordering_slist{list1}) == std::weak_ordering::less);
+ VERIFY(synth_three_way{}(weak_ordering_slist{list2}, weak_ordering_slist{list1}) == std::weak_ordering::greater);
+ VERIFY(synth_three_way{}(weak_ordering_slist{list2}, weak_ordering_slist{list3}) == std::weak_ordering::greater);
+ VERIFY(synth_three_way{}(weak_ordering_slist{list1}, weak_ordering_slist{list1}) == std::weak_ordering::equivalent);
+
+ struct strong_ordering_slist
+ {
+ slist<int> slist;
+ inline std::strong_ordering operator<=>(const strong_ordering_slist& b) const { return slist <=> b.slist; }
+ };
+
+ VERIFY(synth_three_way{}(strong_ordering_slist{list1}, strong_ordering_slist{list2}) == std::strong_ordering::less);
+ VERIFY(synth_three_way{}(strong_ordering_slist{list3}, strong_ordering_slist{list1}) == std::strong_ordering::less);
+ VERIFY(synth_three_way{}(strong_ordering_slist{list2}, strong_ordering_slist{list1}) == std::strong_ordering::greater);
+ VERIFY(synth_three_way{}(strong_ordering_slist{list2}, strong_ordering_slist{list3}) == std::strong_ordering::greater);
+ VERIFY(synth_three_way{}(strong_ordering_slist{list1}, strong_ordering_slist{list1}) == std::strong_ordering::equal);
+ }
+#endif
+ }
+
+ return nErrorCount;
+}
+
diff --git a/EASTL/test/source/TestSegmentedVector.cpp b/EASTL/test/source/TestSegmentedVector.cpp
new file mode 100644
index 0000000..bb920b7
--- /dev/null
+++ b/EASTL/test/source/TestSegmentedVector.cpp
@@ -0,0 +1,89 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EASTL/segmented_vector.h>
+#include <EASTL/list.h>
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::segmented_vector<bool, 16>;
+template class eastl::segmented_vector<int, 16>;
+template class eastl::segmented_vector<Align64, 16>;
+template class eastl::segmented_vector<TestObject, 16>;
+
+
+int TestSegmentedVector()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ {
+ eastl::segmented_vector<int, 8> sv;
+ sv.push_back(0);
+ sv.push_back(1);
+ sv.push_back(2);
+ sv.push_back(3);
+
+ {
+ auto i = sv.begin();
+ EATEST_VERIFY(*i == 0);
+ EATEST_VERIFY(*i++ == 0);
+ EATEST_VERIFY(*i++ == 1);
+ EATEST_VERIFY(*i++ == 2);
+ EATEST_VERIFY(*i++ == 3);
+ }
+
+ {
+ auto i = sv.begin();
+ EATEST_VERIFY(*i == 0);
+ EATEST_VERIFY(*(++i) == 1);
+ EATEST_VERIFY(*(++i) == 2);
+ EATEST_VERIFY(*(++i) == 3);
+ }
+ }
+
+ {
+ // Construct segmented_vectors of different types.
+ eastl::segmented_vector<int, 8> vectorOfInt;
+ eastl::segmented_vector<TestObject, 8> vectorOfTO;
+ eastl::segmented_vector<eastl::list<TestObject>, 8> vectorOfListOfTO;
+
+ EATEST_VERIFY(vectorOfInt.empty());
+ EATEST_VERIFY(vectorOfTO.empty());
+ EATEST_VERIFY(vectorOfListOfTO.empty());
+ }
+
+ {
+ // Test basic segmented_vector operations.
+ eastl::segmented_vector<int, 4> vectorOfInt;
+
+ vectorOfInt.push_back(42);
+ EATEST_VERIFY(vectorOfInt.size() == 1);
+ EATEST_VERIFY(vectorOfInt.segment_count() == 1);
+ EATEST_VERIFY(vectorOfInt.empty() == false);
+
+ vectorOfInt.push_back(43);
+ vectorOfInt.push_back(44);
+ vectorOfInt.push_back(45);
+ vectorOfInt.push_back(46);
+ EATEST_VERIFY(vectorOfInt.size() == 5);
+ EATEST_VERIFY(vectorOfInt.segment_count() == 2);
+
+ EATEST_VERIFY(vectorOfInt.front() == 42);
+ EATEST_VERIFY(vectorOfInt.back() == 46);
+
+ vectorOfInt.pop_back();
+ EATEST_VERIFY(vectorOfInt.size() == 4);
+ EATEST_VERIFY(vectorOfInt.segment_count() == 1);
+
+ vectorOfInt.clear();
+ EATEST_VERIFY(vectorOfInt.empty());
+ EATEST_VERIFY(vectorOfInt.size() == 0);
+ EATEST_VERIFY(vectorOfInt.segment_count() == 0);
+ }
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestSet.cpp b/EASTL/test/source/TestSet.cpp
new file mode 100644
index 0000000..9a590c2
--- /dev/null
+++ b/EASTL/test/source/TestSet.cpp
@@ -0,0 +1,256 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "TestSet.h"
+#include "EASTLTest.h"
+#include <EASTL/map.h>
+#include <EASTL/set.h>
+#include <EASTL/functional.h>
+#include <EASTL/internal/config.h>
+#include <EABase/eabase.h>
+
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stdio.h>
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <set>
+ #include <map>
+ #include <algorithm>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::set<int>;
+template class eastl::multiset<float>;
+template class eastl::set<TestObject>;
+template class eastl::multiset<TestObject>;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// typedefs
+//
+typedef eastl::set<int> VS1;
+typedef eastl::set<TestObject> VS4;
+typedef eastl::multiset<int> VMS1;
+typedef eastl::multiset<TestObject> VMS4;
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ typedef std::set<int> VS3;
+ typedef std::set<TestObject> VS6;
+ typedef std::multiset<int> VMS3;
+ typedef std::multiset<TestObject> VMS6;
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// xvalue_test
+//
+// Test utility type that sets the class data to known value when its data has
+// has been moved out. This enables us to write tests that verify that the
+// destruction action taken on container elements occured during move operations.
+//
+struct xvalue_test
+{
+ static const int MOVED_FROM = -1;
+
+ int data = 42;
+
+ xvalue_test(int in) : data(in) {}
+ ~xvalue_test() = default;
+
+ xvalue_test(const xvalue_test& other)
+ : data(other.data) {}
+
+ xvalue_test& operator=(const xvalue_test& other)
+ {
+ data = other.data;
+ return *this;
+ }
+
+ xvalue_test(xvalue_test&& other)
+ {
+ data = other.data;
+ other.data = MOVED_FROM;
+ }
+
+ xvalue_test& operator=(xvalue_test&& other)
+ {
+ data = other.data;
+ other.data = MOVED_FROM;
+ return *this;
+ }
+
+ friend bool operator<(const xvalue_test& rhs, const xvalue_test& lhs)
+ { return rhs.data < lhs.data; }
+};
+
+
+
+int TestSet()
+{
+ int nErrorCount = 0;
+
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ { // Test construction
+ nErrorCount += TestSetConstruction<VS1, VS3, false>();
+ nErrorCount += TestSetConstruction<VS4, VS6, false>();
+
+ nErrorCount += TestSetConstruction<VMS1, VMS3, true>();
+ nErrorCount += TestSetConstruction<VMS4, VMS6, true>();
+ }
+
+
+ { // Test mutating functionality.
+ nErrorCount += TestSetMutation<VS1, VS3, false>();
+ nErrorCount += TestSetMutation<VS4, VS6, false>();
+
+ nErrorCount += TestSetMutation<VMS1, VMS3, true>();
+ nErrorCount += TestSetMutation<VMS4, VMS6, true>();
+ }
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+ { // Test searching functionality.
+ nErrorCount += TestSetSearch<VS1, false>();
+ nErrorCount += TestSetSearch<VS4, false>();
+
+ nErrorCount += TestSetSearch<VMS1, true>();
+ nErrorCount += TestSetSearch<VMS4, true>();
+ }
+
+
+ {
+ // C++11 emplace and related functionality
+ nErrorCount += TestSetCpp11<eastl::set<TestObject> >();
+
+ nErrorCount += TestMultisetCpp11<eastl::multiset<TestObject> >();
+ }
+
+
+ { // Misc tests
+
+ // const key_compare& key_comp() const;
+ // key_compare& key_comp();
+ VS1 vs;
+ const VS1 vsc;
+
+ const VS1::key_compare& kc = vsc.key_comp();
+ vs.key_comp() = kc;
+ }
+
+ { // non-const comparator test
+ struct my_less
+ {
+ bool operator()(int a, int b) { return a < b; }
+ };
+
+ {
+ set<int, my_less> a = {0, 1, 2, 3, 4};
+ auto i = a.find(42);
+ VERIFY(i == a.end());
+ }
+ }
+
+ { // set erase_if tests
+ set<int> s = {0, 1, 2, 3, 4};
+ auto numErased = eastl::erase_if(s, [](auto i) { return i % 2 == 0;});
+ VERIFY((s == set<int>{1,3}));
+ VERIFY(numErased == 3);
+ }
+
+ { // multiset erase_if tests
+ multiset<int> s = {0, 0, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4};
+ auto numErased = eastl::erase_if(s, [](auto i) { return i % 2 == 0;});
+ VERIFY((s == multiset<int>{1, 1, 1, 3, 3, 3}));
+ VERIFY(numErased == 7);
+ }
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ { // Test set <=>
+ set<int> s1 = {0, 1, 2, 3, 4};
+ set<int> s2 = {4, 3, 2, 1, 0};
+ set<int> s3 = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ set<int> s4 = {1, 2, 3, 4, 5, 6};
+ set<int> s5 = {9};
+
+ VERIFY(s1 == s2);
+ VERIFY(s1 != s3);
+ VERIFY(s3 > s4);
+ VERIFY(s5 > s4);
+ VERIFY(s5 > s3);
+
+ VERIFY((s1 <=> s2) == 0);
+ VERIFY((s1 <=> s3) != 0);
+ VERIFY((s3 <=> s4) > 0);
+ VERIFY((s5 <=> s4) > 0);
+ VERIFY((s5 <=> s3) > 0);
+ }
+
+ { // Test multiset <=>
+ multiset<int> s1 = {0, 0, 0, 1, 1, 2, 3, 3, 4};
+ multiset<int> s2 = {4, 3, 3, 2, 1, 1, 0, 0, 0};
+ multiset<int> s3 = {1, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9};
+ multiset<int> s4 = {1, 1, 2, 2, 3, 4, 5, 5, 6};
+ multiset<int> s5 = {9};
+
+ VERIFY(s1 == s2);
+ VERIFY(s1 != s3);
+ VERIFY(s3 > s4);
+ VERIFY(s5 > s4);
+ VERIFY(s5 > s3);
+
+ VERIFY((s1 <=> s2) == 0);
+ VERIFY((s1 <=> s3) != 0);
+ VERIFY((s3 <=> s4) > 0);
+ VERIFY((s5 <=> s4) > 0);
+ VERIFY((s5 <=> s3) > 0);
+ }
+#endif
+
+ {
+ // user reported regression: ensure container elements are NOT
+ // moved from during the eastl::set construction process.
+ eastl::vector<xvalue_test> m1 = {{0}, {1}, {2}, {3}, {4}, {5}};
+ eastl::set<xvalue_test> m2{m1.begin(), m1.end()};
+
+ bool result = eastl::all_of(m1.begin(), m1.end(),
+ [&](auto& e) { return e.data != xvalue_test::MOVED_FROM; });
+
+ VERIFY(result);
+ }
+
+ {
+ // user reported regression: ensure container elements are moved from during the
+ // eastl::set construction process when using an eastl::move_iterator.
+ eastl::vector<xvalue_test> m1 = {{0}, {1}, {2}, {3}, {4}, {5}};
+ eastl::set<xvalue_test> m2{eastl::make_move_iterator(m1.begin()), eastl::make_move_iterator(m1.end())};
+
+ bool result = eastl::all_of(m1.begin(), m1.end(),
+ [&](auto& e) { return e.data == xvalue_test::MOVED_FROM; });
+
+ VERIFY(result);
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestSet.h b/EASTL/test/source/TestSet.h
new file mode 100644
index 0000000..16f55c7
--- /dev/null
+++ b/EASTL/test/source/TestSet.h
@@ -0,0 +1,906 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/scoped_ptr.h>
+#include <EASTL/random.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <algorithm>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestSetConstruction
+//
+// This test compares eastl::set/multiset to std::set/multiset. It could possibly
+// work for comparing eastl::hash_set to C++11 std::unordered_set, but we would
+// rather move towards making this test be independent of any std comparisons.
+//
+// Requires a container that can hold at least 1000 items.
+//
+template <typename T1, typename T2, bool bMultiset>
+int TestSetConstruction()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ {
+ eastl::scoped_ptr<T1> pt1A(new T1); // We use a pointers instead of concrete object because it's size may be huge.
+ eastl::scoped_ptr<T2> pt2A(new T2);
+ T1& t1A = *pt1A;
+ T2& t2A = *pt2A;
+ nErrorCount += CompareContainers(t1A, t2A, "Set ctor", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ VERIFY(t1A.validate());
+
+
+ eastl::scoped_ptr<T1> pt1B(new T1);
+ eastl::scoped_ptr<T2> pt2B(new T2);
+ T1& t1B = *pt1B;
+ T2& t2B = *pt2B;
+ nErrorCount += CompareContainers(t1B, t2B, "Set ctor", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ eastl::scoped_ptr<T1> pt1C(new T1);
+ eastl::scoped_ptr<T2> pt2C(new T2);
+ T1& t1C = *pt1C;
+ T2& t2C = *pt2C;
+ for(int i = 0; i < 1000; i++)
+ {
+ t1C.insert(typename T1::value_type(typename T1::value_type(i)));
+ t2C.insert(typename T2::value_type(typename T2::value_type(i)));
+ VERIFY(t1C.validate());
+ nErrorCount += CompareContainers(t1C, t2C, "Set insert", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ }
+
+
+ eastl::scoped_ptr<T1> pt1D(new T1);
+ eastl::scoped_ptr<T2> pt2D(new T2);
+ T1& t1D = *pt1D;
+ T2& t2D = *pt2D;
+ nErrorCount += CompareContainers(t1D, t2D, "Set ctor", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ eastl::scoped_ptr<T1> pt1E(new T1(t1C));
+ eastl::scoped_ptr<T2> pt2E(new T2(t2C));
+ T1& t1E = *pt1E;
+ T2& t2E = *pt2E;
+ VERIFY(t1E.validate());
+ nErrorCount += CompareContainers(t1E, t2E, "Set ctor", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ eastl::scoped_ptr<T1> pt1F(new T1(t1C.begin(), t1C.end()));
+ eastl::scoped_ptr<T2> pt2F(new T2(t2C.begin(), t2C.end()));
+ T1& t1F = *pt1F;
+ T2& t2F = *pt2F;
+ VERIFY(t1F.validate());
+ nErrorCount += CompareContainers(t1F, t2F, "Set ctor", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ // operator=
+ t1E = t1D;
+ t2E = t2D;
+ nErrorCount += CompareContainers(t1D, t2D, "Set operator=", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ nErrorCount += CompareContainers(t1E, t2E, "Set operator=", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ // operator=(set&&)
+ // We test just the EASTL container here.
+ eastl::scoped_ptr<T1> pT1P(new T1); // We use a pointers instead of concrete object because it's size may be huge.
+ eastl::scoped_ptr<T1> pT1Q(new T1);
+ T1& t1P = *pT1P;
+ T1& t1Q = *pT1Q;
+
+ typename T1::value_type v10(0);
+ typename T1::value_type v11(1);
+ typename T1::value_type v12(2);
+ typename T1::value_type v13(3);
+ typename T1::value_type v14(4);
+ typename T1::value_type v15(5);
+
+ t1P.insert(v10);
+ t1P.insert(v11);
+ t1P.insert(v12);
+
+ t1Q.insert(v13);
+ t1Q.insert(v14);
+ t1Q.insert(v15);
+
+ t1Q = eastl::move(t1P); // We are effectively requesting to swap t1A with t1B.
+ //EATEST_VERIFY((t1P.size() == 3) && (t1P.find(v13) != t1P.end()) && (t1P.find(v14) != t1P.end()) && (t1P.find(v15) != t1P.end())); // Currently operator=(this_type&& x) clears x instead of swapping with it.
+
+
+ // swap
+ t1E.swap(t1D);
+ t2E.swap(t2D);
+ VERIFY(t1D.validate());
+ VERIFY(t1E.validate());
+ nErrorCount += CompareContainers(t1D, t2D, "Set swap", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ nErrorCount += CompareContainers(t1E, t2E, "Set swap", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ // eastl::swap
+ eastl::swap(t1E, t1D);
+ std::swap(t2E, t2D);
+ VERIFY(t1D.validate());
+ VERIFY(t1E.validate());
+ nErrorCount += CompareContainers(t1D, t2D, "Global swap", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ nErrorCount += CompareContainers(t1E, t2E, "Global swap", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ // clear
+ t1A.clear();
+ t2A.clear();
+ VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Set clear", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+ t1B.clear();
+ t2B.clear();
+ VERIFY(t1B.validate());
+ nErrorCount += CompareContainers(t1B, t2B, "Set clear", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ // global operators (==, !=, <, etc.)
+ t1A.clear();
+ t1B.clear();
+ // Make t1A equal to t1B
+ t1A.insert(typename T1::value_type(0));
+ t1A.insert(typename T1::value_type(1));
+ t1A.insert(typename T1::value_type(2));
+
+ t1B.insert(typename T1::value_type(0));
+ t1B.insert(typename T1::value_type(1));
+ t1B.insert(typename T1::value_type(2));
+
+ VERIFY( (t1A == t1B));
+ VERIFY(!(t1A != t1B));
+ VERIFY( (t1A <= t1B));
+ VERIFY( (t1A >= t1B));
+ VERIFY(!(t1A < t1B));
+ VERIFY(!(t1A > t1B));
+ // Make t1A less than t1B
+ t1A.insert(typename T1::value_type(3));
+ t1B.insert(typename T1::value_type(4));
+
+ VERIFY(!(t1A == t1B));
+ VERIFY( (t1A != t1B));
+ VERIFY( (t1A <= t1B));
+ VERIFY(!(t1A >= t1B));
+ VERIFY( (t1A < t1B));
+ VERIFY(!(t1A > t1B));
+ }
+
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestSetMutation
+//
+// Requires a container that can hold at least 1000 items.
+//
+EA_DISABLE_VC_WARNING(6262)
+template <typename T1, typename T2, bool bMultiset>
+int TestSetMutation()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ {
+ eastl::scoped_ptr<T1> pt1A(new T1); // We use a pointers instead of concrete object because it's size may be huge.
+ eastl::scoped_ptr<T2> pt2A(new T2);
+ T1& t1A = *pt1A;
+ T2& t2A = *pt2A;
+ int i, iEnd, p;
+
+ // Set up an array of values to randomize / permute.
+ eastl::vector<typename T1::value_type> valueArrayInsert;
+
+ if(gEASTL_TestLevel >= kEASTL_TestLevelLow)
+ {
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+
+ valueArrayInsert.clear();
+
+ for(i = 0; i < 1000; i++)
+ {
+ valueArrayInsert.push_back(typename T1::value_type(i));
+
+ // Occasionally attempt to duplicate an element, both for set and multiset.
+ if(((i + 1) < 1000) && (rng.RandLimit(4) == 0))
+ {
+ valueArrayInsert.push_back(typename T1::value_type(i));
+ i++;
+ }
+ }
+
+ for(p = 0; p < gEASTL_TestLevel * 100; p++) // For each permutation...
+ {
+ eastl::random_shuffle(valueArrayInsert.begin(), valueArrayInsert.end(), rng);
+
+ // insert
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::value_type& k = valueArrayInsert[i];
+
+ t1A.insert(typename T1::value_type(k)); // We expect that both arguments are the same.
+ t2A.insert(typename T2::value_type(k));
+
+ VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Set insert", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ }
+
+
+ // reverse iteration
+ typename T1::reverse_iterator r1 = t1A.rbegin();
+ typename T2::reverse_iterator r2 = t2A.rbegin();
+
+ while(r1 != t1A.rend())
+ {
+ typename T1::value_type k1 = *r1;
+ typename T2::value_type k2 = *r2;
+ VERIFY(k1 == k2);
+ }
+
+
+ // erase
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::value_type& k = valueArrayInsert[i];
+
+ typename T1::size_type n1 = t1A.erase(k);
+ typename T2::size_type n2 = t2A.erase(k);
+
+ VERIFY(n1 == n2);
+ VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Set erase", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ }
+
+ VERIFY((TestObject::sTOCount == 0) || (TestObject::sTOCount == (int64_t)valueArrayInsert.size())); // This test will only have meaning when T1 contains TestObject.
+ }
+ }
+
+
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ // Possibly do extended testing.
+ if(gEASTL_TestLevel > 6)
+ {
+ valueArrayInsert.clear();
+
+ for(i = 0; i < 9; i++) // Much more than this count would take too long to test all permutations.
+ valueArrayInsert.push_back(typename T1::value_type(i));
+
+ // Insert these values into the set in every existing permutation.
+ for(p = 0; std::next_permutation(valueArrayInsert.begin(), valueArrayInsert.end()); p++) // For each permutation...
+ {
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::value_type& k = valueArrayInsert[i];
+
+ t1A.insert(typename T1::value_type(k)); // We expect that both arguments are the same.
+ t2A.insert(typename T2::value_type(k));
+
+ VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Set insert", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ }
+
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::value_type& k = valueArrayInsert[i];
+
+ t1A.erase(k);
+ t2A.erase(k);
+
+ VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Set erase", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ }
+
+ VERIFY((TestObject::sTOCount == 0) || (TestObject::sTOCount == (int64_t)valueArrayInsert.size())); // This test will only have meaning when T1 contains TestObject.
+ }
+ }
+ }
+
+
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+
+ { // Other insert and erase operations
+
+ eastl::scoped_ptr<T1> pt1A(new T1); // We use a pointers instead of concrete object because it's size may be huge.
+ eastl::scoped_ptr<T2> pt2A(new T2);
+ T1& t1A = *pt1A;
+ T2& t2A = *pt2A;
+ int i;
+
+ // Set up an array of values to randomize / permute.
+ eastl::vector<typename T1::value_type> valueArrayInsert1;
+ eastl::vector<typename T2::value_type> valueArrayInsert2;
+
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+
+ for(i = 0; i < 100; i++)
+ {
+ valueArrayInsert1.push_back(typename T1::value_type(i));
+ valueArrayInsert2.push_back(typename T2::value_type(i));
+
+ if(rng.RandLimit(3) == 0)
+ {
+ valueArrayInsert1.push_back(typename T1::value_type(i));
+ valueArrayInsert2.push_back(typename T2::value_type(i));
+ }
+ }
+
+
+ // insert(InputIterator first, InputIterator last)
+ t1A.insert(valueArrayInsert1.begin(), valueArrayInsert1.end());
+ t2A.insert(valueArrayInsert2.begin(), valueArrayInsert2.end());
+ VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Set insert", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ // iterator insert(iterator position, const value_type& value);
+ //
+ // If bMultiset == true, then the insertions below should fail due to the
+ // item being present. But they should return the correct iterator value.
+ typename T1::iterator it1 = t1A.insert(t1A.find(typename T1::value_type(2)), typename T1::value_type(1));
+ typename T2::iterator it2 = t2A.insert(t2A.find(typename T2::value_type(2)), typename T2::value_type(1));
+ VERIFY(t1A.validate());
+ VERIFY(*it1 == typename T1::value_type(1));
+ VERIFY(*it2 == typename T2::value_type(1));
+ nErrorCount += CompareContainers(t1A, t2A, "Set insert", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+ it1 = t1A.insert(t1A.end(), typename T1::value_type(5));
+ it2 = t2A.insert(t2A.end(), typename T2::value_type(5));
+ VERIFY(t1A.validate());
+ VERIFY(*it1 == typename T1::value_type(5));
+ VERIFY(*it2 == typename T2::value_type(5));
+ nErrorCount += CompareContainers(t1A, t2A, "Set insert", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+ // Now we remove these items so that the insertions above can succeed.
+ t1A.erase(t1A.find(typename T1::value_type(1)));
+ t2A.erase(t2A.find(typename T2::value_type(1)));
+ it1 = t1A.insert(t1A.find(typename T1::value_type(2)), typename T1::value_type(1));
+ it2 = t2A.insert(t2A.find(typename T2::value_type(2)), typename T2::value_type(1));
+ VERIFY(t1A.validate());
+ VERIFY(*it1 == typename T1::value_type(1));
+ VERIFY(*it2 == typename T2::value_type(1));
+ nErrorCount += CompareContainers(t1A, t2A, "Set insert", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+ t1A.erase(t1A.find(typename T1::value_type(5)));
+ t2A.erase(t2A.find(typename T2::value_type(5)));
+ it1 = t1A.insert(t1A.end(), typename T1::value_type(5));
+ it2 = t2A.insert(t2A.end(), typename T2::value_type(5));
+ VERIFY(t1A.validate());
+ VERIFY(*it1 == typename T1::value_type(5));
+ VERIFY(*it2 == typename T2::value_type(5));
+ nErrorCount += CompareContainers(t1A, t2A, "Set insert", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+ // iterator erase(iterator first, iterator last);
+ typename T1::iterator it11 = t1A.find(typename T1::value_type(17));
+ typename T1::iterator it12 = t1A.find(typename T2::value_type(37));
+ t1A.erase(it11, it12);
+
+ typename T2::iterator it21 = t2A.find(typename T1::value_type(17));
+ typename T2::iterator it22 = t2A.find(typename T2::value_type(37));
+ t2A.erase(it21, it22);
+
+ VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Set erase(first, last)", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ // iterator erase(iterator position);
+ t1A.erase(t1A.find(typename T1::value_type(60)));
+ t2A.erase(t2A.find(typename T1::value_type(60)));
+ VERIFY(t1A.validate());
+ nErrorCount += CompareContainers(t1A, t2A, "Set erase(first, last)", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+
+
+ // Disabled because this function isn't exposed outside the rbtree yet.
+ // void erase(const value_type* first, const value_type* last);
+ //typename T1::value_type keyArray1[3] = { typename T1::value_type(70), typename T1::value_type(71), typename T1::value_type(72) };
+ //typename T2::value_type keyArray2[3] = { typename T2::value_type(70), typename T2::value_type(71), typename T2::value_type(72) };
+ //t1A.erase(keyArray1 + 0, keyArray1 + 3);
+ //t2A.erase(keyArray2 + 0, keyArray2 + 3);
+ //VERIFY(t1A.validate());
+ //nErrorCount += CompareContainers(t1A, t2A, "Set erase(first, last)", eastl::use_self<typename T1::value_type>(), eastl::use_self<typename T2::value_type>());
+ }
+
+ {
+ // set(std::initializer_list<value_type> ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
+ // this_type& operator=(std::initializer_list<T> ilist);
+ // void insert(std::initializer_list<value_type> ilist);
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ T1 mySet = { typename T1::value_type(10), typename T1::value_type(11) };
+ EATEST_VERIFY(mySet.size() == 2);
+ typename T1::iterator it = mySet.begin();
+ EATEST_VERIFY(*it == typename T1::value_type(10));
+ it = mySet.rbegin().base();
+ EATEST_VERIFY(*--it == typename T1::value_type(11));
+
+ mySet = {typename T1::value_type(20), typename T1::value_type(21) };
+ EATEST_VERIFY(mySet.size() == 2);
+ EATEST_VERIFY(*mySet.begin() == typename T1::value_type(20));
+ it = mySet.rbegin().base();
+ EATEST_VERIFY(*--it == typename T1::value_type(21));
+
+ mySet.insert({ typename T1::value_type(40), typename T1::value_type(41) });
+ EATEST_VERIFY(mySet.size() == 4);
+ it = mySet.rbegin().base();
+ EATEST_VERIFY(*--it == typename T1::value_type(41));
+ #endif
+ }
+
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+EA_RESTORE_VC_WARNING()
+
+
+#endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+
+
+template <typename T1>
+int TestSetSpecific(T1& /*t1A*/, eastl::false_type) // false_type means this is a map and not a multimap.
+{
+ return 0;
+}
+
+
+template <typename T1>
+int TestSetSpecific(T1& t1A, eastl::true_type) // true_type means this is a multimap and not a map.
+{
+ int nErrorCount = 0;
+
+ // equal_range_small (multiset only)
+ eastl::pair<typename T1::iterator, typename T1::iterator> er = t1A.equal_range_small(typename T1::value_type(499));
+ VERIFY(*er.first == typename T1::value_type(499));
+ VERIFY(*er.second == typename T1::value_type(501));
+
+ er = t1A.equal_range_small(typename T1::value_type(-1));
+ VERIFY(er.first == er.second);
+ VERIFY(er.first == t1A.begin());
+
+ return nErrorCount;
+}
+
+
+// Just for the purposes of the map::find_as test below, we declare the following.
+// The map::find_as function searches a container of X for a type Y, where the user
+// defines the equality of X to Y. The purpose of TSetComparable is to be a generic type Y
+// that can be used for any X. We need to make this generic because the whole TestMapSearch
+// function below is templated on type T1 and so we don't know what T1 is ahead of time.
+
+template <typename T>
+struct TSetComparable
+{
+ T b;
+
+ TSetComparable() : b() { }
+ TSetComparable(const T& a) : b(a){ }
+ const TSetComparable& operator=(const T& a) { b = a; return *this; }
+ const TSetComparable& operator=(const TSetComparable& x) { b = x.b; return *this; }
+ operator const T&() const { return b; }
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestSetSearch
+//
+// This function is designed to work with set, fixed_set (and not hash containers).
+// Requires a container that can hold at least 1000 items.
+//
+template <typename T1, bool bMultimap>
+int TestSetSearch()
+{
+ int nErrorCount = 0;
+
+ TestObject::Reset();
+
+ { // Test find, lower_bound, upper_bound, etc..
+ eastl::scoped_ptr<T1> pt1A(new T1); // We use a pointers instead of concrete object because it's size may be huge.
+ T1& t1A = *pt1A;
+ int i, iEnd;
+ typename T1::iterator it;
+
+ // Set up an array of values to randomize / permute.
+ eastl::vector<typename T1::value_type> valueArrayInsert;
+
+ for(i = 0; i < 1000; i++)
+ valueArrayInsert.push_back(typename T1::value_type(i));
+
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+ eastl::random_shuffle(valueArrayInsert.begin(), valueArrayInsert.end(), rng);
+
+
+ // insert
+ for(i = 0, iEnd = (int)valueArrayInsert.size(); i < iEnd; i++)
+ {
+ typename T1::value_type k(i);
+ t1A.insert(typename T1::value_type(k));
+
+ it = t1A.find(k);
+ VERIFY(it != t1A.end());
+ }
+
+
+ // find
+ for(i = 0; i < 1000; i++)
+ {
+ typename T1::value_type k(i);
+ it = t1A.find(k);
+
+ VERIFY(it != t1A.end());
+ VERIFY(*it == k);
+ }
+
+ it = t1A.find(typename T1::value_type(-1));
+ VERIFY(it == t1A.end());
+
+ it = t1A.find(typename T1::value_type(1001));
+ VERIFY(it == t1A.end());
+
+
+ // find_as
+ typedef TSetComparable<typename T1::key_type> TC;
+
+ // Normally we use find_as to find via a different type, but we can test it here like this.
+ for(i = 0; i < 1000; i++)
+ {
+ TC k = typename T1::key_type(i);
+ it = t1A.find_as(k, eastl::less_2<typename T1::key_type, TC>());
+
+ VERIFY(it != t1A.end());
+ VERIFY(*it == k);
+ }
+
+ it = t1A.find_as(TC(typename T1::key_type(-1)), eastl::less_2<typename T1::key_type, TC>());
+ VERIFY(it == t1A.end());
+
+ it = t1A.find_as(TC(typename T1::key_type(1001)), eastl::less_2<typename T1::key_type, TC>());
+ VERIFY(it == t1A.end());
+
+
+ // lower_bound
+ it = t1A.lower_bound(typename T1::value_type(0));
+ VERIFY(it == t1A.begin());
+
+ it = t1A.lower_bound(typename T1::value_type(-1));
+ VERIFY(it == t1A.begin());
+
+ it = t1A.lower_bound(typename T1::value_type(1001));
+ VERIFY(it == t1A.end());
+
+ t1A.erase(typename T1::value_type(500));
+ it = t1A.lower_bound(typename T1::value_type(500));
+ VERIFY(*it == typename T1::value_type(501));
+
+
+ // upper_bound
+ it = t1A.upper_bound(typename T1::value_type(-1));
+ VERIFY(it == t1A.begin());
+
+ it = t1A.upper_bound(typename T1::value_type(499));
+ VERIFY(*it == typename T1::value_type(501));
+
+ it = t1A.upper_bound(typename T1::value_type(-1));
+ VERIFY(*it == typename T1::value_type(0));
+
+ it = t1A.upper_bound(typename T1::value_type(1000));
+ VERIFY(it == t1A.end());
+
+
+ // count
+ typename T1::size_type n = t1A.count(typename T1::value_type(-1));
+ VERIFY(n == 0);
+
+ n = t1A.count(typename T1::value_type(0));
+ VERIFY(n == 1);
+
+ n = t1A.count(typename T1::value_type(500)); // We removed 500 above.
+ VERIFY(n == 0);
+
+ n = t1A.count(typename T1::value_type(1001));
+ VERIFY(n == 0);
+
+
+ // equal_range
+ eastl::pair<typename T1::iterator, typename T1::iterator> er = t1A.equal_range(typename T1::value_type(200));
+ VERIFY(*er.first == typename T1::value_type(200));
+
+ er = t1A.equal_range(typename T1::value_type(499));
+ VERIFY(*er.first == typename T1::value_type(499));
+ VERIFY(*er.second == typename T1::value_type(501));
+
+ er = t1A.equal_range(typename T1::value_type(-1));
+ VERIFY(er.first == er.second);
+ VERIFY(er.first == t1A.begin());
+
+
+ // Some tests need to be differently between map and multimap.
+ nErrorCount += TestSetSpecific(t1A, eastl::integral_constant<bool, bMultimap>());
+ }
+
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestSetCpp11
+//
+// This function is designed to work with set, fixed_set, hash_set, fixed_hash_set
+//
+template <typename T1>
+int TestSetCpp11()
+{
+ int nErrorCount = 0;
+
+ // template <class... Args>
+ // insert_return_type emplace(Args&&... args);
+ //
+ // template <class... Args>
+ // iterator emplace_hint(const_iterator position, Args&&... args);
+ //
+ // insert_return_type insert(value_type&& value);
+ // iterator insert(const_iterator position, value_type&& value);
+ TestObject::Reset();
+
+ typedef T1 TOSet;
+ typename TOSet::insert_return_type toSetInsertResult;
+ typename TOSet::iterator toSetIterator;
+
+ TOSet toSet;
+ TestObject to0(0);
+ TestObject to1(1);
+
+ toSetInsertResult = toSet.emplace(to0);
+ EATEST_VERIFY(toSetInsertResult.second == true);
+ //EATEST_VERIFY((TestObject::sTOCopyCtorCount == 2) && (TestObject::sTOMoveCtorCount == 1)); // Disabled until we can guarantee its behavior and deal with how it's different between compilers of differing C++11 support.
+
+ toSetInsertResult = toSet.emplace(eastl::move(to1));
+ EATEST_VERIFY(toSetInsertResult.second == true);
+
+ // insert_return_type t1A.emplace(value_type&& value);
+ TestObject to40(4);
+ EATEST_VERIFY(toSet.find(to40) == toSet.end());
+ EATEST_VERIFY(to40.mX == 4); // It should change to 0 below during the move swap.
+ toSetInsertResult = toSet.emplace(eastl::move(to40));
+ EATEST_VERIFY(toSetInsertResult.second == true);
+ EATEST_VERIFY(toSet.find(to40) != toSet.end());
+ EATEST_VERIFY(to40.mX == 0);
+
+ TestObject to41(4);
+ toSetInsertResult = toSet.emplace(eastl::move(to41));
+ EATEST_VERIFY(toSetInsertResult.second == false);
+ EATEST_VERIFY(toSet.find(to41) != toSet.end());
+
+ // iterator t1A.emplace_hint(const_iterator position, value_type&& value);
+ TestObject to50(5);
+ toSetInsertResult = toSet.emplace(eastl::move(to50));
+ EATEST_VERIFY(toSetInsertResult.second == true);
+ EATEST_VERIFY(toSet.find(to50) != toSet.end());
+
+ TestObject to51(5);
+ toSetIterator = toSet.emplace_hint(toSetInsertResult.first, eastl::move(to51));
+ EATEST_VERIFY(*toSetIterator == TestObject(5));
+ EATEST_VERIFY(toSet.find(to51) != toSet.end());
+
+ TestObject to6(6);
+ toSetIterator = toSet.emplace_hint(toSet.begin(), eastl::move(to6)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(*toSetIterator == TestObject(6));
+ EATEST_VERIFY(toSet.find(to6) != toSet.end());
+
+ TestObject to2(2);
+ EATEST_VERIFY(toSet.find(to2) == toSet.end());
+ toSetInsertResult = toSet.emplace(to2);
+ EATEST_VERIFY(toSetInsertResult.second == true);
+ EATEST_VERIFY(toSet.find(to2) != toSet.end());
+ toSetInsertResult = toSet.emplace(to2);
+ EATEST_VERIFY(toSetInsertResult.second == false);
+ EATEST_VERIFY(toSet.find(to2) != toSet.end());
+
+ // iterator t1A.emplace_hint(const_iterator position, const value_type& value);
+ TestObject to70(7);
+ toSetInsertResult = toSet.emplace(to70);
+ EATEST_VERIFY(toSetInsertResult.second == true);
+ EATEST_VERIFY(toSet.find(to70) != toSet.end());
+
+ TestObject to71(7);
+ toSetIterator = toSet.emplace_hint(toSetInsertResult.first, to71);
+ EATEST_VERIFY(*toSetIterator == to71);
+ EATEST_VERIFY(toSet.find(to71) != toSet.end());
+
+ TestObject to8(8);
+ toSetIterator = toSet.emplace_hint(toSet.begin(), to8); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(*toSetIterator == to8);
+ EATEST_VERIFY(toSet.find(to8) != toSet.end());
+
+ //pair<iterator,bool> t1A.insert(value_type&& value);
+ TestObject to3(3);
+ EATEST_VERIFY(toSet.find(to3) == toSet.end());
+ toSetInsertResult = toSet.insert(TestObject(to3));
+ EATEST_VERIFY(toSetInsertResult.second == true);
+ EATEST_VERIFY(toSet.find(to3) != toSet.end());
+ toSetInsertResult = toSet.insert(TestObject(to3));
+ EATEST_VERIFY(toSetInsertResult.second == false);
+ EATEST_VERIFY(toSet.find(to3) != toSet.end());
+
+
+ // iterator t1A.insert(const_iterator position, value_type&& value);
+ TestObject to90(9);
+ toSetInsertResult = toSet.emplace(eastl::move(to90));
+ EATEST_VERIFY(toSetInsertResult.second == true);
+ EATEST_VERIFY(toSet.find(to90) != toSet.end());
+
+ TestObject to91(9);
+ toSetIterator = toSet.emplace_hint(toSetInsertResult.first, eastl::move(to91));
+ EATEST_VERIFY(*toSetIterator == TestObject(9));
+ EATEST_VERIFY(toSet.find(to91) != toSet.end());
+
+ TestObject to10(10);
+ toSetIterator = toSet.emplace_hint(toSet.begin(), eastl::move(to10)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(*toSetIterator == TestObject(10));
+ EATEST_VERIFY(toSet.find(to10) != toSet.end());
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// TestMultisetCpp11
+//
+// This function is designed to work with multiset, fixed_multiset, hash_multiset, fixed_hash_multiset
+//
+// This is similar to the TestSetCpp11 function, with some differences related
+// to handling of duplicate entries.
+//
+template <typename T1>
+int TestMultisetCpp11()
+{
+ int nErrorCount = 0;
+
+ // template <class... Args>
+ // insert_return_type emplace(Args&&... args);
+ //
+ // template <class... Args>
+ // iterator emplace_hint(const_iterator position, Args&&... args);
+ //
+ // insert_return_type insert(value_type&& value);
+ // iterator insert(const_iterator position, value_type&& value);
+ TestObject::Reset();
+
+ typedef T1 TOSet;
+ typename TOSet::iterator toSetIterator;
+
+ TOSet toSet;
+ TestObject to0(0);
+ TestObject to1(1);
+
+ toSetIterator = toSet.emplace(to0);
+ EATEST_VERIFY(*toSetIterator == TestObject(0));
+ //EATEST_VERIFY((TestObject::sTOCopyCtorCount == 2) && (TestObject::sTOMoveCtorCount == 1)); // Disabled until we can guarantee its behavior and deal with how it's different between compilers of differing C++11 support.
+
+ toSetIterator = toSet.emplace(eastl::move(to1));
+ EATEST_VERIFY(*toSetIterator == TestObject(1));
+
+ // insert_return_type t1A.emplace(value_type&& value);
+ TestObject to40(4);
+ EATEST_VERIFY(toSet.find(to40) == toSet.end());
+ EATEST_VERIFY(to40.mX == 4); // It should change to 0 below during the move swap.
+ toSetIterator = toSet.emplace(eastl::move(to40));
+ EATEST_VERIFY(*toSetIterator == TestObject(4));
+ EATEST_VERIFY(toSet.find(to40) != toSet.end());
+ EATEST_VERIFY(to40.mX == 0);
+
+ TestObject to41(4);
+ toSetIterator = toSet.emplace(eastl::move(to41)); // multiset can insert another of these.
+ EATEST_VERIFY(*toSetIterator == TestObject(4));
+ EATEST_VERIFY(toSet.find(to41) != toSet.end());
+
+ // iterator t1A.emplace_hint(const_iterator position, value_type&& value);
+ TestObject to50(5);
+ toSetIterator = toSet.emplace(eastl::move(to50));
+ EATEST_VERIFY(*toSetIterator == TestObject(5));
+ EATEST_VERIFY(toSet.find(to50) != toSet.end());
+
+ TestObject to51(5);
+ toSetIterator = toSet.emplace_hint(toSetIterator, eastl::move(to51));
+ EATEST_VERIFY(*toSetIterator == TestObject(5));
+ EATEST_VERIFY(toSet.find(to51) != toSet.end());
+
+ TestObject to6(6);
+ toSetIterator = toSet.emplace_hint(toSet.begin(), eastl::move(to6)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(*toSetIterator == TestObject(6));
+ EATEST_VERIFY(toSet.find(to6) != toSet.end());
+
+ TestObject to2(2);
+ EATEST_VERIFY(toSet.find(to2) == toSet.end());
+ toSetIterator = toSet.emplace(to2);
+ EATEST_VERIFY(*toSetIterator == TestObject(2));
+ EATEST_VERIFY(toSet.find(to2) != toSet.end());
+ toSetIterator = toSet.emplace(to2);
+ EATEST_VERIFY(*toSetIterator == TestObject(2));
+ EATEST_VERIFY(toSet.find(to2) != toSet.end());
+
+ // iterator t1A.emplace_hint(const_iterator position, const value_type& value);
+ TestObject to70(7);
+ toSetIterator = toSet.emplace(to70);
+ EATEST_VERIFY(*toSetIterator == TestObject(7));
+ EATEST_VERIFY(toSet.find(to70) != toSet.end());
+
+ TestObject to71(7);
+ toSetIterator = toSet.emplace_hint(toSetIterator, to71);
+ EATEST_VERIFY(*toSetIterator == to71);
+ EATEST_VERIFY(toSet.find(to71) != toSet.end());
+
+ TestObject to8(8);
+ toSetIterator = toSet.emplace_hint(toSet.begin(), to8); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(*toSetIterator == to8);
+ EATEST_VERIFY(toSet.find(to8) != toSet.end());
+
+ // insert_return_type t1A.insert(value_type&& value);
+ TestObject to3(3);
+ EATEST_VERIFY(toSet.find(to3) == toSet.end());
+ toSetIterator = toSet.insert(TestObject(to3));
+ EATEST_VERIFY(*toSetIterator == TestObject(3));
+ EATEST_VERIFY(toSet.find(to3) != toSet.end());
+ toSetIterator = toSet.insert(TestObject(to3));
+ EATEST_VERIFY(*toSetIterator == TestObject(3));
+ EATEST_VERIFY(toSet.find(to3) != toSet.end());
+
+ // iterator t1A.insert(const_iterator position, value_type&& value);
+ TestObject to90(9);
+ toSetIterator = toSet.emplace(eastl::move(to90));
+ EATEST_VERIFY(*toSetIterator == TestObject(9));
+ EATEST_VERIFY(toSet.find(to90) != toSet.end());
+
+ TestObject to91(9);
+ toSetIterator = toSet.emplace_hint(toSetIterator, eastl::move(to91));
+ EATEST_VERIFY(*toSetIterator == TestObject(9));
+ EATEST_VERIFY(toSet.find(to91) != toSet.end());
+
+ TestObject to10(10);
+ toSetIterator = toSet.emplace_hint(toSet.begin(), eastl::move(to10)); // specify a bad hint. Insertion should still work.
+ EATEST_VERIFY(*toSetIterator == TestObject(10));
+ EATEST_VERIFY(toSet.find(to10) != toSet.end());
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestSmartPtr.cpp b/EASTL/test/source/TestSmartPtr.cpp
new file mode 100644
index 0000000..8052392
--- /dev/null
+++ b/EASTL/test/source/TestSmartPtr.cpp
@@ -0,0 +1,2230 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include <EABase/eabase.h>
+#include "EASTLTest.h"
+#include "GetTypeName.h"
+#include <EAStdC/EAString.h>
+#include <EAStdC/EAStopwatch.h>
+#include <EASTL/atomic.h>
+#include <EASTL/core_allocator_adapter.h>
+#include <EASTL/core_allocator.h>
+#include <EASTL/intrusive_ptr.h>
+#include <EASTL/linked_array.h>
+#include <EASTL/linked_ptr.h>
+#include <EASTL/safe_ptr.h>
+#include <EASTL/scoped_array.h>
+#include <EASTL/scoped_ptr.h>
+#include <EASTL/shared_array.h>
+#include <EASTL/shared_ptr.h>
+#include <EASTL/unique_ptr.h>
+#include <EASTL/weak_ptr.h>
+#include <eathread/eathread_thread.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <stdio.h>
+#include <string.h>
+#ifdef EA_PLATFORM_WINDOWS
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+ #include <Windows.h>
+#elif defined(EA_PLATFORM_ANDROID)
+ #include <android/log.h>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+EA_DISABLE_VC_WARNING(4702 4800) // 4702: unreachable code
+ // 4800: forcing value to bool 'true' or 'false'
+
+
+
+namespace SmartPtrTest
+{
+ /// CustomDeleter
+ ///
+ /// Used for testing unique_ptr deleter overrides. Otherwise acts the same as the default deleter.
+ ///
+ struct CustomDeleter
+ {
+ template <typename T>
+ void operator()(const T* p) const // We use a const argument type in order to be most flexible with what types we accept.
+ { delete const_cast<T*>(p); }
+
+ CustomDeleter() {}
+ CustomDeleter(const CustomDeleter&) {}
+ CustomDeleter(CustomDeleter&&) {}
+ CustomDeleter& operator=(const CustomDeleter&) { return *this; }
+ CustomDeleter& operator=(CustomDeleter&&) { return *this; }
+ };
+
+
+ struct CustomArrayDeleter
+ {
+ template <typename T>
+ void operator()(const T* p) const // We use a const argument type in order to be most flexible with what types we accept.
+ { delete[] const_cast<T*>(p); }
+
+ CustomArrayDeleter() {}
+ CustomArrayDeleter(const CustomArrayDeleter&) {}
+ CustomArrayDeleter(CustomArrayDeleter&&) {}
+ CustomArrayDeleter& operator=(const CustomArrayDeleter&) { return *this; }
+ CustomArrayDeleter& operator=(CustomArrayDeleter&&) { return *this; }
+ };
+
+
+ /// A
+ ///
+ /// This is used for various tests.
+ ///
+ struct A
+ {
+ char mc;
+ static int mCount;
+
+ A(char c = 0)
+ : mc(c) { ++mCount; }
+
+ A(const A& x)
+ : mc(x.mc) { ++mCount; }
+
+ A& operator=(const A& x)
+ { mc = x.mc; return *this; }
+
+ virtual ~A() // Virtual because we subclass A below.
+ { --mCount; }
+ };
+
+
+ int A::mCount = 0;
+
+
+ /// B
+ ///
+ struct B : public A
+ {
+ };
+
+
+
+ /// RefCountTest
+ ///
+ /// This is used for tests involving intrusive_ptr.
+ ///
+ struct RefCountTest
+ {
+ int mRefCount;
+ static int mCount;
+
+ RefCountTest()
+ : mRefCount(0) { ++mCount; }
+
+ RefCountTest(const RefCountTest&)
+ : mRefCount(0) { ++mCount; }
+
+ RefCountTest& operator=(const RefCountTest&)
+ { return *this; }
+
+ virtual ~RefCountTest()
+ { --mCount; }
+
+ virtual int AddRef()
+ { return (int)((mRefCount++) + 1); }
+
+ virtual int Release()
+ {
+ int rc = (int)((mRefCount--) - 1);
+ if(rc)
+ return rc;
+ mRefCount = 1;
+ delete this;
+ return 0;
+ }
+ };
+
+ int RefCountTest::mCount = 0;
+
+
+
+ /// Test
+ ///
+ /// This is used for tests involving intrusive_ptr.
+ ///
+ struct Test : public RefCountTest
+ {
+ bool* mpBool;
+
+ Test(bool* pBool)
+ : mpBool(pBool) { *pBool = true; }
+
+ Test(const Test& x):
+ RefCountTest(x), mpBool(x.mpBool) { }
+
+ Test& operator=(const Test& x)
+ { mpBool = x.mpBool; return *this; }
+
+ ~Test()
+ { *mpBool = false; }
+ };
+
+
+
+ /// IntrusiveParent / IntrusiveChild
+ ///
+ /// This is used for tests involving intrusive_ptr.
+ ///
+ struct IntrusiveParent : public RefCountTest
+ {
+ };
+
+ struct IntrusiveChild : public IntrusiveParent
+ {
+ };
+
+
+ /// intrusive_ptr_add_ref / intrusive_ptr_release
+ ///
+ /// This is used for tests involving intrusive_ptr.
+ ///
+ struct IntrusiveCustom : public RefCountTest
+ {
+ static int mAddRefCallCount;
+ static int mReleaseCallCount;
+
+ virtual int AddRef()
+ {
+ ++mAddRefCallCount;
+ return RefCountTest::AddRef();
+ }
+
+ virtual int Release()
+ {
+ ++mReleaseCallCount;
+ return RefCountTest::Release();
+ }
+ };
+
+ int IntrusiveCustom::mAddRefCallCount = 0;
+ int IntrusiveCustom::mReleaseCallCount = 0;
+
+ void intrusive_ptr_add_ref(IntrusiveCustom* p)
+ {
+ p->AddRef();
+ }
+
+ void intrusive_ptr_release(IntrusiveCustom* p)
+ {
+ p->Release();
+ }
+
+
+ /// ParentClass / ChildClass / GrandChildClass
+ ///
+ /// This is used for tests involving shared_ptr.
+ ///
+ struct ParentClass
+ {
+ virtual ~ParentClass() { }
+ virtual void DoNothingParentClass() { }
+ };
+
+ struct ChildClass : public ParentClass
+ {
+ virtual void DoNothingChildClass() { }
+ };
+
+ struct GrandChildClass : public ChildClass
+ {
+ virtual void DoNothingGrandChildClass() { }
+ };
+
+
+
+ /// NamedClass
+ ///
+ struct NamedClass
+ {
+ const char* mpName;
+ const char* mpName2;
+ static int mnCount;
+
+ NamedClass(const char* pName = NULL)
+ : mpName(pName), mpName2(NULL) { ++mnCount; }
+
+ NamedClass(const char* pName, const char* pName2)
+ : mpName(pName), mpName2(pName2) { ++mnCount; }
+
+ NamedClass(const NamedClass& x)
+ : mpName(x.mpName), mpName2(x.mpName2) { ++mnCount; }
+
+ NamedClass& operator=(const NamedClass& x)
+ { mpName = x.mpName; mpName2 = x.mpName2; return *this; }
+
+ ~NamedClass()
+ { --mnCount; }
+ };
+
+ int NamedClass::mnCount = 0;
+
+
+
+ /// Y
+ ///
+ /// This is used for tests involving shared_ptr and enabled_shared_from_this.
+ ///
+ struct Y : public eastl::enable_shared_from_this<Y>
+ {
+ static int mnCount;
+
+ Y() { ++mnCount; }
+ Y(const Y&) { ++mnCount; }
+ Y& operator=(const Y&) { return *this; }
+ ~Y() { --mnCount; }
+
+ eastl::shared_ptr<Y> f()
+ { return shared_from_this(); }
+ };
+
+ int Y::mnCount = 0;
+
+
+
+ /// ACLS / BCLS
+ ///
+ /// This is used for tests involving shared_ptr.
+ ///
+ class ACLS : public eastl::enable_shared_from_this<ACLS>
+ {
+ public:
+ static int mnCount;
+ int a;
+
+ ACLS(int _a_ = 0) : a(_a_) { ++mnCount; }
+ ACLS(const ACLS& x) : a(x.a) { ++mnCount; }
+ ACLS& operator=(const ACLS& x) { a = x.a; return *this; }
+ ~ACLS() { --mnCount; }
+ };
+
+ int ACLS::mnCount = 0;
+
+
+ class BCLS : public ACLS
+ {
+ public:
+ static int mnCount;
+ int b;
+
+ BCLS(int _b_ = 0) : b(_b_) { ++mnCount; }
+ BCLS(const BCLS& x) : ACLS(x), b(x.b) { ++mnCount; }
+ BCLS& operator=(const BCLS& x) { b = x.b; ACLS::operator=(x); return *this; }
+ ~BCLS() { --mnCount; }
+ };
+
+ int BCLS::mnCount = 0;
+
+
+
+ /// A1 / B1
+ ///
+ /// This is used for tests involving shared_ptr.
+ ///
+ struct A1
+ {
+ static int mnCount;
+ int a;
+
+ A1(int _a_ = 0) : a(_a_) { ++mnCount; }
+ A1(const A1& x) : a(x.a) { ++mnCount; }
+ A1& operator=(const A1& x) { a = x.a; return *this; }
+ ~A1() { --mnCount; }
+ };
+
+ int A1::mnCount = 0;
+
+
+
+ struct B1 : public A1
+ {
+ static int mnCount;
+ int b;
+
+ B1(int _b_ = 0) : b(_b_) { ++mnCount; }
+ B1(const B1& x) : A1(x), b(x.b) { ++mnCount; }
+ B1& operator=(const B1& x) { b = x.b; A1::operator=(x); return *this; }
+ ~B1() { --mnCount; }
+ };
+
+ int B1::mnCount = 0;
+
+
+
+ class MockObject
+ {
+ public:
+ MockObject(bool* pAlloc)
+ : mpAlloc(pAlloc){ *mpAlloc = true; }
+
+ ~MockObject()
+ { *mpAlloc = false; }
+
+ bool IsAllocated() const
+ { return *mpAlloc; }
+
+ bool* GetAllocPtr() const
+ { return mpAlloc; }
+
+ private:
+ bool* mpAlloc;
+ };
+
+ class DerivedMockObject : public MockObject
+ {
+ public:
+ DerivedMockObject(bool* pAlloc)
+ : MockObject(pAlloc) {}
+ };
+
+
+ struct foo : public eastl::enable_shared_from_this<foo>
+ {
+ foo() : mX(0){}
+ int mX;
+ };
+
+ struct CheckUPtrEmptyInDestructor
+ {
+ ~CheckUPtrEmptyInDestructor()
+ {
+ if(mpUPtr)
+ mCheckUPtrEmpty = (*mpUPtr == nullptr);
+ }
+
+ eastl::unique_ptr<CheckUPtrEmptyInDestructor>* mpUPtr{};
+ static bool mCheckUPtrEmpty;
+ };
+
+ bool CheckUPtrEmptyInDestructor::mCheckUPtrEmpty = false;
+
+ struct CheckUPtrArrayEmptyInDestructor
+ {
+ ~CheckUPtrArrayEmptyInDestructor()
+ {
+ if(mpUPtr)
+ mCheckUPtrEmpty = (*mpUPtr == nullptr);
+ }
+
+ eastl::unique_ptr<CheckUPtrArrayEmptyInDestructor[]>* mpUPtr{};
+ static bool mCheckUPtrEmpty;
+ };
+
+ bool CheckUPtrArrayEmptyInDestructor::mCheckUPtrEmpty = false;
+} // namespace SmartPtrTest
+
+
+
+
+static int Test_unique_ptr()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount(0);
+
+ {
+ EATEST_VERIFY(A::mCount == 0);
+
+ // explicit unique_ptr(pointer pValue) noexcept
+ unique_ptr<int> pT1(new int(5));
+ EATEST_VERIFY(*pT1 == 5);
+
+ // (reference) operator*() const
+ *pT1 = 3;
+ EATEST_VERIFY(*pT1 == 3);
+
+ // explicit unique_ptr(pointer pValue) noexcept
+ unique_ptr<A> pT2(new A(1));
+ EATEST_VERIFY(pT2->mc == 1);
+ EATEST_VERIFY(A::mCount == 1);
+
+ // Pointers of derived types are allowed (unlike array unique_ptr)
+ unique_ptr<A> pT1B(new B);
+ EATEST_VERIFY(pT1B.get() != NULL);
+ EATEST_VERIFY(A::mCount == 2);
+
+ A* pA = pT1B.release(); // release simply forgets the owned pointer.
+ EATEST_VERIFY(pT1B.get() == NULL);
+ EATEST_VERIFY(A::mCount == 2);
+
+ delete pA;
+ EATEST_VERIFY(A::mCount == 1);
+
+ // pointer operator->() const noexcept
+ pT2->mc = 5;
+ EATEST_VERIFY(pT2.get()->mc == 5);
+
+ // void reset(pointer pValue = pointer()) noexcept
+ pT2.reset(new A(2));
+ EATEST_VERIFY(pT2->mc == 2);
+ EATEST_VERIFY(A::mCount == 1);
+
+ pT2.reset(0);
+ EATEST_VERIFY(pT2.get() == (A*)0);
+ EATEST_VERIFY(A::mCount == 0);
+
+ pT2.reset(new A(3));
+ EATEST_VERIFY(pT2->mc == 3);
+ EATEST_VERIFY(A::mCount == 1);
+
+ unique_ptr<A> pT3(new A(4));
+ EATEST_VERIFY(pT3->mc == 4);
+ EATEST_VERIFY(A::mCount == 2);
+
+ // void swap(this_type& scopedPtr) noexcept
+ pT2.swap(pT3);
+ EATEST_VERIFY(pT2->mc == 4);
+ EATEST_VERIFY(pT3->mc == 3);
+ EATEST_VERIFY(A::mCount == 2);
+
+ // void swap(unique_ptr<T, D>& scopedPtr1, unique_ptr<T, D>& scopedPtr2) noexcept
+ swap(pT2, pT3);
+ EATEST_VERIFY(pT2->mc == 3);
+ EATEST_VERIFY(pT3->mc == 4);
+ EATEST_VERIFY((pT2 < pT3) == (pT2.get() < pT3.get()));
+ EATEST_VERIFY(A::mCount == 2);
+
+ // pointer release() noexcept
+ unique_ptr<A> pRelease(new A);
+ EATEST_VERIFY(A::mCount == 3);
+ pA = pRelease.release();
+ delete pA;
+ EATEST_VERIFY(A::mCount == 2);
+
+ // constexpr unique_ptr() noexcept
+ unique_ptr<A> pT4;
+ EATEST_VERIFY(pT4.get() == (A*)0);
+ if(pT4)
+ EATEST_VERIFY(pT4.get()); // Will fail
+ if(!(!pT4))
+ EATEST_VERIFY(pT4.get()); // Will fail
+
+ pT4.reset(new A(0));
+ if(!pT4)
+ EATEST_VERIFY(!pT4.get()); // Will fail
+
+ EATEST_VERIFY(A::mCount == 3);
+
+ // unique_ptr(nullptr_t) noexcept
+ unique_ptr<A> pT5(nullptr);
+ EATEST_VERIFY(pT5.get() == (A*)0);
+
+ // unique_ptr(pointer pValue, deleter) noexcept
+ CustomDeleter customADeleter;
+ unique_ptr<A, CustomDeleter> pT6(new A(17), customADeleter);
+ EATEST_VERIFY(pT6->mc == 17);
+
+ // unique_ptr(pointer pValue, typename eastl::remove_reference<Deleter>::type&& deleter) noexcept
+ unique_ptr<A, CustomDeleter> pT7(new A(18), CustomDeleter());
+ EATEST_VERIFY(pT7->mc == 18);
+
+ // unique_ptr(this_type&& x) noexcept
+ unique_ptr<A, CustomDeleter> pT8(eastl::move(pT7));
+ EATEST_VERIFY(pT8->mc == 18);
+
+ // unique_ptr(unique_ptr<U, E>&& u, ...)
+ unique_ptr<A, default_delete<A> > pT9(eastl::move(pT2));
+
+ // this_type& operator=(this_type&& u) noexcept
+ // operator=(unique_ptr<U, E>&& u) noexcept
+ //unique_ptr<void, CustomDeleter> pTVoid;
+ //unique_ptr<int, CustomDeleter> pTInt(new int(1));
+ //pTVoid.operator=<int, CustomDeleter>(eastl::move(pTInt)); // This doesn't work because CustomDeleter doesn't know how to delete void*. Need to rework this test.
+
+ // this_type& operator=(nullptr_t) noexcept
+ pT6 = nullptr;
+ EATEST_VERIFY(pT6.get() == (A*)0);
+
+ // user reported regression
+ // ensure a unique_ptr containing nullptr doesn't call the deleter when its destroyed.
+ {
+ static bool sLocalDeleterCalled;
+ sLocalDeleterCalled = false;
+
+ struct LocalDeleter
+ {
+ void operator()(int* p) const
+ {
+ sLocalDeleterCalled = true;
+ delete p;
+ }
+ };
+
+ using local_unique_ptr = eastl::unique_ptr<int, LocalDeleter>;
+
+ local_unique_ptr pEmpty{nullptr};
+
+ pEmpty = local_unique_ptr{new int(42), LocalDeleter()};
+
+ EATEST_VERIFY(sLocalDeleterCalled == false);
+ }
+ }
+
+ {
+ // Test that unique_ptr internal pointer is reset before calling the destructor
+ CheckUPtrEmptyInDestructor::mCheckUPtrEmpty = false;
+
+ unique_ptr<CheckUPtrEmptyInDestructor> uptr(new CheckUPtrEmptyInDestructor);
+ uptr->mpUPtr = &uptr;
+ uptr.reset();
+ EATEST_VERIFY(CheckUPtrEmptyInDestructor::mCheckUPtrEmpty);
+ }
+
+ {
+ // Test that unique_ptr<[]> internal pointer is reset before calling the destructor
+ CheckUPtrArrayEmptyInDestructor::mCheckUPtrEmpty = false;
+
+ unique_ptr<CheckUPtrArrayEmptyInDestructor[]> uptr(new CheckUPtrArrayEmptyInDestructor[1]);
+ uptr[0].mpUPtr = &uptr;
+ uptr.reset();
+ EATEST_VERIFY(CheckUPtrArrayEmptyInDestructor::mCheckUPtrEmpty);
+ }
+
+ {
+ #if EASTL_CORE_ALLOCATOR_ENABLED
+ // Test EA::Allocator::EASTLICoreDeleter usage within eastl::shared_ptr.
+ // http://en.cppreference.com/w/cpp/memory/shared_ptr/shared_ptr
+
+ // Consider the following for standards compliance.
+ // eastl::shared_ptr<A, EASTLCoreDeleterAdapter> foo(pA, EASTLCoreDeleterAdapter());
+
+ const int cacheAllocationCount = gEASTLTest_AllocationCount;
+
+ using namespace EA::Allocator;
+
+ EASTLCoreAllocatorAdapter ta;
+ void* pMem = ta.allocate(sizeof(A));
+
+ EATEST_VERIFY(pMem != nullptr);
+ EATEST_VERIFY(gEASTLTest_AllocationCount > cacheAllocationCount);
+ {
+ A* pA = new (pMem) A();
+ eastl::shared_ptr<A> foo(pA, EASTLCoreDeleterAdapter()); // Not standards complaint code. Update EASTL implementation to provide the type of the deleter.
+ }
+ EATEST_VERIFY(gEASTLTest_AllocationCount == cacheAllocationCount);
+ EATEST_VERIFY(A::mCount == 0);
+ #endif
+ }
+
+ {
+ // Test array specialization of unique_ptr
+
+ EATEST_VERIFY(A::mCount == 0);
+
+ // template <typename P>
+ // explicit unique_ptr(P pValue) noexcept
+ unique_ptr<int[]> pT1(new int[5]);
+ pT1[0] = 5;
+ EATEST_VERIFY(pT1[0] == 5);
+
+ // Arrays of derived types are not allowed (unlike regular unique_ptr)
+ // unique_ptr<A[]> pT1B(new B[5]); // Disabled because it should not compile.
+
+ // (reference) operator[]() const
+ pT1[1] = 1;
+ EATEST_VERIFY(pT1[1] == 1);
+
+ // explicit unique_ptr(pointer pValue) noexcept
+ unique_ptr<A[]> pT2(new A[1]);
+ pT2[0].mc = 1;
+ EATEST_VERIFY(pT2[0].mc == 1);
+ EATEST_VERIFY(A::mCount == 1);
+
+ // pointer operator->() const noexcept
+ pT2[0].mc = 5;
+ EATEST_VERIFY(pT2[0].mc == 5);
+
+ // void reset(pointer pValue = pointer()) noexcept
+ pT2.reset(new A[2]);
+ pT2[0].mc = 2;
+ EATEST_VERIFY(pT2[0].mc == 2);
+
+ pT2.reset(0);
+ EATEST_VERIFY(pT2.get() == (A*)0);
+
+ pT2.reset(new A[3]);
+ pT2[0].mc = 3;
+ EATEST_VERIFY(pT2[0].mc == 3);
+
+ unique_ptr<A[]> pT3(new A[4]);
+ pT3[0].mc = 4;
+ EATEST_VERIFY(pT3[0].mc == 4);
+
+ // void swap(this_type& scopedPtr) noexcept
+ pT2.swap(pT3);
+ EATEST_VERIFY(pT2[0].mc == 4);
+ EATEST_VERIFY(pT3[0].mc == 3);
+
+ // void swap(unique_ptr<T, D>& scopedPtr1, unique_ptr<T, D>& scopedPtr2) noexcept
+ swap(pT2, pT3);
+ EATEST_VERIFY(pT2[0].mc == 3);
+ EATEST_VERIFY(pT3[0].mc == 4);
+ EATEST_VERIFY((pT2 < pT3) == (pT2.get() < pT3.get()));
+
+ // pointer release() noexcept
+ unique_ptr<A[]> pRelease(new A[1]);
+ A* pAArray = pRelease.release();
+ delete[] pAArray;
+
+ // constexpr unique_ptr() noexcept
+ unique_ptr<A[]> pT4;
+ EATEST_VERIFY(pT4.get() == (A*)0);
+ if(pT4)
+ EATEST_VERIFY(pT4.get()); // Will fail
+ if(!(!pT4))
+ EATEST_VERIFY(pT4.get()); // Will fail
+
+ pT4.reset(new A[1]);
+ if(!pT4)
+ EATEST_VERIFY(!pT4.get()); // Will fail
+
+ EATEST_VERIFY(A::mCount == 8); // There were a number of array creations and deletions above that make this so.
+
+ // unique_ptr(nullptr_t) noexcept
+ unique_ptr<A[]> pT5(nullptr);
+ EATEST_VERIFY(pT5.get() == (A*)0);
+
+ // unique_ptr(pointer pValue, deleter) noexcept
+ CustomArrayDeleter customADeleter;
+ unique_ptr<A[], CustomArrayDeleter> pT6(new A[17], customADeleter);
+ pT6[0].mc = 17;
+ EATEST_VERIFY(pT6[0].mc == 17);
+
+ // unique_ptr(pointer pValue, typename eastl::remove_reference<Deleter>::type&& deleter) noexcept
+ unique_ptr<A[], CustomArrayDeleter> pT7(new A[18], CustomArrayDeleter());
+ pT7[0].mc = 18;
+ EATEST_VERIFY(pT7[0].mc == 18);
+
+ // unique_ptr(this_type&& x) noexcept
+ unique_ptr<A[], CustomArrayDeleter> pT8(eastl::move(pT7));
+ EATEST_VERIFY(pT8[0].mc == 18);
+
+ // unique_ptr(unique_ptr<U, E>&& u, ...)
+ unique_ptr<A[], default_delete<A[]> > pT9(eastl::move(pT2));
+ EATEST_VERIFY(pT9[0].mc == 3);
+
+ // this_type& operator=(this_type&& u) noexcept
+ // operator=(unique_ptr<U, E>&& u) noexcept
+ //unique_ptr<void, CustomDeleter> pTVoid;
+ //unique_ptr<int, CustomDeleter> pTInt(new int(1));
+ //pTVoid.operator=<int, CustomDeleter>(eastl::move(pTInt)); // This doesn't work because CustomDeleter doesn't know how to delete void*. Need to rework this test.
+
+ // this_type& operator=(nullptr_t) noexcept
+ pT6 = nullptr;
+ EATEST_VERIFY(pT6.get() == (A*)0);
+
+ // unique_ptr<> make_unique(Args&&... args);
+ unique_ptr<NamedClass> p = eastl::make_unique<NamedClass>("test", "test2");
+ EATEST_VERIFY(EA::StdC::Strcmp(p->mpName, "test") == 0 && EA::StdC::Strcmp(p->mpName2, "test2") == 0);
+
+ unique_ptr<NamedClass[]> pArray = eastl::make_unique<NamedClass[]>(4);
+ pArray[0].mpName = "test";
+ EATEST_VERIFY(EA::StdC::Strcmp(p->mpName, "test") == 0);
+
+ #ifdef EASTL_TEST_DISABLED_PENDING_SUPPORT
+ {
+ const size_t kAlignedStructAlignment = 512;
+ struct AlignedStruct {} EA_ALIGN(kAlignedStructAlignment);
+
+ unique_ptr<AlignedStruct> pAlignedStruct = eastl::make_unique<AlignedStruct>();
+ EATEST_VERIFY_F(intptr_t(pAlignedStruct.get()) % kAlignedStructAlignment == 0, "pAlignedStruct didn't have proper alignment");
+ }
+ #endif
+
+ //Expected to not be valid:
+ //unique_ptr<NamedClass[4]> p2Array4 = eastl::make_unique<NamedClass[4]>();
+ //p2Array4[0].mpName = "test";
+ //EATEST_VERIFY(EA::StdC::Strcmp(p2Array4[0].mpName, "test") == 0);
+ }
+
+ EATEST_VERIFY(A::mCount == 0); // This check verifies that no A instances were lost, which also verifies that the [] version of the deleter was used in all cases.
+
+ // validate unique_ptr's compressed_pair implementation is working.
+ {
+ const int ARBITRARY_SIZE = 256;
+ static_assert(sizeof(unique_ptr<short>) == sizeof(uintptr_t), "");
+ static_assert(sizeof(unique_ptr<long>) == sizeof(uintptr_t), "");
+
+ // unique_ptr should be the same size as a pointer. The deleter object is empty so the
+ // eastl::compressed_pair implementation will remove that deleter data member from the unique_ptr.
+ {
+ auto deleter = [](void* pMem) { free(pMem); };
+ unique_ptr<void, decltype(deleter)> sptr(malloc(ARBITRARY_SIZE), deleter);
+ static_assert(sizeof(sptr) == (sizeof(uintptr_t)), "unexpected unique_ptr size");
+ }
+
+ // unique_ptr should be larger than a pointer when the deleter functor is capturing state. This state forces
+ // the compressed_pair to cached the data in unique_ptr locally.
+ {
+ int a = 0, b = 0, c = 0, d = 0, e = 0, f = 0;
+ auto deleter = [=](void* pMem) { auto result = (a+b+c+d+e+f); EA_UNUSED(result); free(pMem); };
+ unique_ptr<void, decltype(deleter)> sptr(malloc(ARBITRARY_SIZE), deleter);
+ static_assert(sizeof(sptr) == ((6 * sizeof(int)) + (sizeof(uintptr_t))), "unexpected unique_ptr size");
+ }
+
+ // Simply test moving the one unique pointer to another.
+ // Exercising operator=(T&&)
+ {
+ {
+ unique_ptr<int> ptr(new int(3));
+ EATEST_VERIFY(ptr.get() && *ptr == 3);
+
+ unique_ptr<int> newPtr(new int(4));
+ EATEST_VERIFY(newPtr.get() && *newPtr == 4);
+
+ ptr = eastl::move(newPtr); // Deletes int(3) and assigns mpValue to int(4)
+ EATEST_VERIFY(ptr.get() && *ptr == 4);
+ EATEST_VERIFY(newPtr.get() == nullptr);
+ }
+
+ #if EA_HAVE_CPP11_INITIALIZER_LIST
+ {
+ unique_ptr<int[]> ptr(new int[3]{ 0, 1, 2 });
+ EATEST_VERIFY(ptr.get() && ptr[0] == 0 && ptr[1] == 1 && ptr[2] == 2);
+
+ unique_ptr<int[]> newPtr(new int[3]{ 3, 4, 5 });
+ EATEST_VERIFY(newPtr.get() && newPtr[0] == 3 && newPtr[1] == 4 && newPtr[2] == 5);
+
+ ptr = eastl::move(newPtr); // Deletes int(3) and assigns mpValue to int(4)
+ EATEST_VERIFY(ptr.get() && ptr[0] == 3 && ptr[1] == 4 && ptr[2] == 5);
+ EATEST_VERIFY(newPtr.get() == nullptr);
+ }
+ #endif
+
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ unique_ptr<int> pT1(new int(5));
+ unique_ptr<int> pT2(new int(10));
+ unique_ptr<int> pT3(new int(0));
+
+ EATEST_VERIFY((pT1 <=> pT2) != 0);
+ EATEST_VERIFY((pT2 <=> pT1) != 0);
+
+ EATEST_VERIFY((pT1 <=> pT2) < 0);
+ EATEST_VERIFY((pT1 <=> pT2) <= 0);
+ EATEST_VERIFY((pT2 <=> pT1) > 0);
+ EATEST_VERIFY((pT2 <=> pT1) >= 0);
+
+ EATEST_VERIFY((pT3 <=> pT1) < 0);
+ EATEST_VERIFY((pT3 <=> pT2) < 0);
+ EATEST_VERIFY((pT1 <=> pT3) > 0);
+ EATEST_VERIFY((pT2 <=> pT3) > 0);
+
+ unique_ptr<A> pT4(new A(5));
+ unique_ptr<A> pT5(new A(10));
+
+ EATEST_VERIFY((pT4 <=> pT5) != 0);
+ EATEST_VERIFY((pT5 <=> pT4) != 0);
+
+ EATEST_VERIFY((pT4 <=> pT5) < 0);
+ EATEST_VERIFY((pT4 <=> pT5) <= 0);
+ EATEST_VERIFY((pT5 <=> pT4) > 0);
+ EATEST_VERIFY((pT5 <=> pT4) >= 0);
+ }
+ #endif
+
+ // ToDo: Test move assignment between two convertible types with an is_assignable deleter_type
+ //{
+ // struct Base {};
+ // struct Child : public Base {};
+
+ // typedef unique_ptr<Base, CustomDeleter> BaseSPtr;
+ // typedef unique_ptr<Child, CustomDeleter> ChildSPtr;
+
+ // static_assert(!is_array<BaseSPtr::element_type>::value, "This test requires a non-array type");
+ // static_assert(is_convertible<ChildSPtr::pointer, BaseSPtr::pointer>::value, "UniquePtr ptr types must be convertible for this test");
+ // static_assert(is_assignable<BaseSPtr::deleter_type&, ChildSPtr::deleter_type&&>::value, "Deleter types must be assignable to one another");
+
+ // BaseSPtr ptr(new Base);
+ // EATEST_VERIFY(ptr.get());
+
+ // unique_ptr<Child> newPtr(new Child);
+ // EATEST_VERIFY(newPtr.get());
+
+ // ptr = eastl::move(newPtr);
+ // EATEST_VERIFY(ptr);
+ // EATEST_VERIFY(newPtr.get() == nullptr);
+ //}
+ }
+ }
+
+ return nErrorCount;
+}
+
+
+static int Test_scoped_ptr()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount(0);
+
+ {
+ EATEST_VERIFY(A::mCount == 0);
+
+ scoped_ptr<int> pT1(new int(5));
+ EATEST_VERIFY(*pT1 == 5);
+
+ *pT1 = 3;
+ EATEST_VERIFY(*pT1 == 3);
+ EATEST_VERIFY(pT1.get() == get_pointer(pT1));
+
+ scoped_ptr<A> pT2(new A(1));
+ EATEST_VERIFY(pT2->mc == 1);
+ EATEST_VERIFY(A::mCount == 1);
+
+ pT2.reset(new A(2));
+ EATEST_VERIFY(pT2->mc == 2);
+
+ pT2.reset(0);
+ EATEST_VERIFY(pT2.get() == (A*)0);
+ EATEST_VERIFY(pT2.get() == get_pointer(pT2));
+
+ pT2.reset(new A(3));
+ EATEST_VERIFY(pT2->mc == 3);
+
+ scoped_ptr<A> pT3(new A(4));
+ EATEST_VERIFY(pT3->mc == 4);
+
+ pT2.swap(pT3);
+ EATEST_VERIFY(pT2->mc == 4);
+ EATEST_VERIFY(pT3->mc == 3);
+
+ swap(pT2, pT3);
+ EATEST_VERIFY(pT2->mc == 3);
+ EATEST_VERIFY(pT3->mc == 4);
+ EATEST_VERIFY((pT2 < pT3) == (pT2.get() < pT3.get()));
+
+ scoped_ptr<A> pT4;
+ EATEST_VERIFY(pT4.get() == (A*)0);
+ if(pT4)
+ EATEST_VERIFY(pT4.get()); // Will fail
+ if(!(!pT4))
+ EATEST_VERIFY(pT4.get()); // Will fail
+
+ pT4.reset(new A(0));
+ if(!pT4)
+ EATEST_VERIFY(!pT4.get()); // Will fail
+
+ EATEST_VERIFY(A::mCount == 3);
+ }
+
+ { // Test the detach function.
+ scoped_ptr<A> ptr(new A);
+ A* pA = ptr.detach();
+ delete pA;
+ }
+
+ {
+ scoped_ptr<void> ptr(new int);
+ (void)ptr;
+ }
+
+ EATEST_VERIFY(A::mCount == 0);
+
+ return nErrorCount;
+}
+
+
+
+static int Test_scoped_array()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount(0);
+
+ {
+ scoped_array<int> pT1(new int[5]);
+ pT1[0] = 5;
+ EATEST_VERIFY(pT1[0] == 5);
+ EATEST_VERIFY(pT1.get()[0] == 5);
+
+ scoped_array<A> pT2(new A[2]);
+ EATEST_VERIFY(A::mCount == 2);
+ EATEST_VERIFY(pT2[0].mc == 0);
+ EATEST_VERIFY(pT2.get()[0].mc == 0);
+ EATEST_VERIFY(get_pointer(pT2)[0].mc == 0);
+
+ pT2.reset(new A[4]);
+ EATEST_VERIFY(A::mCount == 4);
+ if(!pT2)
+ EATEST_VERIFY(!pT2.get()); // Will fail
+
+ pT2.reset(0);
+ EATEST_VERIFY(A::mCount == 0);
+ if(pT2)
+ EATEST_VERIFY(pT2.get()); // Will fail
+ if(!(!pT2))
+ EATEST_VERIFY(pT2.get()); // Will fail
+
+ scoped_array<A> pT3(new A[3]);
+ EATEST_VERIFY(A::mCount == 3);
+
+ pT2.swap(pT3);
+ EATEST_VERIFY(A::mCount == 3);
+
+ swap(pT2, pT3);
+ EATEST_VERIFY(A::mCount == 3);
+ EATEST_VERIFY((pT2 < pT3) == (pT2.get() < pT3.get()));
+
+ EATEST_VERIFY(A::mCount == 3);
+ }
+
+ { // Test the detach function.
+ scoped_array<A> ptr(new A[6]);
+ A* pArray = ptr.detach();
+ delete[] pArray;
+ }
+
+ {
+ scoped_array<void> ptr(new int[6]);
+ (void)ptr;
+ }
+
+ EATEST_VERIFY(A::mCount == 0);
+
+ return nErrorCount;
+}
+
+
+static int Test_shared_ptr()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount(0);
+
+ // Name test.
+ #if EASTLTEST_GETTYPENAME_AVAILABLE
+ //eastl::string sTypeName = GetTypeName<typename eastl::unique_ptr<int>::pointer>();
+ //EA::UnitTest::Report("type name of (typename shared_ptr<int>::pointer): %s", sTypeName.c_str());
+
+ //sTypeName = GetTypeName<typename eastl::common_type<int*, int*>::type>();
+ //EA::UnitTest::Report("type name of (typename eastl::common_type<int*, int*>::type): %s", sTypeName.c_str());
+ #endif
+
+ {
+ shared_ptr<int> pT1;
+ EATEST_VERIFY(pT1.get() == NULL);
+ }
+
+ {
+ shared_ptr<int> pT1(new int(5));
+ EATEST_VERIFY(*pT1 == 5);
+ EATEST_VERIFY(pT1.get() == get_pointer(pT1));
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1.unique() );
+
+ shared_ptr<int> pT2;
+ EATEST_VERIFY(pT1 != pT2);
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1.unique());
+
+ pT2 = pT1;
+ EATEST_VERIFY(pT1.use_count() == 2);
+ EATEST_VERIFY(pT2.use_count() == 2);
+ EATEST_VERIFY(!pT1.unique());
+ EATEST_VERIFY(!(pT1 < pT2)); // They should be equal
+ EATEST_VERIFY(pT1 == pT2);
+
+ *pT1 = 3;
+ EATEST_VERIFY(*pT1 == 3);
+ EATEST_VERIFY(*pT1 == 3);
+ EATEST_VERIFY(*pT2 == 3);
+
+ pT2.reset((int*)NULL);
+ EATEST_VERIFY(pT2.unique());
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT1.unique());
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1 != pT2);
+ }
+
+ {
+ EATEST_VERIFY(A::mCount == 0);
+
+ shared_ptr<A> pT2(new A(0));
+ EATEST_VERIFY(A::mCount == 1);
+ EATEST_VERIFY(pT2->mc == 0);
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+
+ pT2.reset(new A(1));
+ EATEST_VERIFY(pT2->mc == 1);
+ EATEST_VERIFY(A::mCount == 1);
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+
+ shared_ptr<A> pT3(new A(2));
+ EATEST_VERIFY(A::mCount == 2);
+
+ pT2.swap(pT3);
+ EATEST_VERIFY(pT2->mc == 2);
+ EATEST_VERIFY(pT3->mc == 1);
+ EATEST_VERIFY(A::mCount == 2);
+
+ swap(pT2, pT3);
+ EATEST_VERIFY(pT2->mc == 1);
+ EATEST_VERIFY(pT3->mc == 2);
+ EATEST_VERIFY(A::mCount == 2);
+ if(!pT2)
+ EATEST_VERIFY(!pT2.get()); // Will fail
+
+ shared_ptr<A> pT4;
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+ EATEST_VERIFY(A::mCount == 2);
+ if(pT4)
+ EATEST_VERIFY(pT4.get()); // Will fail
+ if(!(!pT4))
+ EATEST_VERIFY(pT4.get()); // Will fail
+
+ pT4 = pT2;
+ EATEST_VERIFY(pT2.use_count() == 2);
+ EATEST_VERIFY(pT4.use_count() == 2);
+ EATEST_VERIFY(!pT2.unique());
+ EATEST_VERIFY(!pT4.unique());
+ EATEST_VERIFY(A::mCount == 2);
+ EATEST_VERIFY(pT2 == pT4);
+ EATEST_VERIFY(pT2 != pT3);
+ EATEST_VERIFY(!(pT2 < pT4)); // They should be equal
+
+ shared_ptr<A> pT5(pT4);
+ EATEST_VERIFY(pT4 == pT5);
+ EATEST_VERIFY(pT2.use_count() == 3);
+ EATEST_VERIFY(pT4.use_count() == 3);
+ EATEST_VERIFY(pT5.use_count() == 3);
+ EATEST_VERIFY(!pT5.unique());
+
+ pT4 = shared_ptr<A>((A*)NULL);
+ EATEST_VERIFY(pT4.unique());
+ EATEST_VERIFY(pT4.use_count() == 1);
+ EATEST_VERIFY(pT2.use_count() == 2);
+
+ EATEST_VERIFY(A::mCount == 2);
+ }
+
+
+ // Regression test reported by a user.
+ // typename eastl::enable_if<!eastl::is_array<U>::value && eastl::is_convertible<U*, element_type*>::value, this_type&>::type
+ // operator=(unique_ptr<U, Deleter> && uniquePtr)
+ {
+ {
+ shared_ptr<A> rT1(new A(42));
+ unique_ptr<B> rT2(new B); // default ctor uses 0
+ rT2->mc = 115;
+
+ EATEST_VERIFY(rT1->mc == 42);
+ EATEST_VERIFY(rT2->mc == 115);
+
+ rT1 = eastl::move(rT2);
+
+ EATEST_VERIFY(rT1->mc == 115);
+ // EATEST_VERIFY(rT2->mc == 115); // state of object post-move is undefined.
+ }
+
+ // test the state of the shared_ptr::operator= return
+ {
+ shared_ptr<A> rT1(new A(42));
+ unique_ptr<B> rT2(new B); // default ctor uses 0
+ rT2->mc = 115;
+
+ shared_ptr<A> operatorReturn = (rT1 = eastl::move(rT2));
+
+ EATEST_VERIFY(operatorReturn == rT1);
+
+ EATEST_VERIFY(operatorReturn->mc == 115);
+ // EATEST_VERIFY(rT1->mc == 115); // implied as both are pointing to the same address
+ }
+ }
+
+
+ { // Test member template functions.
+ shared_ptr<ChildClass> pCC(new GrandChildClass);
+ shared_ptr<ParentClass> pPC(pCC);
+ shared_ptr<GrandChildClass> pGCC(static_pointer_cast<GrandChildClass>(pPC));
+ }
+
+
+ { // Test enable_shared_from_this
+ shared_ptr<Y> p(new Y);
+ shared_ptr<Y> q = p->f();
+
+ EATEST_VERIFY(p == q);
+ EATEST_VERIFY(!(p < q || q < p)); // p and q must share ownership
+
+ shared_ptr<BCLS> bctrlp = shared_ptr<BCLS>(new BCLS);
+ }
+
+
+ { // Test static_pointer_cast, etc.
+ shared_ptr<GrandChildClass> pGCC(new GrandChildClass);
+ shared_ptr<ParentClass> pPC = static_pointer_cast<ParentClass>(pGCC);
+
+ EATEST_VERIFY(pPC == pGCC);
+
+ #if EASTL_RTTI_ENABLED
+ shared_ptr<ChildClass> pCC = dynamic_pointer_cast<ChildClass>(pPC);
+ EATEST_VERIFY(pCC == pGCC);
+ #endif
+
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // If not using old GCC (GCC 2.x is broken)...
+ eastl::shared_ptr<const void> pVoidPtr = shared_ptr<ParentClass>(new ParentClass);
+ shared_ptr<ParentClass> ap = const_pointer_cast<ParentClass>(static_pointer_cast<const ParentClass>(pVoidPtr));
+ #endif
+
+ //typedef shared_ptr<void const> ASPtr;
+ //shared_ptr<void const> pVoidPtr = ASPtr(new ParentClass);
+ //ASPtr ap = const_pointer_cast<ParentClass>(static_pointer_cast<const ParentClass>(pVoidPtr));
+ }
+
+
+ { // Test static_shared_pointer_cast, etc.
+ shared_ptr<GrandChildClass> pGCC(new GrandChildClass);
+ shared_ptr<ParentClass> pPC = static_shared_pointer_cast<ParentClass /*, EASTLAllocatorType, smart_ptr_deleter<ParentClass>*/ >(pGCC);
+
+ EATEST_VERIFY(pPC == pGCC);
+
+ #if EASTL_RTTI_ENABLED
+ shared_ptr<ChildClass> pCC = dynamic_shared_pointer_cast<ChildClass /*, EASTLAllocatorType, smart_ptr_deleter<ParentClass>*/ >(pPC);
+ EATEST_VERIFY(pCC == pGCC);
+ #endif
+ }
+
+
+ { // Test smart_ptr_deleter
+ shared_ptr<void> pVoid(new ParentClass, smart_ptr_deleter<ParentClass>());
+ EATEST_VERIFY(pVoid.get() != NULL);
+
+ pVoid = shared_ptr<ParentClass>(new ParentClass, smart_ptr_deleter<ParentClass>());
+ EATEST_VERIFY(pVoid.get() != NULL);
+ }
+
+
+ { // Test shared_ptr lambda deleter
+ auto deleter = [](int*) {};
+ eastl::shared_ptr<int> ptr(nullptr, deleter);
+
+ EATEST_VERIFY(!ptr);
+ EATEST_VERIFY(ptr.get() == nullptr);
+ }
+
+
+ { // Test of shared_ptr<void const>
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // If not using old GCC (GCC 2.x is broken)...
+ shared_ptr<void const> voidPtr = shared_ptr<A1>(new A1);
+ shared_ptr<A1> a1Ptr = const_pointer_cast<A1>(static_pointer_cast<const A1>(voidPtr));
+ #endif
+ }
+
+
+ { // Test of static_pointer_cast
+ shared_ptr<B1> bPtr = shared_ptr<B1>(new B1);
+ shared_ptr<A1> aPtr = static_pointer_cast<A1, B1>(bPtr);
+ }
+
+
+ { // Test shared_ptr<void>
+ {
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // If not using old GCC (GCC 2.x is broken)...
+ const char* const pName = "NamedClassTest";
+
+ NamedClass* const pNamedClass0 = new NamedClass(pName);
+ EATEST_VERIFY(pNamedClass0->mpName == pName);
+
+ //shared_ptr<void const, EASTLAllocatorType, smart_ptr_deleter<NamedClass> > voidPtr(pNamedClass0);
+ shared_ptr<void const> voidPtr(pNamedClass0);
+ EATEST_VERIFY(voidPtr.get() == pNamedClass0);
+
+ NamedClass* const pNamedClass1 = (NamedClass*)voidPtr.get();
+ EATEST_VERIFY(pNamedClass1->mpName == pName);
+ #endif
+ }
+
+ {
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // If not using old GCC (GCC 2.x is broken)...
+ const char* const pName = "NamedClassTest";
+
+ NamedClass* const pNamedClass0 = new NamedClass(pName);
+ EATEST_VERIFY(pNamedClass0->mpName == pName);
+
+ shared_ptr<void const> voidPtr(pNamedClass0, smart_ptr_deleter<NamedClass>());
+ EATEST_VERIFY(voidPtr.get() == pNamedClass0);
+
+ NamedClass* const pNamedClass1 = (NamedClass*)voidPtr.get();
+ EATEST_VERIFY(pNamedClass1->mpName == pName);
+ #endif
+ }
+ }
+
+
+ {
+ const char* const pName1 = "NamedClassTest1";
+ const char* const pName2 = "NamedClassTest2";
+
+ shared_ptr<NamedClass> sp(new NamedClass(pName1));
+ EATEST_VERIFY(!sp == false);
+ EATEST_VERIFY(sp.unique());
+ EATEST_VERIFY(sp->mpName == pName1);
+
+ shared_ptr<NamedClass> sp2 = sp;
+ EATEST_VERIFY(sp2.use_count() == 2);
+
+ sp2.reset(new NamedClass(pName2));
+ EATEST_VERIFY(sp2.use_count() == 1);
+ EATEST_VERIFY(sp.unique());
+ EATEST_VERIFY(sp2->mpName == pName2);
+
+ sp.reset();
+ EATEST_VERIFY(!sp == true);
+ }
+
+ {
+ // Exception handling tests
+ #if EASTL_EXCEPTIONS_ENABLED
+ try {
+ weak_ptr<A> pWeakA; // leave uninitalized
+ shared_ptr<A> pSharedA(pWeakA); // This should throw eastl::bad_weak_ptr
+ EATEST_VERIFY(false);
+ }
+ catch(eastl::bad_weak_ptr&)
+ {
+ EATEST_VERIFY(true); // This pathway should be taken.
+ }
+ catch(...)
+ {
+ EATEST_VERIFY(false);
+ }
+
+
+ ThrowingAllocator<true> throwingAllocator; // Throw on first attempt to allocate.
+ shared_ptr<A> pA0;
+
+ try {
+ A::mCount = 0;
+ pA0 = eastl::allocate_shared<A, ThrowingAllocator<true> >(throwingAllocator, 'a');
+ EATEST_VERIFY(false);
+ }
+ catch(std::bad_alloc&)
+ {
+ EATEST_VERIFY(true); // This pathway should be taken.
+ EATEST_VERIFY(pA0.get() == NULL); // The C++11 Standard doesn't seem to require this, but that's how we currently do it until we learn it should be otherwise.
+ EATEST_VERIFY(pA0.use_count() == 0);
+ EATEST_VERIFY(A::mCount == 0); // Verify that there were no surviving A instances since the exception.
+ }
+ catch(...)
+ {
+ EATEST_VERIFY(false);
+ }
+
+
+ try {
+ shared_ptr<A> pA1(new A('a'), default_delete<A>(), throwingAllocator);
+ EATEST_VERIFY(false);
+ }
+ catch(std::bad_alloc&)
+ {
+ EATEST_VERIFY(true); // This pathway should be taken.
+ EATEST_VERIFY(A::mCount == 0);
+ }
+ catch(...)
+ {
+ EATEST_VERIFY(false);
+ }
+
+ #endif
+
+ }
+
+ #if EASTL_RTTI_ENABLED
+ {
+ // template <typename U, typename A, typename D>
+ // shared_ptr(const shared_ptr<U, A, D>& sharedPtr, dynamic_cast_tag);
+ // To do.
+
+ // template <typename U, typename A, typename D, typename UDeleter>
+ // shared_ptr(const shared_ptr<U, A, D>& sharedPtr, dynamic_cast_tag, const UDeleter&);
+ // To do.
+ }
+ #endif
+
+ EATEST_VERIFY(A::mCount == 0);
+
+ return nErrorCount;
+}
+
+
+
+
+#if EASTL_THREAD_SUPPORT_AVAILABLE
+ // C++ Standard section 20.7.2.5 -- shared_ptr atomic access
+ // shared_ptr thread safety is about safe use of the pointer itself and not about what it points to. shared_ptr thread safety
+ // allows you to safely use shared_ptr from different threads, but if the object shared_ptr holds requires thread safety then
+ // you need to separately handle that in a thread-safe way. A good way to think about it is this: "shared_ptr is as thread-safe as a raw pointer."
+ //
+ // Some helper links:
+ // http://stackoverflow.com/questions/9127816/stdshared-ptr-thread-safety-explained
+ // http://stackoverflow.com/questions/14482830/stdshared-ptr-thread-safety
+ // http://cppwisdom.quora.com/shared_ptr-is-almost-thread-safe
+ //
+
+ // Test the ability of Futex to report the callstack of another thread holding a futex.
+ struct SharedPtrTestThread : public EA::Thread::IRunnable
+ {
+ EA::Thread::ThreadParameters mThreadParams;
+ EA::Thread::Thread mThread;
+ eastl::atomic<bool> mbShouldContinue;
+ int mnErrorCount;
+ eastl::shared_ptr<TestObject>* mpSPTO;
+ eastl::weak_ptr<TestObject>* mpWPTO;
+
+ SharedPtrTestThread() : mThreadParams(), mThread(), mbShouldContinue(true), mnErrorCount(0), mpSPTO(NULL), mpWPTO(NULL) {}
+ SharedPtrTestThread(const SharedPtrTestThread&){}
+ void operator=(const SharedPtrTestThread&){}
+
+ intptr_t Run(void*)
+ {
+ int& nErrorCount = mnErrorCount; // declare nErrorCount so that EATEST_VERIFY can work, as it depends on it being declared.
+
+ while(mbShouldContinue.load(eastl::memory_order_relaxed))
+ {
+ EA::UnitTest::ThreadSleepRandom(1, 10);
+
+ EATEST_VERIFY(mpSPTO->get()->mX == 99);
+
+ eastl::shared_ptr<TestObject> temp(mpWPTO->lock());
+ EATEST_VERIFY(temp->mX == 99);
+
+ eastl::shared_ptr<TestObject> spTO2(*mpSPTO);
+ EATEST_VERIFY(spTO2->mX == 99);
+ EATEST_VERIFY(spTO2.use_count() >= 2);
+
+ eastl::weak_ptr<TestObject> wpTO2(spTO2);
+ temp = mpWPTO->lock();
+ EATEST_VERIFY(temp->mX == 99);
+
+ temp = spTO2;
+ spTO2.reset();
+ EATEST_VERIFY(mpSPTO->get()->mX == 99);
+ }
+
+ return nErrorCount;
+ }
+ };
+#endif
+
+
+static int Test_shared_ptr_thread()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+ using namespace EA::Thread;
+
+ int nErrorCount(0);
+
+ #if EASTL_THREAD_SUPPORT_AVAILABLE
+ {
+ SharedPtrTestThread thread[4];
+ shared_ptr<TestObject> spTO(new TestObject(99));
+ weak_ptr<TestObject> wpTO(spTO);
+
+ for(size_t i = 0; i < EAArrayCount(thread); i++)
+ {
+ thread[i].mpSPTO = &spTO;
+ thread[i].mpWPTO = &wpTO;
+ thread[i].mThreadParams.mpName = "SharedPtrTestThread";
+ }
+
+ for(size_t i = 0; i < EAArrayCount(thread); i++)
+ thread[i].mThread.Begin(&thread[0], NULL, &thread[0].mThreadParams);
+
+ EA::UnitTest::ThreadSleep(2000);
+
+ for(size_t i = 0; i < EAArrayCount(thread); i++)
+ thread[i].mbShouldContinue.store(false, eastl::memory_order_relaxed);
+
+ for(size_t i = 0; i < EAArrayCount(thread); i++)
+ {
+ thread[i].mThread.WaitForEnd();
+ nErrorCount += thread[i].mnErrorCount;
+ }
+ }
+ #endif
+
+ #if EASTL_THREAD_SUPPORT_AVAILABLE
+ {
+ // We currently do light testing of the atomic functions. It would take a bit of work to fully test
+ // the memory behavior of these in a rigorous way. Also, as of this writing we don't have a portable
+ // way to use the std::memory_order functionality.
+
+ shared_ptr<TestObject> spTO(new TestObject(55));
+
+ // bool atomic_is_lock_free(const shared_ptr<T>*);
+ EATEST_VERIFY(!atomic_is_lock_free(&spTO));
+
+ // shared_ptr<T> atomic_load(const shared_ptr<T>* pSharedPtr);
+ // shared_ptr<T> atomic_load_explicit(const shared_ptr<T>* pSharedPtr, ... /*std::memory_order memoryOrder*/);
+ shared_ptr<TestObject> spTO2 = atomic_load(&spTO);
+ EATEST_VERIFY(spTO->mX == 55);
+ EATEST_VERIFY(spTO2->mX == 55);
+
+ // void atomic_store(shared_ptr<T>* pSharedPtrA, shared_ptr<T> sharedPtrB);
+ // void atomic_store_explicit(shared_ptr<T>* pSharedPtrA, shared_ptr<T> sharedPtrB, ... /*std::memory_order memoryOrder*/);
+ spTO2->mX = 56;
+ EATEST_VERIFY(spTO->mX == 56);
+ EATEST_VERIFY(spTO2->mX == 56);
+
+ atomic_store(&spTO, shared_ptr<TestObject>(new TestObject(77)));
+ EATEST_VERIFY(spTO->mX == 77);
+ EATEST_VERIFY(spTO2->mX == 56);
+
+ // shared_ptr<T> atomic_exchange(shared_ptr<T>* pSharedPtrA, shared_ptr<T> sharedPtrB);
+ // shared_ptr<T> atomic_exchange_explicit(shared_ptr<T>* pSharedPtrA, shared_ptr<T> sharedPtrB, ... /*std::memory_order memoryOrder*/);
+ spTO = atomic_exchange(&spTO2, spTO);
+ EATEST_VERIFY(spTO->mX == 56);
+ EATEST_VERIFY(spTO2->mX == 77);
+
+ spTO = atomic_exchange_explicit(&spTO2, spTO);
+ EATEST_VERIFY(spTO->mX == 77);
+ EATEST_VERIFY(spTO2->mX == 56);
+
+ // bool atomic_compare_exchange_strong(shared_ptr<T>* pSharedPtr, shared_ptr<T>* pSharedPtrCondition, shared_ptr<T> sharedPtrNew);
+ // bool atomic_compare_exchange_weak(shared_ptr<T>* pSharedPtr, shared_ptr<T>* pSharedPtrCondition, shared_ptr<T> sharedPtrNew);
+ // bool atomic_compare_exchange_strong_explicit(shared_ptr<T>* pSharedPtr, shared_ptr<T>* pSharedPtrCondition, shared_ptr<T> sharedPtrNew, ... /*memory_order memoryOrderSuccess, memory_order memoryOrderFailure*/);
+ // bool atomic_compare_exchange_weak_explicit(shared_ptr<T>* pSharedPtr, shared_ptr<T>* pSharedPtrCondition, shared_ptr<T> sharedPtrNew, ... /*memory_order memoryOrderSuccess, memory_order memoryOrderFailure*/);
+ shared_ptr<TestObject> spTO3 = atomic_load(&spTO2);
+ bool result = atomic_compare_exchange_strong(&spTO3, &spTO, make_shared<TestObject>(88)); // spTO3 != spTO, so this should do no exchange and return false.
+ EATEST_VERIFY(!result);
+ EATEST_VERIFY(spTO3->mX == 56);
+ EATEST_VERIFY(spTO->mX == 56);
+
+ result = atomic_compare_exchange_strong(&spTO3, &spTO2, make_shared<TestObject>(88)); // spTO3 == spTO2, so this should succeed.
+ EATEST_VERIFY(result);
+ EATEST_VERIFY(spTO2->mX == 56);
+ EATEST_VERIFY(spTO3->mX == 88);
+ }
+ #endif
+
+ EATEST_VERIFY(A::mCount == 0);
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+
+
+static int Test_weak_ptr()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount(0);
+
+ {
+ weak_ptr<int> pW0;
+ shared_ptr<int> pS0(new int(0));
+ shared_ptr<int> pS1(new int(1));
+ weak_ptr<int> pW1(pS1);
+ weak_ptr<int> pW2;
+ weak_ptr<int> pW3(pW2);
+
+ EATEST_VERIFY(pS1.use_count() == 1);
+ EATEST_VERIFY(pW1.use_count() == 1);
+ EATEST_VERIFY(pW2.use_count() == 0);
+ EATEST_VERIFY(pW3.use_count() == 0);
+ EATEST_VERIFY(pW1.expired() == false);
+ EATEST_VERIFY(pW2.expired() == true);
+ EATEST_VERIFY(pW3.expired() == true);
+ pS1.reset();
+ EATEST_VERIFY(pW1.expired() == true);
+ pW1 = pS0;
+ EATEST_VERIFY(pW1.expired() == false);
+ pW1.swap(pW2);
+ EATEST_VERIFY(pW1.expired() == true);
+ EATEST_VERIFY(pW2.expired() == false);
+ pW1 = pW2;
+ EATEST_VERIFY(pW1.expired() == false);
+ pW3 = pW1;
+ EATEST_VERIFY(pW3.expired() == false);
+ EATEST_VERIFY(pS1.use_count() == 0);
+ pW3.reset();
+ EATEST_VERIFY(pW3.expired() == true);
+ pS1.reset(new int(3));
+ EATEST_VERIFY(pS1.use_count() == 1);
+ pW3 = pS1;
+ EATEST_VERIFY(pS1.use_count() == 1);
+ EATEST_VERIFY(pS1.use_count() == pW3.use_count());
+
+ shared_ptr<int> pShared2(pW2.lock());
+ shared_ptr<int> pShared3(pW3.lock());
+
+ EATEST_VERIFY(pShared2.use_count() == 2);
+ EATEST_VERIFY(pShared3.use_count() == 2);
+ swap(pW2, pW3);
+ EATEST_VERIFY(pW2.use_count() == 2);
+ EATEST_VERIFY(pW3.use_count() == 2);
+ pW1 = pW3;
+ EATEST_VERIFY(pW3.use_count() == 2);
+
+ EATEST_VERIFY((pW2 < pW3) || (pW3 < pW2));
+
+ EATEST_VERIFY(pS0.use_count() == 2);
+ pW0 = pS0; // This tests the deletion of a weak_ptr after its associated shared_ptr has destructed.
+ EATEST_VERIFY(pS0.use_count() == 2);
+ }
+
+
+ {
+ weak_ptr<NamedClass> wp;
+
+ EATEST_VERIFY(wp.use_count() == 0);
+ EATEST_VERIFY(wp.expired() == true);
+
+ {
+ shared_ptr<NamedClass> sp(new NamedClass("NamedClass"));
+ wp = sp;
+
+ EATEST_VERIFY(wp.use_count() == 1);
+ EATEST_VERIFY(wp.expired() == false);
+ }
+
+ EATEST_VERIFY(wp.use_count() == 0);
+ EATEST_VERIFY(wp.expired() == true);
+ }
+
+ { // shared_from_this
+ // This example is taken from the C++11 Standard doc.
+ shared_ptr<const foo> pFoo(new foo);
+ shared_ptr<const foo> qFoo = pFoo->shared_from_this();
+
+ EATEST_VERIFY(pFoo == qFoo);
+ EATEST_VERIFY(!(pFoo < qFoo) && !(qFoo < pFoo)); // p and q share ownership
+ }
+
+ { // weak_from_this const
+ shared_ptr<const foo> pFoo(new foo);
+ weak_ptr<const foo> qFoo = pFoo->weak_from_this();
+
+ EATEST_VERIFY(pFoo == qFoo.lock());
+ EATEST_VERIFY(!(pFoo < qFoo.lock()) && !(qFoo.lock() < pFoo)); // p and q share ownership
+ }
+
+ { // weak_from_this
+ shared_ptr<foo> pFoo(new foo);
+ weak_ptr<foo> qFoo = pFoo->weak_from_this();
+
+ EATEST_VERIFY(pFoo == qFoo.lock());
+ EATEST_VERIFY(!(pFoo < qFoo.lock()) && !(qFoo.lock() < pFoo)); // p and q share ownership
+ }
+
+ return nErrorCount;
+}
+
+
+static int Test_shared_array()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount(0);
+
+ {
+ shared_array<int> pT1(new int[5]);
+ pT1[0] = 5;
+ EATEST_VERIFY(pT1[0] == 5);
+ EATEST_VERIFY(pT1.get() == get_pointer(pT1));
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1.unique());
+
+ shared_array<int> pT2;
+ EATEST_VERIFY(pT1 != pT2);
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1.unique());
+
+ pT2 = pT1;
+ EATEST_VERIFY(pT1.use_count() == 2);
+ EATEST_VERIFY(pT2.use_count() == 2);
+ EATEST_VERIFY(!pT1.unique());
+ EATEST_VERIFY(!(pT1 < pT2)); // They should be equal
+ EATEST_VERIFY(pT1 == pT2);
+
+ *pT1 = 3;
+ EATEST_VERIFY(*pT1 == 3);
+ EATEST_VERIFY(*pT1 == 3);
+ EATEST_VERIFY(*pT2 == 3);
+
+ pT2.reset(0);
+ EATEST_VERIFY(pT2.unique());
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT1.unique());
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1 != pT2);
+ }
+
+ {
+ EATEST_VERIFY(A::mCount == 0);
+
+ shared_array<A> pT2(new A[5]);
+ EATEST_VERIFY(A::mCount == 5);
+ EATEST_VERIFY(pT2->mc == 0);
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+
+ pT2.reset(new A[1]);
+ pT2[0].mc = 1;
+ EATEST_VERIFY(pT2->mc == 1);
+ EATEST_VERIFY(A::mCount == 1);
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+
+ shared_array<A> pT3(new A[2]);
+ EATEST_VERIFY(A::mCount == 3);
+
+ pT2.swap(pT3);
+ pT2[0].mc = 2;
+ EATEST_VERIFY(pT2->mc == 2);
+ EATEST_VERIFY(pT3->mc == 1);
+ EATEST_VERIFY(A::mCount == 3);
+
+ swap(pT2, pT3);
+ EATEST_VERIFY(pT2->mc == 1);
+ EATEST_VERIFY(pT3->mc == 2);
+ EATEST_VERIFY(A::mCount == 3);
+ if(!pT2)
+ EATEST_VERIFY(!pT2.get()); // Will fail
+
+ shared_array<A> pT4;
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+ EATEST_VERIFY(A::mCount == 3);
+ if(pT4)
+ EATEST_VERIFY(pT4.get()); // Will fail
+ if(!(!pT4))
+ EATEST_VERIFY(pT4.get()); // Will fail
+
+ pT4 = pT2;
+ EATEST_VERIFY(pT2.use_count() == 2);
+ EATEST_VERIFY(pT4.use_count() == 2);
+ EATEST_VERIFY(!pT2.unique());
+ EATEST_VERIFY(!pT4.unique());
+ EATEST_VERIFY(A::mCount == 3);
+ EATEST_VERIFY(pT2 == pT4);
+ EATEST_VERIFY(pT2 != pT3);
+ EATEST_VERIFY(!(pT2 < pT4)); // They should be equal
+
+ shared_array<A> pT5(pT4);
+ EATEST_VERIFY(pT4 == pT5);
+ EATEST_VERIFY(pT2.use_count() == 3);
+ EATEST_VERIFY(pT4.use_count() == 3);
+ EATEST_VERIFY(pT5.use_count() == 3);
+ EATEST_VERIFY(!pT5.unique());
+
+ pT4 = shared_array<A>(0);
+ EATEST_VERIFY(pT4.unique());
+ EATEST_VERIFY(pT4.use_count() == 1);
+ EATEST_VERIFY(pT2.use_count() == 2);
+
+ EATEST_VERIFY(A::mCount == 3);
+ }
+
+ EATEST_VERIFY(A::mCount == 0);
+
+ return nErrorCount;
+}
+
+
+
+static int Test_linked_ptr()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount(0);
+
+ {
+ linked_ptr<int> pT1(new int(5));
+ EATEST_VERIFY(*pT1.get() == 5);
+ EATEST_VERIFY(pT1.get() == get_pointer(pT1));
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1.unique());
+
+ linked_ptr<int> pT2;
+ EATEST_VERIFY(pT1 != pT2);
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1.unique());
+
+ pT2 = pT1;
+ EATEST_VERIFY(pT1.use_count() == 2);
+ EATEST_VERIFY(pT2.use_count() == 2);
+ EATEST_VERIFY(!pT1.unique());
+ EATEST_VERIFY(!(pT1 < pT2)); // They should be equal
+ EATEST_VERIFY(pT1 == pT2);
+
+ *pT1 = 3;
+ EATEST_VERIFY(*pT1.get() == 3);
+ EATEST_VERIFY(*pT1 == 3);
+ EATEST_VERIFY(*pT2 == 3);
+
+ pT2.reset((int*)NULL);
+ EATEST_VERIFY(pT2.unique());
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT1.unique());
+ EATEST_VERIFY(pT1.use_count() == 1);
+ EATEST_VERIFY(pT1 != pT2);
+ }
+
+ {
+ EATEST_VERIFY(A::mCount == 0);
+
+ linked_ptr<A> pT2(new A(0));
+ EATEST_VERIFY(A::mCount == 1);
+ EATEST_VERIFY(pT2->mc == 0);
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+
+ pT2.reset(new A(1));
+ EATEST_VERIFY(pT2->mc == 1);
+ EATEST_VERIFY(A::mCount == 1);
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+
+ linked_ptr<A> pT3(new A(2));
+ EATEST_VERIFY(A::mCount == 2);
+
+ linked_ptr<A> pT4;
+ EATEST_VERIFY(pT2.use_count() == 1);
+ EATEST_VERIFY(pT2.unique());
+ EATEST_VERIFY(A::mCount == 2);
+ if(pT4)
+ EATEST_VERIFY(pT4.get()); // Will fail
+ if(!(!pT4))
+ EATEST_VERIFY(pT4.get()); // Will fail
+
+ pT4 = pT2;
+ EATEST_VERIFY(pT2.use_count() == 2);
+ EATEST_VERIFY(pT4.use_count() == 2);
+ EATEST_VERIFY(!pT2.unique());
+ EATEST_VERIFY(!pT4.unique());
+ EATEST_VERIFY(A::mCount == 2);
+ EATEST_VERIFY(pT2 == pT4);
+ EATEST_VERIFY(pT2 != pT3);
+ EATEST_VERIFY(!(pT2 < pT4)); // They should be equal
+
+ linked_ptr<A> pT5(pT4);
+ EATEST_VERIFY(pT4 == pT5);
+ EATEST_VERIFY(pT2.use_count() == 3);
+ EATEST_VERIFY(pT4.use_count() == 3);
+ EATEST_VERIFY(pT5.use_count() == 3);
+ EATEST_VERIFY(!pT5.unique());
+
+ pT4 = linked_ptr<A>((A*)NULL);
+ EATEST_VERIFY(pT4.unique());
+ EATEST_VERIFY(pT4.use_count() == 1);
+ EATEST_VERIFY(pT2.use_count() == 2);
+
+ EATEST_VERIFY(A::mCount == 2);
+ }
+
+ { // Do some force_delete tests.
+ linked_ptr<A> pT2(new A(0));
+ linked_ptr<A> pT3(pT2);
+ pT2.force_delete();
+ pT3.force_delete();
+ }
+
+ EATEST_VERIFY(A::mCount == 0);
+
+
+ { // Verify that subclasses are usable.
+ bool bAlloc = false;
+
+ eastl::linked_ptr<DerivedMockObject> pDMO(new DerivedMockObject(&bAlloc));
+ eastl::linked_ptr<MockObject> a1(pDMO);
+ eastl::linked_ptr<MockObject> a2;
+
+ a2 = pDMO;
+ }
+
+ { // Test regression for a bug.
+ linked_ptr<A> pT2;
+ linked_ptr<A> pT3(pT2); // In the bug linked_ptr::mpPrev and mpNext were not initialized via this ctor.
+ pT3.reset(new A); // In the bug this would crash due to unintialized mpPrev/mpNext.
+
+ linked_ptr<B> pT4;
+ linked_ptr<A> pT5(pT4);
+ pT5.reset(new A);
+
+ linked_array<A> pT6;
+ linked_array<A> pT7(pT6);
+ pT7.reset(new A[1]);
+ }
+
+ return nErrorCount;
+}
+
+
+
+static int Test_linked_array()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount(0);
+
+ {
+ // Tests go here.
+ }
+
+ { // Do some force_delete tests.
+ linked_array<A> pT2(new A[2]);
+ linked_array<A> pT3(pT2);
+ pT2.force_delete();
+ pT3.force_delete();
+ }
+
+ EATEST_VERIFY(A::mCount == 0);
+
+
+ return nErrorCount;
+}
+
+
+
+static int Test_intrusive_ptr()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ { // Test ctor/dtor
+ intrusive_ptr<RefCountTest> ip1;
+ intrusive_ptr<RefCountTest> ip2(NULL, false);
+ intrusive_ptr<RefCountTest> ip3(NULL, true);
+ intrusive_ptr<RefCountTest> ip4(new RefCountTest, true);
+ intrusive_ptr<RefCountTest> ip5(new RefCountTest, false);
+ intrusive_ptr<RefCountTest> ip6(ip1);
+ intrusive_ptr<RefCountTest> ip7(ip4);
+
+ EATEST_VERIFY(ip1.get() == NULL);
+ EATEST_VERIFY(!ip1);
+
+ EATEST_VERIFY(ip2.get() == NULL);
+ EATEST_VERIFY(!ip2);
+
+ EATEST_VERIFY(ip3.get() == NULL);
+ EATEST_VERIFY(!ip3);
+
+ EATEST_VERIFY(ip4.get() != NULL);
+ EATEST_VERIFY(ip4.get()->mRefCount == 2);
+ EATEST_VERIFY(ip4);
+
+ EATEST_VERIFY(ip5.get() != NULL);
+ EATEST_VERIFY(ip5.get()->mRefCount == 0);
+ ip5.get()->AddRef();
+ EATEST_VERIFY(ip5.get()->mRefCount == 1);
+ EATEST_VERIFY(ip5);
+
+ EATEST_VERIFY(ip6.get() == NULL);
+ EATEST_VERIFY(!ip6);
+
+ EATEST_VERIFY(ip7.get() != NULL);
+ EATEST_VERIFY(ip7.get()->mRefCount == 2);
+ EATEST_VERIFY(ip7);
+ }
+
+ {
+ // Test move-ctor
+ {
+ VERIFY(RefCountTest::mCount == 0);
+ intrusive_ptr<RefCountTest> ip1(new RefCountTest);
+ VERIFY(RefCountTest::mCount == 1);
+ VERIFY(ip1->mRefCount == 1);
+ {
+ intrusive_ptr<RefCountTest> ip2(eastl::move(ip1));
+ VERIFY(ip1.get() != ip2.get());
+ VERIFY(ip2->mRefCount == 1);
+ VERIFY(RefCountTest::mCount == 1);
+ }
+ VERIFY(ip1.get() == nullptr);
+ VERIFY(RefCountTest::mCount == 0);
+ }
+
+ // Test move-assignment
+ {
+ VERIFY(RefCountTest::mCount == 0);
+ intrusive_ptr<RefCountTest> ip1(new RefCountTest);
+ VERIFY(RefCountTest::mCount == 1);
+ VERIFY(ip1->mRefCount == 1);
+ {
+ intrusive_ptr<RefCountTest> ip2;
+ ip2 = eastl::move(ip1);
+ VERIFY(ip1.get() != ip2.get());
+ VERIFY(ip2->mRefCount == 1);
+ VERIFY(RefCountTest::mCount == 1);
+ }
+ VERIFY(ip1.get() == nullptr);
+ VERIFY(RefCountTest::mCount == 0);
+ }
+ }
+
+ { // Test modifiers (assign, attach, detach, reset, swap)
+ RefCountTest* const p1 = new RefCountTest;
+ RefCountTest* const p2 = new RefCountTest;
+ intrusive_ptr<RefCountTest> ip1;
+ intrusive_ptr<RefCountTest> ip2;
+
+ ip1 = p1;
+ ip2 = p2;
+ EATEST_VERIFY(ip1.get() == p1);
+ EATEST_VERIFY((*ip1).mRefCount == 1);
+ EATEST_VERIFY(ip1->mRefCount == 1);
+ ip1.detach();
+ EATEST_VERIFY(ip1.get() == NULL);
+ ip1.attach(p1);
+ EATEST_VERIFY(ip1.get() == p1);
+ EATEST_VERIFY(ip1->mRefCount == 1);
+ ip1.swap(ip2);
+ EATEST_VERIFY(ip1.get() == p2);
+ EATEST_VERIFY(ip2.get() == p1);
+ ip1.swap(ip2);
+ ip1 = ip2;
+ EATEST_VERIFY(ip1 == p2);
+ ip1.reset();
+ EATEST_VERIFY(ip1.get() == NULL);
+ EATEST_VERIFY(ip2.get() == p2);
+ ip2.reset();
+ EATEST_VERIFY(ip2.get() == NULL);
+ }
+
+ { // Test external functions
+ intrusive_ptr<RefCountTest> ip1;
+ intrusive_ptr<RefCountTest> ip2(new RefCountTest);
+ intrusive_ptr<RefCountTest> ip3(ip1);
+ intrusive_ptr<RefCountTest> ip4(ip2);
+
+ // The VC++ code scanner crashes when it scans this code.
+ EATEST_VERIFY(get_pointer(ip1) == NULL);
+ EATEST_VERIFY(get_pointer(ip2) != NULL);
+ EATEST_VERIFY(get_pointer(ip3) == get_pointer(ip1));
+ EATEST_VERIFY(get_pointer(ip4) == get_pointer(ip2));
+
+ EATEST_VERIFY(ip3 == ip1);
+ EATEST_VERIFY(ip4 == ip2);
+ EATEST_VERIFY(ip1 == ip3);
+ EATEST_VERIFY(ip2 == ip4);
+
+ EATEST_VERIFY(ip1 != ip2);
+ EATEST_VERIFY(ip3 != ip4);
+ EATEST_VERIFY(ip2 != ip1);
+ EATEST_VERIFY(ip4 != ip3);
+
+ EATEST_VERIFY(ip3 == ip1.get());
+ EATEST_VERIFY(ip4 == ip2.get());
+ EATEST_VERIFY(ip1 == ip3.get());
+ EATEST_VERIFY(ip2 == ip4.get());
+
+ EATEST_VERIFY(ip1 != ip2.get());
+ EATEST_VERIFY(ip3 != ip4.get());
+ EATEST_VERIFY(ip2 != ip1.get());
+ EATEST_VERIFY(ip4 != ip3.get());
+
+ EATEST_VERIFY(ip3.get() == ip1);
+ EATEST_VERIFY(ip4.get() == ip2);
+ EATEST_VERIFY(ip1.get() == ip3);
+ EATEST_VERIFY(ip2.get() == ip4);
+
+ EATEST_VERIFY(ip1.get() != ip2);
+ EATEST_VERIFY(ip3.get() != ip4);
+ EATEST_VERIFY(ip2.get() != ip1);
+ EATEST_VERIFY(ip4.get() != ip3);
+
+ EATEST_VERIFY((ip4 < ip3) || (ip3 < ip4));
+
+ swap(ip1, ip3);
+ EATEST_VERIFY(get_pointer(ip3) == get_pointer(ip1));
+
+ swap(ip2, ip4);
+ EATEST_VERIFY(get_pointer(ip2) == get_pointer(ip4));
+
+ swap(ip1, ip2);
+ EATEST_VERIFY(get_pointer(ip1) != NULL);
+ EATEST_VERIFY(get_pointer(ip2) == NULL);
+ EATEST_VERIFY(get_pointer(ip1) == get_pointer(ip4));
+ EATEST_VERIFY(get_pointer(ip2) == get_pointer(ip3));
+ }
+
+ { // Misc tests.
+ intrusive_ptr<Test> ip;
+ EATEST_VERIFY(ip.get() == NULL);
+
+ ip.reset();
+ EATEST_VERIFY(ip.get() == NULL);
+
+ intrusive_ptr<Test> ip2(NULL, false);
+ EATEST_VERIFY(ip.get() == NULL);
+
+ bool boolValue = false;
+ Test* pTest = new Test(&boolValue);
+ EATEST_VERIFY(boolValue);
+ pTest->AddRef();
+ intrusive_ptr<Test> ip3(pTest, false);
+ EATEST_VERIFY(ip3.get() == pTest);
+ ip3.reset();
+ EATEST_VERIFY(!boolValue);
+ }
+
+ { // Misc tests.
+ bool boolArray[3];
+ memset(boolArray, 0, sizeof(boolArray));
+
+ Test* p1 = new Test(boolArray + 0);
+ EATEST_VERIFY(boolArray[0] && !boolArray[1] && !boolArray[2]);
+ intrusive_ptr<Test> arc1(p1);
+ EATEST_VERIFY(boolArray[0] && !boolArray[1] && !boolArray[2]);
+
+ Test* p2 = new Test(boolArray + 1);
+ EATEST_VERIFY(boolArray[0] && boolArray[1] && !boolArray[2]);
+ arc1 = p2;
+ EATEST_VERIFY(!boolArray[0] && boolArray[1] && !boolArray[2]);
+
+ Test* p3 = new Test(boolArray + 2);
+ EATEST_VERIFY(!boolArray[0] && boolArray[1] && boolArray[2]);
+ arc1 = p3;
+ EATEST_VERIFY(!boolArray[0] && !boolArray[1] && boolArray[2]);
+ arc1 = NULL;
+
+ EATEST_VERIFY(!boolArray[0] && !boolArray[1] && !boolArray[2]);
+ }
+
+ { // Test intrusive_ptr_add_ref() / intrusive_ptr_release()
+ IntrusiveCustom* const pIC = new IntrusiveCustom;
+
+ {
+ intrusive_ptr<IntrusiveCustom> bp = intrusive_ptr<IntrusiveCustom>(pIC);
+ intrusive_ptr<IntrusiveCustom> ap = bp;
+ }
+
+ EATEST_VERIFY((IntrusiveCustom::mAddRefCallCount > 0) && (IntrusiveCustom::mReleaseCallCount == IntrusiveCustom::mAddRefCallCount));
+ }
+
+ { // Regression
+ intrusive_ptr<IntrusiveChild> bp = intrusive_ptr<IntrusiveChild>(new IntrusiveChild);
+ intrusive_ptr<IntrusiveParent> ap = bp;
+ }
+
+ return nErrorCount;
+}
+
+
+struct RandomLifetimeObject : public eastl::safe_object
+{
+ void DoSomething() const { }
+};
+
+
+
+static int Test_safe_ptr()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ { // non-const RandomLifetimeObject
+ RandomLifetimeObject* pObject = new RandomLifetimeObject;
+ eastl::safe_ptr<RandomLifetimeObject> pSafePtr(pObject);
+
+ eastl::safe_ptr<RandomLifetimeObject> pSafePtrCopy1 = pSafePtr;
+ eastl::safe_ptr<RandomLifetimeObject> pSafePtrCopy2(pSafePtr);
+
+ pSafePtr->DoSomething();
+
+ eastl::safe_ptr<RandomLifetimeObject>* pSafePtrCopy3 = new eastl::safe_ptr<RandomLifetimeObject>(pSafePtr);
+ eastl::safe_ptr<RandomLifetimeObject>* pSafePtrCopy4 = new eastl::safe_ptr<RandomLifetimeObject>(pSafePtr);
+ EATEST_VERIFY(pSafePtrCopy3->get() == pObject);
+ EATEST_VERIFY(pSafePtrCopy4->get() == pObject);
+ delete pSafePtrCopy3;
+ delete pSafePtrCopy4;
+
+ delete pSafePtr;
+
+ EATEST_VERIFY(pSafePtrCopy1.get() == NULL);
+ EATEST_VERIFY(pSafePtrCopy2.get() == NULL);
+ }
+
+ { // const RandomLifetimeObject
+ RandomLifetimeObject* pObject = new RandomLifetimeObject;
+ eastl::safe_ptr<const RandomLifetimeObject> pSafePtr(pObject);
+
+ eastl::safe_ptr<const RandomLifetimeObject> pSafePtrCopy1(pSafePtr);
+ eastl::safe_ptr<const RandomLifetimeObject> pSafePtrCopy2 = pSafePtr;
+
+ pSafePtr->DoSomething();
+
+ eastl::safe_ptr<const RandomLifetimeObject>* pSafePtrCopy3 = new eastl::safe_ptr<const RandomLifetimeObject>(pSafePtr);
+ eastl::safe_ptr<const RandomLifetimeObject>* pSafePtrCopy4 = new eastl::safe_ptr<const RandomLifetimeObject>(pSafePtr);
+ EATEST_VERIFY(pSafePtrCopy3->get() == pObject);
+ EATEST_VERIFY(pSafePtrCopy4->get() == pObject);
+ delete pSafePtrCopy3;
+ delete pSafePtrCopy4;
+
+ delete pSafePtr;
+
+ EATEST_VERIFY(pSafePtrCopy1.get() == NULL);
+ EATEST_VERIFY(pSafePtrCopy2.get() == NULL);
+ }
+
+ return nErrorCount;
+}
+
+
+int TestSmartPtr()
+{
+ using namespace SmartPtrTest;
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ nErrorCount += Test_unique_ptr();
+ nErrorCount += Test_scoped_ptr();
+ nErrorCount += Test_scoped_array();
+ nErrorCount += Test_shared_ptr();
+ nErrorCount += Test_shared_ptr_thread();
+ nErrorCount += Test_weak_ptr();
+ nErrorCount += Test_shared_array();
+ nErrorCount += Test_linked_ptr();
+ nErrorCount += Test_linked_array();
+ nErrorCount += Test_intrusive_ptr();
+ nErrorCount += Test_safe_ptr();
+
+ EATEST_VERIFY(A::mCount == 0);
+ EATEST_VERIFY(RefCountTest::mCount == 0);
+ EATEST_VERIFY(NamedClass::mnCount == 0);
+ EATEST_VERIFY(Y::mnCount == 0);
+ EATEST_VERIFY(ACLS::mnCount == 0);
+ EATEST_VERIFY(BCLS::mnCount == 0);
+ EATEST_VERIFY(A1::mnCount == 0);
+ EATEST_VERIFY(B1::mnCount == 0);
+
+ return nErrorCount;
+}
+
+EA_RESTORE_VC_WARNING() // 4702
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestSort.cpp b/EASTL/test/source/TestSort.cpp
new file mode 100644
index 0000000..114a73b
--- /dev/null
+++ b/EASTL/test/source/TestSort.cpp
@@ -0,0 +1,961 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include <EABase/eabase.h>
+
+// Some versions of GCC generate an array bounds warning in opt builds which
+// doesn't say what line below it comes from and doesn't appear to be a valid
+// warning. In researching this on the Internet it appears that this is a
+// known problem with GCC.
+#if defined(EA_DISABLE_GCC_WARNING)
+ EA_DISABLE_GCC_WARNING(-Warray-bounds)
+#endif
+
+#include "EASTLTest.h"
+#include <EASTL/sort.h>
+#include <EASTL/bonus/sort_extra.h>
+#include <EASTL/vector.h>
+#include <EASTL/list.h>
+#include <EASTL/deque.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/allocator.h>
+#include <EASTL/numeric.h>
+#include <EASTL/random.h>
+#include <EABase/eahave.h>
+#include <cmath>
+
+
+namespace eastl
+{
+ namespace Internal
+ {
+ typedef eastl::vector<int> IntArray;
+ typedef eastl::vector<IntArray> IntArrayArray;
+
+
+ // IntArrayCompare
+ // Used to compare IntArray objects.
+ struct IntArrayCompare
+ {
+ bool operator()(const IntArray& a, const IntArray& b)
+ { return a.front() < b.front(); }
+ };
+
+
+ // SafeFloatCompare
+ //
+ // Float comparison has a problem for the case that either of the floats are a NaN.
+ // If you use a NaN in a sort function that uses default floating point comparison then
+ // you will get undefined behavior, as all NaNs compare false. This compare function
+ // class sorts floats such that all negative NaNs sort lower than all integers, and all
+ // positive NaNs sort higher than all integers.
+ //
+ // Example usage:
+ // eastl::sort(floatArray.begin(), floatArray.end(), SafeFloatCompare());
+ //
+ struct SafeFloatCompare
+ {
+ union FloatInt32{ float f; int32_t i; };
+
+ bool operator()(float a, float b) const
+ {
+ #if defined(EA_HAVE_ISNAN)
+ bool aNan = (EA_HAVE_ISNAN(a) != 0);
+ bool bNan = (EA_HAVE_ISNAN(b) != 0);
+ #else
+ bool aNan = (a != a); // This works as long as the compiler doesn't do any tricks to optimize it away.
+ bool bNan = (b != b);
+ #endif
+
+ if(!aNan && !bNan)
+ return (a < b);
+
+ FloatInt32 fia = { a };
+ FloatInt32 fib = { b };
+
+ if(aNan)
+ {
+ if(bNan)
+ return (fia.i < fib.i); // Both are NaNs, so do a binary compare.
+ else
+ return (fia.i < 0); // All negative NaNs are assumed to be less than all non-NaNs.
+ }
+ else
+ return (0 < fib.i); // All negative NaNs are assumed to be less than all non-NaNs.
+ }
+ };
+
+
+
+ // StatefulCompare
+ // Used to verify that sort<int, StatefulCompare&>() respects the
+ // fact that StatefulCompare is passed by reference instead of by value.
+ // All existing commercial STL implementations fail to do what the user
+ // wants and instead pass around the compare object by value, even if
+ // the user specifically asks to use it by reference. EASTL doesn't
+ // have this problem.
+ struct StatefulCompare
+ {
+ static int nCtorCount;
+ static int nDtorCount;
+ static int nCopyCount;
+
+ StatefulCompare()
+ { nCtorCount++; }
+
+ StatefulCompare(StatefulCompare&)
+ { nCopyCount++; }
+
+ ~StatefulCompare()
+ { nDtorCount++; }
+
+ StatefulCompare& operator=(const StatefulCompare&)
+ { nCopyCount++; return *this; }
+
+ bool operator()(int a, int b)
+ { return a < b; }
+
+ static void Reset()
+ { nCtorCount = 0; nDtorCount = 0; nCopyCount = 0; }
+ };
+
+ int StatefulCompare::nCtorCount = 0;
+ int StatefulCompare::nDtorCount = 0;
+ int StatefulCompare::nCopyCount = 0;
+
+
+ // TestObjectPtrCompare
+ // Used to compare sorted objects by pointer instead of value.
+ struct TestObjectPtrCompare
+ {
+ bool operator()(TestObject* a, TestObject* b)
+ { return a->mX < b->mX; }
+ };
+
+
+ // TestObjectIndexCompare
+ // Used to compare sorted objects by array index instead of value.
+ struct TestObjectIndexCompare
+ {
+ vector<TestObject>* mpArray;
+
+ TestObjectIndexCompare(vector<TestObject>* pArray) : mpArray(pArray) { }
+ TestObjectIndexCompare(const TestObjectIndexCompare& x) : mpArray(x.mpArray){ }
+ TestObjectIndexCompare& operator=(const TestObjectIndexCompare& x) { mpArray = x.mpArray; return *this; }
+
+ bool operator()(eastl_size_t a, eastl_size_t b)
+ { return (*mpArray)[a] < (*mpArray)[b]; }
+ };
+
+ // Radix sort elements
+ template <typename Key>
+ struct RadixSortElement
+ {
+ typedef Key radix_type;
+ Key mKey;
+ uint16_t mData;
+
+ bool operator<(const RadixSortElement<Key> &other) const
+ {
+ return mKey < other.mKey;
+ }
+ };
+
+ typedef RadixSortElement<uint8_t> RadixSortElement8;
+ typedef RadixSortElement<uint16_t> RadixSortElement16;
+ typedef RadixSortElement<uint32_t> RadixSortElement32;
+
+ template <typename integer_type>
+ struct identity_extract_radix_key
+ {
+ typedef integer_type radix_type;
+
+ const radix_type operator()(const integer_type& x) const
+ {
+ return x;
+ }
+ };
+
+ struct TestNoLessOperator
+ {
+ int i {};
+ };
+ } // namespace Internal
+
+ template <>
+ struct less<Internal::TestNoLessOperator>
+ {
+ bool operator()(const Internal::TestNoLessOperator& lhs, const Internal::TestNoLessOperator& rhs) const noexcept
+ {
+ return lhs.i < rhs.i;
+ }
+ };
+
+} // namespace eastl
+
+int TestSort()
+{
+ using namespace eastl;
+ using namespace Internal;
+
+ int nErrorCount = 0;
+
+ EASTLTest_Rand rng(EA::UnitTest::GetRandSeed());
+
+ {
+ // is_sorted
+ int array[] = { 0, 1, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 2, 2, 1, 0 };
+
+ EATEST_VERIFY( is_sorted(array + 0, array + 0));
+ EATEST_VERIFY( is_sorted(array + 2, array + 4));
+ EATEST_VERIFY( is_sorted(array + 0, array + 10));
+ EATEST_VERIFY(!is_sorted(array + 0, array + 14));
+ EATEST_VERIFY( is_sorted(array + 11, array + 23, eastl::greater<int>()));
+ }
+
+ {
+ // is_sorted_until
+ int sorted[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ int notsorted[] = { 0, 1, 2, 3, 4, 42, 6, 7, 8, 9 };
+
+ EATEST_VERIFY( is_sorted_until(sorted + EAArrayCount(sorted), sorted + EAArrayCount(sorted)) == sorted + EAArrayCount(sorted) );
+ EATEST_VERIFY( is_sorted_until(sorted , sorted + EAArrayCount(sorted)) == sorted + EAArrayCount(sorted) );
+
+ EATEST_VERIFY( is_sorted_until(sorted + 0, sorted + 0) == sorted );
+ EATEST_VERIFY( is_sorted_until(sorted + 2, sorted + 8) == sorted + 8 );
+
+ EATEST_VERIFY( is_sorted_until(notsorted + 2, notsorted + 8) == notsorted + 6 );
+
+ // is_sorted_until (with compare function)
+ EATEST_VERIFY( is_sorted_until(sorted + EAArrayCount(sorted), sorted + EAArrayCount(sorted), eastl::less<int>()) == sorted + EAArrayCount(sorted) );
+ EATEST_VERIFY( is_sorted_until(notsorted + 2, notsorted + 8, eastl::less<int>()) == notsorted + 6 );
+ }
+
+ // Sort arrays of size 0 - N. Sort M random permutations of each.
+ {
+ vector<int64_t> intArray, intArraySaved;
+
+ for(int i = 0; i < (150 + (gEASTL_TestLevel * 200)); i += (i < 5) ? 1 : 37) // array sizes of 0 to 300 - 2100, depending on test level.
+ {
+ // intArraySaved.clear(); // Do we want to do this?
+
+ for(int n = 0; n < i; n++)
+ {
+ intArraySaved.push_back(n);
+
+ if(rng.RandLimit(10) == 0)
+ {
+ intArraySaved.push_back(n);
+
+ if(rng.RandLimit(5) == 0)
+ intArraySaved.push_back(n);
+ }
+ }
+ const int64_t expectedSum = eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0));
+
+ for(int j = 0; j < 300 + (gEASTL_TestLevel * 50); j++)
+ {
+ eastl::random_shuffle(intArraySaved.begin(), intArraySaved.end(), rng);
+
+ intArray = intArraySaved;
+ bubble_sort(intArray.begin(), intArray.end());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ shaker_sort(intArray.begin(), intArray.end());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ insertion_sort(intArray.begin(), intArray.end());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ selection_sort(intArray.begin(), intArray.end());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ shell_sort(intArray.begin(), intArray.end());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ comb_sort(intArray.begin(), intArray.end());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ heap_sort(intArray.begin(), intArray.end());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ merge_sort(intArray.begin(), intArray.end(), *get_default_allocator((EASTLAllocatorType*)NULL));
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ vector<int64_t> buffer(intArray.size());
+ merge_sort_buffer(intArray.begin(), intArray.end(), buffer.data());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ quick_sort(intArray.begin(), intArray.end());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+
+ intArray = intArraySaved;
+ buffer.resize(intArray.size()/2);
+ tim_sort_buffer(intArray.begin(), intArray.end(), buffer.data());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ EATEST_VERIFY(eastl::accumulate(begin(intArraySaved), end(intArraySaved), int64_t(0)) == expectedSum);
+ }
+ }
+ }
+
+ // Test tim sort with a specific array size and seed that caused a crash
+ {
+ vector<int64_t> intArray;
+ int i = 1000000;
+ {
+ EASTLTest_Rand testRng(232526);
+
+ for (int n = 0; n < i; n++)
+ {
+ intArray.push_back(testRng.Rand());
+ }
+ vector<int64_t> buffer(intArray.size() / 2);
+ tim_sort_buffer(intArray.begin(), intArray.end(), buffer.data());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ }
+ }
+
+ // Test insertion_sort() does not invalidate a BidirectionalIterator by doing --BidirectionalIterator.begin()
+ {
+ // Test Passes if the Test doesn't crash
+ eastl::deque<int> deque;
+ deque.push_back(1);
+
+ insertion_sort(deque.begin(), deque.end());
+
+ insertion_sort(deque.begin(), deque.end(), eastl::less<int>{});
+ }
+
+
+ // TestObject sorting
+ TestObject::Reset();
+ {
+ vector<TestObject> toArray, toArraySaved;
+
+ for(int i = 0; i < (150 + (gEASTL_TestLevel * 200)); i += (i < 5) ? 1 : 37) // array sizes of 0 to 300 - 2100, depending on test level.
+ {
+ for(int n = 0; n < i; n++)
+ {
+ toArraySaved.push_back(TestObject(n));
+
+ if(rng.RandLimit(10) == 0)
+ {
+ toArraySaved.push_back(TestObject(n));
+
+ if(rng.RandLimit(5) == 0)
+ toArraySaved.push_back(TestObject(n));
+ }
+ }
+
+ for(int j = 0; j < 300 + (gEASTL_TestLevel * 50); j++)
+ {
+ eastl::random_shuffle(toArraySaved.begin(), toArraySaved.end(), rng);
+
+ toArray = toArraySaved;
+ bubble_sort(toArray.begin(), toArray.end());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ toArray = toArraySaved;
+ shaker_sort(toArray.begin(), toArray.end());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ toArray = toArraySaved;
+ insertion_sort(toArray.begin(), toArray.end());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ toArray = toArraySaved;
+ selection_sort(toArray.begin(), toArray.end());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ toArray = toArraySaved;
+ shell_sort(toArray.begin(), toArray.end());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ toArray = toArraySaved;
+ comb_sort(toArray.begin(), toArray.end());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ toArray = toArraySaved;
+ heap_sort(toArray.begin(), toArray.end());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ // Not ready yet:
+ toArray = toArraySaved;
+ merge_sort(toArray.begin(), toArray.end(), *get_default_allocator((EASTLAllocatorType*)NULL));
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ toArray = toArraySaved;
+ quick_sort(toArray.begin(), toArray.end());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+
+ toArray = toArraySaved;
+ vector<TestObject> buffer(toArray.size()/2);
+ tim_sort_buffer(toArray.begin(), toArray.end(), buffer.data());
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end()));
+ }
+ }
+ }
+
+ // Test that stable sorting algorithms are actually stable
+ {
+ struct StableSortTestObj
+ {
+ StableSortTestObj()
+ {
+ }
+
+ StableSortTestObj(int value)
+ :value(value)
+ ,initialPositionIndex(0)
+ {
+ }
+
+ int value;
+ size_t initialPositionIndex;
+ };
+
+ // During the test this comparison is used to sort elements based on value.
+ struct StableSortCompare
+ {
+ bool operator()(const StableSortTestObj& a, const StableSortTestObj& b)
+ {
+ return a.value < b.value;
+ }
+ };
+
+ // During the test this comparison is used to verify the sort was a stable sort. i.e. if values are the same then
+ // their relative position should be maintained.
+ struct StableSortCompareForStability
+ {
+ bool operator()(const StableSortTestObj& a, const StableSortTestObj& b)
+ {
+ if (a.value != b.value)
+ {
+ return a.value < b.value;
+ }
+ else
+ {
+ return a.initialPositionIndex < b.initialPositionIndex;
+ }
+ }
+ };
+
+ vector<StableSortTestObj> toArray, toArraySaved;
+ StableSortCompare compare;
+ StableSortCompareForStability compareForStability;
+
+ for (int i = 0; i < (150 + (gEASTL_TestLevel * 200)); i += (i < 5) ? 1 : 37) // array sizes of 0 to 300 - 2100, depending on test level.
+ {
+ for (int n = 0; n < i; n++)
+ {
+ toArraySaved.push_back(StableSortTestObj(n));
+
+ if (rng.RandLimit(10) == 0)
+ {
+ toArraySaved.push_back(StableSortTestObj(n));
+
+ if (rng.RandLimit(5) == 0)
+ toArraySaved.push_back(StableSortTestObj(n));
+ }
+ }
+ vector<StableSortTestObj> tempBuffer;
+ tempBuffer.resize(toArraySaved.size());
+
+ for (int j = 0; j < 300 + (gEASTL_TestLevel * 50); j++)
+ {
+ eastl::random_shuffle(toArraySaved.begin(), toArraySaved.end(), rng);
+ // Store the intial position of each element in the array before sorting. This position can then be used to verify that the sorting operation is stable.
+ for (vector<StableSortTestObj>::size_type k = 0; k < toArraySaved.size(); k++)
+ {
+ toArraySaved[k].initialPositionIndex = k;
+ }
+
+ toArray = toArraySaved;
+ bubble_sort(toArray.begin(), toArray.end(), compare);
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end(), compareForStability));
+
+ toArray = toArraySaved;
+ shaker_sort(toArray.begin(), toArray.end(), compare);
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end(), compareForStability));
+
+ toArray = toArraySaved;
+ insertion_sort(toArray.begin(), toArray.end(), compare);
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end(), compareForStability));
+
+ toArray = toArraySaved;
+ tim_sort_buffer(toArray.begin(), toArray.end(), tempBuffer.data(), compare);
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end(), compareForStability));
+
+ toArray = toArraySaved;
+ merge_sort(toArray.begin(), toArray.end(), *get_default_allocator((EASTLAllocatorType*)NULL), compare);
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end(), compareForStability));
+
+ toArray = toArraySaved;
+ merge_sort_buffer(toArray.begin(), toArray.end(), tempBuffer.data(), compare);
+ EATEST_VERIFY(is_sorted(toArray.begin(), toArray.end(), compareForStability));
+ }
+ }
+ }
+
+ {
+ // OutputIterator merge(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result)
+ // This is tested by merge_sort.
+ }
+
+
+ {
+ // void partial_sort(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last)
+ // This is tested by quick_sort.
+ }
+
+
+ {
+ // void nth_element(RandomAccessIterator first, RandomAccessIterator nth, RandomAccessIterator last)
+ // void nth_element(RandomAccessIterator first, RandomAccessIterator nth, RandomAccessIterator last, Compare compare)
+ const int intArrayInit[16] = { 4, 2, 8, 6, 9, 1, 1, 4, 0, 5, 5, 7, 8, 9, 3, 3 };
+ int intArraySorted[16]; // Same as intArrayInit but sorted
+ int intArray[16];
+ size_t i, j;
+
+ // We test many combinations of nth_element on the int array.
+ for(i = 1; i < 16; i++)
+ {
+ for(j = 0; j < i; j++)
+ {
+ eastl::copy(intArrayInit, intArrayInit + i, intArraySorted);
+ eastl::sort(intArraySorted, intArraySorted + i);
+
+ eastl::copy(intArrayInit, intArrayInit + i, intArray);
+ nth_element(intArray, intArray + j, intArray + i);
+ EATEST_VERIFY(intArray[j] == intArraySorted[j]);
+ }
+ }
+
+ for(i = 1; i < 16; i++)
+ {
+ for(j = 0; j < i; j++)
+ {
+ eastl::copy(intArrayInit, intArrayInit + i, intArraySorted);
+ eastl::sort(intArraySorted, intArraySorted + i);
+
+ eastl::copy(intArrayInit, intArrayInit + 16, intArray);
+ nth_element(intArray, intArray + j, intArray + i, eastl::less<int>());
+ EATEST_VERIFY(intArray[j] == intArraySorted[j]);
+ }
+ }
+ }
+
+
+ {
+ // void radix_sort(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator buffer);
+ const uint32_t kCount = 100;
+
+ {
+ RadixSortElement32* pRadixSortElementArray32 = new RadixSortElement32[kCount];
+ RadixSortElement32* pRadixSortElementArrayTemp32 = new RadixSortElement32[kCount];
+ for(uint32_t i = 0; i < kCount; i++)
+ {
+ pRadixSortElementArray32[i].mKey = (uint16_t)(kCount - i);
+ pRadixSortElementArray32[i].mData = (uint16_t)i;
+ }
+ radix_sort<RadixSortElement32*, extract_radix_key<RadixSortElement32> >(pRadixSortElementArray32, pRadixSortElementArray32 + kCount, pRadixSortElementArrayTemp32);
+ EATEST_VERIFY(is_sorted(pRadixSortElementArray32, pRadixSortElementArray32 + kCount));
+ delete[] pRadixSortElementArray32;
+ delete[] pRadixSortElementArrayTemp32;
+ }
+
+ {
+ RadixSortElement16* pRadixSortElementArray16 = new RadixSortElement16[kCount];
+ RadixSortElement16* pRadixSortElementArrayTemp16 = new RadixSortElement16[kCount];
+ for(uint32_t i = 0; i < kCount; i++)
+ {
+ pRadixSortElementArray16[i].mKey = (uint16_t)(kCount - i);
+ pRadixSortElementArray16[i].mData = (uint16_t)i;
+ }
+ radix_sort<RadixSortElement16*, extract_radix_key<RadixSortElement16> >(pRadixSortElementArray16, pRadixSortElementArray16 + kCount, pRadixSortElementArrayTemp16);
+ EATEST_VERIFY(is_sorted(pRadixSortElementArray16, pRadixSortElementArray16 + kCount));
+ delete[] pRadixSortElementArray16;
+ delete[] pRadixSortElementArrayTemp16;
+ }
+
+ {
+ RadixSortElement8* pRadixSortElementArray8 = new RadixSortElement8[kCount];
+ RadixSortElement8* pRadixSortElementArrayTemp8 = new RadixSortElement8[kCount];
+ for(uint32_t i = 0; i < kCount; i++)
+ {
+ pRadixSortElementArray8[i].mKey = (uint8_t)(kCount - i);
+ pRadixSortElementArray8[i].mData = (uint8_t)i;
+ }
+ radix_sort<RadixSortElement8*, extract_radix_key<RadixSortElement8> >(pRadixSortElementArray8, pRadixSortElementArray8 + kCount, pRadixSortElementArrayTemp8);
+ EATEST_VERIFY(is_sorted(pRadixSortElementArray8, pRadixSortElementArray8 + kCount));
+ delete[] pRadixSortElementArray8;
+ delete[] pRadixSortElementArrayTemp8;
+ }
+ }
+
+ {
+ // Do some white-box testing of radix sort to verify internal optimizations work properly for some edge cases.
+
+ {
+ uint32_t input[] = { 123, 15, 76, 2, 74, 12, 62, 91 };
+ uint32_t buffer[EAArrayCount(input)];
+ radix_sort<uint32_t*, identity_extract_radix_key<uint32_t>>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ {
+ // Test values where some digit positions have identical values
+ uint32_t input[] = { 0x75000017, 0x74000003, 0x73000045, 0x76000024, 0x78000033, 0x76000099, 0x78000043, 0x75000010 };
+ uint32_t buffer[EAArrayCount(input)];
+ radix_sort<uint32_t*, identity_extract_radix_key<uint32_t>>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ {
+ // Test values where some digit positions have identical values
+ uint32_t input[] = { 0x00750017, 0x00740003, 0x00730045, 0x00760024, 0x00780033, 0x00760099, 0x00780043, 0x00750010 };
+ uint32_t buffer[EAArrayCount(input)];
+ radix_sort<uint32_t*, identity_extract_radix_key<uint32_t>>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ {
+ // Test values where an odd number of scatter operations will be done during sorting (which forces a copy operation to move values back to the input buffer).
+ uint32_t input[] = { 0x00000017, 0x00000003, 0x00000045, 0x00000024, 0x00000033, 0x00000099, 0x00000043, 0x00000010 };
+ uint32_t buffer[EAArrayCount(input)];
+ radix_sort<uint32_t*, identity_extract_radix_key<uint32_t>>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ {
+ // Test case for bug where the last histogram bucket was not being cleared to zero
+ uint32_t input[] = { 0xff00, 0xff };
+ uint32_t buffer[EAArrayCount(input)];
+ radix_sort<uint32_t*, identity_extract_radix_key<uint32_t>>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ }
+
+ {
+ // Test different values for DigitBits
+
+ {
+ uint32_t input[] = {2514513, 6278225, 2726217, 963245656, 35667326, 2625624562, 3562562562, 1556256252};
+ uint32_t buffer[EAArrayCount(input)];
+ radix_sort<uint32_t*, identity_extract_radix_key<uint32_t>, 1>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ {
+ uint32_t input[] = { 2514513, 6278225, 2726217, 963245656, 35667326, 2625624562, 3562562562, 1556256252 };
+ uint32_t buffer[EAArrayCount(input)];
+ radix_sort<uint32_t*, identity_extract_radix_key<uint32_t>, 3>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ {
+ uint32_t input[] = { 2514513, 6278225, 2726217, 963245656, 35667326, 2625624562, 3562562562, 1556256252 };
+ uint32_t buffer[EAArrayCount(input)];
+ radix_sort<uint32_t*, identity_extract_radix_key<uint32_t>, 6>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ {
+ // Test a value for DigitBits that is more than half the size of the type.
+ uint16_t input[] = { 14513, 58225, 26217, 34656, 63326, 24562, 35562, 15652 };
+ uint16_t buffer[EAArrayCount(input)];
+ radix_sort<uint16_t*, identity_extract_radix_key<uint16_t>, 11>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+ {
+ // Test a value for DigitBits that is the size of the type itself.
+ uint8_t input[] = { 113, 225, 217, 56, 26, 162, 62, 152 };
+ uint8_t buffer[EAArrayCount(input)];
+ radix_sort<uint8_t*, identity_extract_radix_key<uint8_t>, 8>(begin(input), end(input), buffer);
+ EATEST_VERIFY(is_sorted(begin(input), end(input)));
+ }
+
+ }
+
+ {
+ // void bucket_sort(ForwardIterator first, ForwardIterator last, ContainerArray& bucketArray, HashFunction hash)
+
+ const size_t kElementRange = 32;
+ vector<int> intArray(1000);
+
+ for(int i = 0; i < 1000; i++)
+ intArray[i] = rng() % kElementRange;
+
+ vector< vector<int> > bucketArray(kElementRange);
+ bucket_sort(intArray.begin(), intArray.end(), bucketArray, eastl::hash_use_self<int>());
+ EATEST_VERIFY(is_sorted(intArray.begin(), intArray.end()));
+ }
+
+
+ {
+ // stable_sort general test
+ typedef eastl::less<int> IntCompare;
+
+ int intArray[2] = { 0, 1 };
+
+ stable_sort(intArray, intArray + 2);
+ stable_sort(intArray, intArray + 2, IntCompare());
+ stable_sort<int*>(intArray, intArray + 2);
+ stable_sort<int*, IntCompare>(intArray, intArray + 2, IntCompare());
+
+ MallocAllocator mallocAllocator;
+
+ //stable_sort(intArray, intArray + 2, mallocAllocator);
+ stable_sort(intArray, intArray + 2, mallocAllocator, IntCompare());
+ //stable_sort<int*, MallocAllocator>(intArray, intArray + 2, mallocAllocator);
+ stable_sort<int*, MallocAllocator, IntCompare>(intArray, intArray + 2, mallocAllocator, IntCompare());
+ }
+
+ {
+ // stable_sort special test
+ IntArrayArray intArrayArray(2);
+ IntArrayCompare compare;
+
+ intArrayArray[0].push_back(0);
+ intArrayArray[1].push_back(1);
+
+ stable_sort(intArrayArray.begin(), intArrayArray.end(), compare);
+ }
+
+
+ {
+ // Test to verify that Compare object references are preserved.
+ typedef deque<int> IntDeque;
+ typedef IntDeque::iterator IntDequeIterator;
+
+ IntDeque intDeque, intDequeSaved;
+ StatefulCompare compare;
+
+ // Set up intDequeSaved with random data.
+ for(int n = 0; n < 500; n++)
+ {
+ intDequeSaved.push_back(n);
+
+ if(rng.RandLimit(10) == 0)
+ {
+ intDequeSaved.push_back(n);
+
+ if(rng.RandLimit(5) == 0)
+ intDequeSaved.push_back(n);
+ }
+ }
+
+ eastl::random_shuffle(intDequeSaved.begin(), intDequeSaved.end(), rng);
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ bubble_sort<IntDequeIterator, StatefulCompare&>(intDeque.begin(), intDeque.end(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ shaker_sort<IntDequeIterator, StatefulCompare&>(intDeque.begin(), intDeque.end(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ insertion_sort<IntDequeIterator, StatefulCompare&>(intDeque.begin(), intDeque.end(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ selection_sort<IntDequeIterator, StatefulCompare&>(intDeque.begin(), intDeque.end(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ shell_sort<IntDequeIterator, StatefulCompare&>(intDeque.begin(), intDeque.end(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ comb_sort<IntDequeIterator, StatefulCompare&>(intDeque.begin(), intDeque.end(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ heap_sort<IntDequeIterator, StatefulCompare&>(intDeque.begin(), intDeque.end(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ merge_sort<IntDequeIterator, EASTLAllocatorType, StatefulCompare&>(intDeque.begin(), intDeque.end(), *get_default_allocator((EASTLAllocatorType*)NULL), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ intDeque = intDequeSaved;
+ quick_sort<IntDequeIterator, StatefulCompare&>(intDeque.begin(), intDeque.end(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+
+ StatefulCompare::Reset();
+ vector<int> buffer(intDeque.size()/2);
+ intDeque = intDequeSaved;
+ tim_sort_buffer<IntDequeIterator, int, StatefulCompare&>(intDeque.begin(), intDeque.end(), buffer.data(), compare);
+ EATEST_VERIFY((StatefulCompare::nCtorCount == 0) && (StatefulCompare::nDtorCount == 0) && (StatefulCompare::nCopyCount == 0));
+ }
+
+ {
+ // Test checking that deque sorting can compile.
+ deque<int> intDeque;
+ vector<int> intVector;
+
+ stable_sort(intDeque.begin(), intDeque.end());
+ stable_sort(intVector.begin(), intVector.end());
+ }
+
+ {
+ // Test checking that sorting containers having elements of a type without an operator< compiles correctly
+
+ vector<TestNoLessOperator> noLessVector;
+
+ stable_sort(noLessVector.begin(), noLessVector.end());
+ bubble_sort(noLessVector.begin(), noLessVector.end());
+ shaker_sort(noLessVector.begin(), noLessVector.end());
+ insertion_sort(noLessVector.begin(), noLessVector.end());
+ selection_sort(noLessVector.begin(), noLessVector.end());
+ shell_sort(noLessVector.begin(), noLessVector.end());
+ comb_sort(noLessVector.begin(), noLessVector.end());
+ heap_sort(noLessVector.begin(), noLessVector.end());
+ merge_sort(noLessVector.begin(), noLessVector.end(), *get_default_allocator(nullptr));
+ quick_sort(noLessVector.begin(), noLessVector.end());
+
+ vector<TestNoLessOperator> buffer;
+ tim_sort_buffer(noLessVector.begin(), noLessVector.end(), buffer.data());
+}
+
+ {
+ // Test sorting of a container of pointers to objects as opposed to a container of objects themselves.
+ vector<TestObject> toArray;
+ vector<TestObject*> topArray;
+
+ for(eastl_size_t i = 0; i < 32; i++)
+ toArray.push_back(TestObject((int)rng.RandLimit(20)));
+ for(eastl_size_t i = 0; i < 32; i++) // This needs to be a second loop because the addresses might change in the first loop due to container resizing.
+ topArray.push_back(&toArray[i]);
+
+ quick_sort(topArray.begin(), topArray.end(), TestObjectPtrCompare());
+ EATEST_VERIFY(is_sorted(topArray.begin(), topArray.end(), TestObjectPtrCompare()));
+ }
+
+
+ {
+ // Test sorting of a container of array indexes to objects as opposed to a container of objects themselves.
+
+ vector<TestObject> toArray;
+ vector<eastl_size_t> toiArray;
+
+ for(eastl_size_t i = 0; i < 32; i++)
+ {
+ toArray.push_back(TestObject((int)rng.RandLimit(20)));
+ toiArray.push_back(i);
+ }
+
+ quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare(&toArray));
+ EATEST_VERIFY(is_sorted(toiArray.begin(), toiArray.end(), TestObjectIndexCompare(&toArray)));
+ }
+
+
+ {
+ // Test of special floating point sort in the presence of NaNs.
+ vector<float> floatArray;
+ union FloatInt32{ float f; int32_t i; } fi;
+
+ for(int i = 0; i < 1000; i++)
+ {
+ fi.i = (int32_t)rng.Rand();
+ floatArray.push_back(fi.f);
+ }
+
+ // Without SafeFloatCompare, the following quick_sort will crash, hang, or generate inconsistent results.
+ quick_sort(floatArray.begin(), floatArray.end(), SafeFloatCompare());
+ EATEST_VERIFY(is_sorted(floatArray.begin(), floatArray.end(), SafeFloatCompare()));
+ }
+
+ {
+ auto test_stable_sort = [&](auto testArray, size_t count)
+ {
+ auto isEven = [](auto val) { return (val % 2) == 0; };
+ auto isOdd = [](auto val) { return (val % 2) != 0; };
+
+ for (size_t i = 0; i < count; i++)
+ testArray.push_back((uint16_t)rng.Rand());
+
+ vector<uint16_t> evenArray;
+ vector<uint16_t> oddArray;
+
+ eastl::copy_if(testArray.begin(), testArray.end(), eastl::back_inserter(evenArray), isEven);
+ eastl::copy_if(testArray.begin(), testArray.end(), eastl::back_inserter(oddArray), isOdd);
+
+ const auto boundary = eastl::stable_partition(testArray.begin(), testArray.end(), isEven);
+
+ const auto evenCount = eastl::distance(testArray.begin(), boundary);
+ const auto oddCount = eastl::distance(boundary, testArray.end());
+
+ const auto evenExpectedCount = (ptrdiff_t)evenArray.size();
+ const auto oddExpectedCount = (ptrdiff_t)oddArray.size();
+
+ EATEST_VERIFY(evenCount == evenExpectedCount);
+ EATEST_VERIFY(oddCount == oddExpectedCount);
+ EATEST_VERIFY(eastl::equal(testArray.begin(), boundary, evenArray.begin()));
+ EATEST_VERIFY(eastl::equal(boundary, testArray.end(), oddArray.begin()));
+ };
+
+ test_stable_sort(vector<uint16_t>(), 1000); // Test stable_partition
+ test_stable_sort(vector<uint16_t>(), 0); // Test stable_partition on empty container
+ test_stable_sort(vector<uint16_t>(), 1); // Test stable_partition on container of one element
+ test_stable_sort(vector<uint16_t>(), 2); // Test stable_partition on container of two element
+ test_stable_sort(list<uint16_t>(), 0); // Test stable_partition on bidirectional iterator (not random access)
+ }
+
+ #if 0 // Disabled because it takes a long time and thus far seems to show no bug in quick_sort.
+ {
+ // Regression of Coverity report for Madden 2014 that quick_sort is reading beyond an array bounds within insertion_sort_simple.
+ // The Madden code was sorting the 11 players on the field for a team by some criteria. We write
+ vector<int> intArray(11);
+ for(eastl_size_t i = 0; i < intArray.size(); i++)
+ intArray[i] = i;
+
+ do {
+ vector<int> intArrayCopy(intArray);
+
+ // We need to verify that intArray[12] is never accessed. We could do that with a stomp allocator,
+ // which we don't currently have set up for the EASTL unit tests, or we could put a breakpoint in
+ // the debugger. Until we get a stomp allocator installed, do the breakpoint solution.
+ quick_sort(intArrayCopy.begin(), intArrayCopy.end());
+ } while(next_permutation(intArray.begin(), intArray.end()));
+ }
+ #endif
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestSpan.cpp b/EASTL/test/source/TestSpan.cpp
new file mode 100644
index 0000000..5a0ec07
--- /dev/null
+++ b/EASTL/test/source/TestSpan.cpp
@@ -0,0 +1,481 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EASTL/array.h>
+#include <EASTL/span.h>
+#include <EASTL/vector.h>
+
+void TestSpanCtor(int& nErrorCount)
+{
+ using namespace eastl;
+
+ {
+ span<int> s;
+ VERIFY(s.empty());
+ VERIFY(s.size() == 0);
+ VERIFY(s.data() == nullptr);
+ }
+ {
+ span<float> s;
+ VERIFY(s.empty());
+ VERIFY(s.size() == 0);
+ VERIFY(s.data() == nullptr);
+ }
+ {
+ span<TestObject> s;
+ VERIFY(s.empty());
+ VERIFY(s.size() == 0);
+ VERIFY(s.data() == nullptr);
+ }
+
+ {
+ int arr[5] = {0, 1, 2, 3, 4};
+ span<int> s(eastl::begin(arr), 5);
+ VERIFY(s.data() == eastl::begin(arr));
+ VERIFY(s.size() == 5);
+ VERIFY(!s.empty());
+ }
+
+ {
+ int arr[5] = {0, 1, 2, 3, 4};
+ span<int> s(eastl::begin(arr), eastl::end(arr));
+ VERIFY(s.data() == eastl::begin(arr));
+ VERIFY(s.size() == 5);
+ VERIFY(!s.empty());
+ }
+
+ {
+ int arr[5] = {0, 1, 2, 3, 4};
+ span<int> s(arr);
+ VERIFY(s.data() == eastl::begin(arr));
+ VERIFY(s.size() == 5);
+ VERIFY(s.data()[2] == arr[2]);
+ VERIFY(!s.empty());
+ }
+
+ {
+ eastl::array<int, 5> arr = {{0, 1, 2, 3, 4}};
+ span<int> s(arr);
+ VERIFY(s.data() == eastl::begin(arr));
+ VERIFY(s.size() == 5);
+ VERIFY(s.data()[2] == arr.data()[2]);
+ VERIFY(!s.empty());
+ }
+
+ {
+ const eastl::array<int, 5> arr = {{0, 1, 2, 3, 4}};
+ span<const int> s(arr);
+ VERIFY(s.data() == eastl::begin(arr));
+ VERIFY(s.size() == 5);
+ VERIFY(s.data()[2] == arr.data()[2]);
+ VERIFY(!s.empty());
+ }
+
+ {
+ const eastl::array<int, 5> arr = {{0, 1, 2, 3, 4}};
+ const span<const int> s(arr);
+ VERIFY(s.data() == eastl::begin(arr));
+ VERIFY(s.size() == 5);
+ VERIFY(s.data()[2] == arr.data()[2]);
+ }
+
+ {
+ class foo {};
+
+ foo* pFoo = nullptr;
+
+ auto f = [](eastl::span<const foo*>) {};
+
+ eastl::array<const foo*, 1> foos = {{pFoo}};
+
+ f(foos);
+ }
+}
+
+void TestSpanSizeBytes(int& nErrorCount)
+{
+ using namespace eastl;
+
+ {
+ int arr[5] = {0, 1, 2, 3, 4};
+ span<int> s(arr);
+ VERIFY(s.size_bytes() == sizeof(arr));
+ VERIFY(s.size_bytes() == (5 * sizeof(int)));
+ }
+
+ {
+ float arr[8] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
+ span<float> s(arr);
+ VERIFY(s.size_bytes() == sizeof(arr));
+ VERIFY(s.size_bytes() == (8 * sizeof(float)));
+ }
+
+ {
+ int64_t arr[5] = {0, 1, 2, 3, 4};
+ span<int64_t> s(arr);
+ VERIFY(s.size_bytes() == sizeof(arr));
+ VERIFY(s.size_bytes() == (5 * sizeof(int64_t)));
+ }
+}
+
+void TestSpanElementAccess(int& nErrorCount)
+{
+ using namespace eastl;
+
+ {
+ int arr[5] = {0, 1, 2, 3, 4};
+ span<int> s(arr);
+
+ VERIFY(s.front() == 0);
+ VERIFY(s.back() == 4);
+
+ VERIFY(s[0] == 0);
+ VERIFY(s[1] == 1);
+ VERIFY(s[2] == 2);
+ VERIFY(s[3] == 3);
+ VERIFY(s[4] == 4);
+
+ VERIFY(s(0) == 0);
+ VERIFY(s(1) == 1);
+ VERIFY(s(2) == 2);
+ VERIFY(s(3) == 3);
+ VERIFY(s(4) == 4);
+ }
+}
+
+void TestSpanIterators(int& nErrorCount)
+{
+ using namespace eastl;
+
+ int arr[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ span<int> s(arr);
+
+ // ranged-for test
+ {
+ int* pBegin = arr;
+ for(auto& e : arr)
+ {
+ VERIFY(e == *pBegin++);
+ }
+ }
+
+ {
+ auto testIteratorBegin = [&](auto p)
+ {
+ VERIFY(*p++ == 0);
+ VERIFY(*p++ == 1);
+ VERIFY(*p++ == 2);
+ VERIFY(*p++ == 3);
+ VERIFY(*p++ == 4);
+ VERIFY(*p++ == 5);
+ VERIFY(*p++ == 6);
+ VERIFY(*p++ == 7);
+ VERIFY(*p++ == 8);
+ VERIFY(*p++ == 9);
+ };
+
+ auto testIteratorEnd = [&](auto p)
+ {
+ p--; // move pointer to a valid element
+
+ VERIFY(*p-- == 9);
+ VERIFY(*p-- == 8);
+ VERIFY(*p-- == 7);
+ VERIFY(*p-- == 6);
+ VERIFY(*p-- == 5);
+ VERIFY(*p-- == 4);
+ VERIFY(*p-- == 3);
+ VERIFY(*p-- == 2);
+ VERIFY(*p-- == 1);
+ VERIFY(*p-- == 0);
+ };
+
+ testIteratorBegin(s.begin());
+ testIteratorBegin(s.cbegin());
+ testIteratorEnd(s.end());
+ testIteratorEnd(s.cend());
+ }
+
+ {
+ auto testReverseIteratorBegin = [&](auto p)
+ {
+ VERIFY(*p++ == 9);
+ VERIFY(*p++ == 8);
+ VERIFY(*p++ == 7);
+ VERIFY(*p++ == 6);
+ VERIFY(*p++ == 5);
+ VERIFY(*p++ == 4);
+ VERIFY(*p++ == 3);
+ VERIFY(*p++ == 2);
+ VERIFY(*p++ == 1);
+ VERIFY(*p++ == 0);
+ };
+
+ auto testReverseIteratorEnd = [&](auto p)
+ {
+ p--; // move pointer to a valid element
+
+ VERIFY(*p-- == 0);
+ VERIFY(*p-- == 1);
+ VERIFY(*p-- == 2);
+ VERIFY(*p-- == 3);
+ VERIFY(*p-- == 4);
+ VERIFY(*p-- == 5);
+ VERIFY(*p-- == 6);
+ VERIFY(*p-- == 7);
+ VERIFY(*p-- == 8);
+ VERIFY(*p-- == 9);
+ };
+
+ testReverseIteratorBegin(s.rbegin());
+ testReverseIteratorBegin(s.crbegin());
+ testReverseIteratorEnd(s.rend());
+ testReverseIteratorEnd(s.crend());
+ }
+}
+
+void TestSpanCopyAssignment(int& nErrorCount)
+{
+ using namespace eastl;
+
+ {
+ int arr[5] = {0, 1, 2, 3, 4};
+ span<int> s(arr);
+ span<int> sc = s;
+
+ VERIFY(s[0] == sc[0]);
+ VERIFY(s[1] == sc[1]);
+ VERIFY(s[2] == sc[2]);
+ VERIFY(s[3] == sc[3]);
+ VERIFY(s[4] == sc[4]);
+
+ VERIFY(s(0) == sc(0));
+ VERIFY(s(1) == sc(1));
+ VERIFY(s(2) == sc(2));
+ VERIFY(s(3) == sc(3));
+ VERIFY(s(4) == sc(4));
+ }
+}
+
+void TestSpanContainerConversion(int& nErrorCount)
+{
+ using namespace eastl;
+
+ {
+ vector<int> v = {0, 1, 2, 3, 4, 5};
+ span<const int> s(v);
+
+ VERIFY(s.size() == static_cast<span<int>::index_type>(eastl::size(v)));
+ VERIFY(s.data() == eastl::data(v));
+
+ VERIFY(s[0] == v[0]);
+ VERIFY(s[1] == v[1]);
+ VERIFY(s[2] == v[2]);
+ VERIFY(s[3] == v[3]);
+ VERIFY(s[4] == v[4]);
+ VERIFY(s[5] == v[5]);
+ }
+
+ {
+ const vector<int> v = {0, 1, 2, 3, 4, 5};
+ span<const int> s(v);
+
+ VERIFY(s.size() == static_cast<span<int>::index_type>(eastl::size(v)));
+ VERIFY(s.data() == eastl::data(v));
+
+ VERIFY(s[0] == v[0]);
+ VERIFY(s[1] == v[1]);
+ VERIFY(s[2] == v[2]);
+ VERIFY(s[3] == v[3]);
+ VERIFY(s[4] == v[4]);
+ VERIFY(s[5] == v[5]);
+ }
+
+ {
+ vector<int> v = {0, 1, 2, 3, 4, 5};
+ span<const int, 6> s1(v);
+ span<const int> s2(s1);
+
+ VERIFY(s2.size() == (span<const int>::index_type)v.size());
+ VERIFY(s2[0] == v[0]);
+ VERIFY(s2[1] == v[1]);
+
+ VERIFY(s1.data() == v.data());
+ VERIFY(s1.data() == s2.data());
+ }
+
+ { // user reported regression for calling non-const span overload with a vector.
+ auto f1 = [](span<int> s) { return s.size(); };
+ auto f2 = [](span<const int> s) { return s.size(); };
+
+ {
+ vector<int> v = {0, 1, 2, 3, 4, 5};
+
+ VERIFY(f1(v) == v.size());
+ VERIFY(f2(v) == v.size());
+ }
+
+ {
+ int a[] = {0, 1, 2, 3, 4, 5};
+
+ VERIFY(f1(a) == EAArrayCount(a));
+ VERIFY(f2(a) == EAArrayCount(a));
+ }
+ }
+}
+
+void TestSpanComparison(int& nErrorCount)
+{
+ using namespace eastl;
+
+ int arr1[5] = {0, 1, 2, 3, 4};
+ int arr2[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ {
+ span<int> s1 = arr1;
+ span<int> s2 = arr2;
+ span<int> s3 = arr2;
+ VERIFY(s2 == s3);
+ VERIFY(s1 != s2);
+ VERIFY(s1 < s2);
+ VERIFY(s1 <= s2);
+ VERIFY(s2 > s1);
+ VERIFY(s2 >= s1);
+ }
+}
+
+void TestSpanSubViews(int& nErrorCount)
+{
+ using namespace eastl;
+
+ int arr1[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ {
+ span<int> s = arr1;
+ auto first_span = s.first<4>();
+ VERIFY(first_span.size() == 4);
+ VERIFY(first_span[0] == 0);
+ VERIFY(first_span[1] == 1);
+ VERIFY(first_span[2] == 2);
+ VERIFY(first_span[3] == 3);
+ }
+
+ {
+ span<int> s = arr1;
+ auto first_span = s.first(4);
+ VERIFY(first_span.size() == 4);
+ VERIFY(first_span[0] == 0);
+ VERIFY(first_span[1] == 1);
+ VERIFY(first_span[2] == 2);
+ VERIFY(first_span[3] == 3);
+ }
+
+ {
+ span<int> s = arr1;
+ auto first_span = s.last<4>();
+ VERIFY(first_span.size() == 4);
+ VERIFY(first_span[0] == 6);
+ VERIFY(first_span[1] == 7);
+ VERIFY(first_span[2] == 8);
+ VERIFY(first_span[3] == 9);
+ }
+
+ {
+ span<int> s = arr1;
+ auto first_span = s.last(4);
+ VERIFY(first_span.size() == 4);
+ VERIFY(first_span[0] == 6);
+ VERIFY(first_span[1] == 7);
+ VERIFY(first_span[2] == 8);
+ VERIFY(first_span[3] == 9);
+ }
+
+ { // empty range
+ span<int, 0> s{};
+
+ auto fixed_span = s.subspan<0, 0>();
+ VERIFY(fixed_span.empty());
+ fixed_span = s.first<0>();
+ VERIFY(fixed_span.empty());
+ fixed_span = s.last<0>();
+ VERIFY(fixed_span.empty());
+
+ span<int> dynamic_span;
+ VERIFY(dynamic_span.empty());
+ dynamic_span = s.first(0);
+ VERIFY(dynamic_span.empty());
+ dynamic_span = s.last(0);
+ VERIFY(dynamic_span.empty());
+ }
+
+ { // subspan: full range
+ span<int, 10> s = arr1;
+
+ auto fixed_span = s.subspan<0, 10>();
+ VERIFY(fixed_span.size() == 10);
+ VERIFY(fixed_span[0] == 0);
+ VERIFY(fixed_span[1] == 1);
+ VERIFY(fixed_span[8] == 8);
+ VERIFY(fixed_span[9] == 9);
+
+ auto dynamic_span = s.subspan(0, s.size());
+ VERIFY(dynamic_span.size() == 10);
+ VERIFY(dynamic_span[0] == 0);
+ VERIFY(dynamic_span[1] == 1);
+ VERIFY(dynamic_span[8] == 8);
+ VERIFY(dynamic_span[9] == 9);
+ }
+
+ { // subspan: subrange
+ span<int, 10> s = arr1;
+
+ auto fixed_span = s.subspan<3, 4>();
+ VERIFY(fixed_span.size() == 4);
+ VERIFY(fixed_span[0] == 3);
+ VERIFY(fixed_span[1] == 4);
+ VERIFY(fixed_span[2] == 5);
+ VERIFY(fixed_span[3] == 6);
+
+ auto dynamic_span = s.subspan(3, 4);
+ VERIFY(dynamic_span.size() == 4);
+ VERIFY(dynamic_span[0] == 3);
+ VERIFY(dynamic_span[1] == 4);
+ VERIFY(dynamic_span[2] == 5);
+ VERIFY(dynamic_span[3] == 6);
+ }
+
+ { // subspan: default count
+ span<int, 10> s = arr1;
+
+ auto fixed_span = s.subspan<3>();
+ VERIFY(fixed_span.size() == 7);
+ VERIFY(fixed_span[0] == 3);
+ VERIFY(fixed_span[1] == 4);
+ VERIFY(fixed_span[5] == 8);
+ VERIFY(fixed_span[6] == 9);
+
+ auto dynamic_span = s.subspan(3);
+ VERIFY(dynamic_span.size() == 7);
+ VERIFY(dynamic_span[0] == 3);
+ VERIFY(dynamic_span[1] == 4);
+ VERIFY(dynamic_span[5] == 8);
+ VERIFY(dynamic_span[6] == 9);
+ }
+}
+
+int TestSpan()
+{
+ int nErrorCount = 0;
+
+ TestSpanCtor(nErrorCount);
+ TestSpanSizeBytes(nErrorCount);
+ TestSpanElementAccess(nErrorCount);
+ TestSpanIterators(nErrorCount);
+ TestSpanCopyAssignment(nErrorCount);
+ TestSpanContainerConversion(nErrorCount);
+ TestSpanComparison(nErrorCount);
+ TestSpanSubViews(nErrorCount);
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestString.cpp b/EASTL/test/source/TestString.cpp
new file mode 100644
index 0000000..1bd06e7
--- /dev/null
+++ b/EASTL/test/source/TestString.cpp
@@ -0,0 +1,142 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EABase/eabase.h>
+#include <EAStdC/EAMemory.h>
+#include <EAStdC/EAString.h>
+#include <EASTL/string.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/allocator_malloc.h>
+
+using namespace eastl;
+
+// Verify char8_t support is present if the test build requested it.
+#if defined(EASTL_EXPECT_CHAR8T_SUPPORT) && !EA_CHAR8_UNIQUE
+static_assert(false, "Building with char8_t tests enabled, but EA_CHAR8_UNIQUE evaluates to false.");
+#endif
+
+
+// inject string literal string conversion macros into the unit tests
+#define TEST_STRING_NAME TestBasicString
+#define LITERAL(x) x
+#include "TestString.inl"
+
+#define TEST_STRING_NAME TestBasicStringW
+#define LITERAL(x) EA_WCHAR(x)
+#include "TestString.inl"
+
+#define TEST_STRING_NAME TestBasicString8
+#define LITERAL(x) EA_CHAR8(x)
+#include "TestString.inl"
+
+#define TEST_STRING_NAME TestBasicString16
+#define LITERAL(x) EA_CHAR16(x)
+#include "TestString.inl"
+
+#define TEST_STRING_NAME TestBasicString32
+#define LITERAL(x) EA_CHAR32(x)
+#include "TestString.inl"
+
+int TestString()
+{
+ int nErrorCount = 0;
+
+ nErrorCount += TestBasicString<eastl::basic_string<char, StompDetectAllocator>>();
+ nErrorCount += TestBasicString<eastl::string>();
+
+ nErrorCount += TestBasicStringW<eastl::basic_string<wchar_t, StompDetectAllocator>>();
+ nErrorCount += TestBasicStringW<eastl::wstring>();
+
+#if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ nErrorCount += TestBasicString8<eastl::basic_string<char8_t, StompDetectAllocator>>();
+ nErrorCount += TestBasicString8<eastl::u8string>();
+#endif
+
+ nErrorCount += TestBasicString16<eastl::basic_string<char16_t, StompDetectAllocator>>();
+ nErrorCount += TestBasicString16<eastl::u16string>();
+
+#if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE
+ nErrorCount += TestBasicString32<eastl::basic_string<char32_t, StompDetectAllocator>>();
+ nErrorCount += TestBasicString32<eastl::u32string>();
+#endif
+
+ // Check for memory leaks by using the 'CountingAllocator' to ensure no active allocation after tests have completed.
+ CountingAllocator::resetCount();
+ nErrorCount += TestBasicString<eastl::basic_string<char, CountingAllocator>>();
+ VERIFY(CountingAllocator::getActiveAllocationCount() == 0);
+
+ nErrorCount += TestBasicStringW<eastl::basic_string<wchar_t, CountingAllocator>>();
+ VERIFY(CountingAllocator::getActiveAllocationCount() == 0);
+
+#if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ nErrorCount += TestBasicString8<eastl::basic_string<char8_t, CountingAllocator>>();
+ VERIFY(CountingAllocator::getActiveAllocationCount() == 0);
+#endif
+
+ nErrorCount += TestBasicString16<eastl::basic_string<char16_t, CountingAllocator>>();
+ VERIFY(CountingAllocator::getActiveAllocationCount() == 0);
+
+#if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE
+ nErrorCount += TestBasicString32<eastl::basic_string<char32_t, CountingAllocator>>();
+ VERIFY(CountingAllocator::getActiveAllocationCount() == 0);
+#endif
+
+ // to_string
+ {
+ VERIFY(eastl::to_string(42) == "42");
+ VERIFY(eastl::to_string(42l) == "42");
+ VERIFY(eastl::to_string(42ll) == "42");
+ VERIFY(eastl::to_string(42u) == "42");
+ VERIFY(eastl::to_string(42ul) == "42");
+ VERIFY(eastl::to_string(42ull) == "42");
+ VERIFY(eastl::to_string(42.f) == "42.000000");
+ VERIFY(eastl::to_string(42.0) == "42.000000");
+ #if !defined(EA_COMPILER_GNUC) && !defined(EA_PLATFORM_MINGW)
+ // todo: long double sprintf functionality is unrealiable on unix-gcc, requires further debugging.
+ VERIFY(eastl::to_string(42.0l) == "42.000000");
+ #endif
+ }
+
+ // to_wstring
+ {
+ VERIFY(eastl::to_wstring(42) == L"42");
+ VERIFY(eastl::to_wstring(42l) == L"42");
+ VERIFY(eastl::to_wstring(42ll) == L"42");
+ VERIFY(eastl::to_wstring(42u) == L"42");
+ VERIFY(eastl::to_wstring(42ul) == L"42");
+ VERIFY(eastl::to_wstring(42ull) == L"42");
+ VERIFY(eastl::to_wstring(42.f) == L"42.000000");
+ VERIFY(eastl::to_wstring(42.0) == L"42.000000");
+ #if !defined(EA_COMPILER_GNUC) && !defined(EA_PLATFORM_MINGW)
+ // todo: long double sprintf functionality is unrealiable on unix-gcc, requires further debugging.
+ VERIFY(eastl::to_wstring(42.0l) == L"42.000000");
+ #endif
+ }
+
+ #if EASTL_USER_LITERALS_ENABLED
+ {
+ VERIFY("cplusplus"s == "cplusplus");
+ VERIFY(L"cplusplus"s == L"cplusplus");
+ VERIFY(u"cplusplus"s == u"cplusplus");
+ VERIFY(U"cplusplus"s == U"cplusplus");
+ VERIFY(u8"cplusplus"s == u8"cplusplus");
+ }
+ #endif
+
+
+ {
+ // CustomAllocator has no data members which reduces the size of an eastl::basic_string via the empty base class optimization.
+ using EboString = eastl::basic_string<char, CustomAllocator>;
+
+ // this must match the eastl::basic_string heap memory layout struct which is a pointer and 2 eastl_size_t.
+ const int expectedSize = sizeof(EboString::pointer) + (2 * sizeof(EboString::size_type));
+
+ static_assert(sizeof(EboString) == expectedSize, "unexpected layout size of basic_string");
+ }
+
+ return nErrorCount;
+}
+
+
diff --git a/EASTL/test/source/TestString.inl b/EASTL/test/source/TestString.inl
new file mode 100644
index 0000000..3a59e68
--- /dev/null
+++ b/EASTL/test/source/TestString.inl
@@ -0,0 +1,2101 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+// todo:
+// Test Encoding
+// Test StringHash
+// Test exceptions
+
+#if EASTL_OPENSOURCE
+ #define EASTL_SNPRINTF_TESTS_ENABLED 0
+#else
+ #define EASTL_SNPRINTF_TESTS_ENABLED 1
+#endif
+
+
+template<typename StringType>
+int TEST_STRING_NAME()
+{
+ int nErrorCount = 0;
+
+ struct Failocator
+ {
+ Failocator() = default;
+ Failocator(const char*) {}
+
+ void* allocate(size_t) { EA_FAIL(); return nullptr; }
+ void deallocate(void*, size_t) { EA_FAIL(); }
+ };
+
+ #if defined(EA_PLATFORM_ANDROID)
+ EA_DISABLE_CLANG_WARNING(-Wunknown-warning-option) // warning: disable unknown warning suppression pragmas
+ EA_DISABLE_CLANG_WARNING(-Wunknown-pragmas) // warning: disable unknown warning suppression pragmas
+ EA_DISABLE_CLANG_WARNING(-Winherited-variadic-ctor) // warning: inheriting constructor does not inherit ellipsis
+ #endif
+
+ struct SSOStringType : public StringType
+ {
+ using StringType::StringType;
+ using StringType::IsSSO;
+ };
+
+ // Use custom string type that always fails to allocate memory to highlight when SSO is not functioning correctly.
+ struct SSOFailocatorString : public eastl::basic_string<typename StringType::value_type, Failocator>
+ {
+ using eastl::basic_string<typename StringType::value_type, Failocator>::basic_string;
+ using eastl::basic_string<typename StringType::value_type, Failocator>::IsSSO;
+ };
+
+ #if defined(EA_PLATFORM_ANDROID)
+ EA_RESTORE_CLANG_WARNING()
+ EA_RESTORE_CLANG_WARNING()
+ EA_RESTORE_CLANG_WARNING()
+ #endif
+
+ // SSO (short string optimization) tests
+ {
+ {
+ SSOFailocatorString str;
+ VERIFY(str.validate());
+ VERIFY(str.empty());
+ VERIFY(str.IsSSO());
+ }
+
+ EA_CONSTEXPR_IF(EA_PLATFORM_WORD_SIZE == 8 && EASTL_SIZE_T_32BIT == 0)
+ {
+ // test SSO size on 64 bit platforms
+ EA_CONSTEXPR_IF(sizeof(typename StringType::value_type) == 1)
+ {
+ // we can fit 23 characters on 64bit system with 1 byte chars
+ const auto* pLiteral = LITERAL("aaaaaaaaaaaaaaaaaaaaaaa");
+ SSOFailocatorString str(pLiteral);
+
+ VERIFY(EA::StdC::Strlen(pLiteral) == 23);
+ VERIFY(str == pLiteral);
+ VERIFY(str.validate());
+ VERIFY(str.IsSSO());
+ }
+
+ EA_CONSTEXPR_IF(sizeof(typename StringType::value_type) == 2)
+ {
+ // we can fit 11 characters on 64 bit system with 2 byte chars
+ const auto* pLiteral = LITERAL("aaaaaaaaaaa");
+ SSOFailocatorString str(pLiteral);
+
+ VERIFY(EA::StdC::Strlen(pLiteral) == 11);
+ VERIFY(str == pLiteral);
+ VERIFY(str.validate());
+ VERIFY(str.IsSSO());
+ }
+
+ EA_CONSTEXPR_IF(sizeof(typename StringType::value_type) == 4)
+ {
+ // we can fit 5 characters on 64 bit system with 4 byte chars
+ const auto* pLiteral = LITERAL("aaaaa");
+ SSOFailocatorString str(pLiteral);
+
+ VERIFY(EA::StdC::Strlen(pLiteral) == 5);
+ VERIFY(str == pLiteral);
+ VERIFY(str.validate());
+ VERIFY(str.IsSSO());
+ }
+ }
+
+ EA_CONSTEXPR_IF(EA_PLATFORM_WORD_SIZE == 4)
+ {
+ // test SSO size on 32 bit platforms
+ EA_CONSTEXPR_IF(sizeof(typename StringType::value_type) == 1)
+ {
+ // we can fit 11 characters on 32bit system with 1 byte chars
+ const auto* pLiteral = LITERAL("aaaaaaaaaaa");
+ SSOFailocatorString str(pLiteral);
+
+ VERIFY(EA::StdC::Strlen(pLiteral) == 11);
+ VERIFY(str == pLiteral);
+ VERIFY(str.validate());
+ VERIFY(str.IsSSO());
+ }
+
+ EA_CONSTEXPR_IF(sizeof(typename StringType::value_type) == 2)
+ {
+ // we can fit 5 characters on 32 bit system with 2 byte chars
+ const auto* pLiteral = LITERAL("aaaaa");
+ SSOFailocatorString str(pLiteral);
+
+ VERIFY(EA::StdC::Strlen(pLiteral) == 5);
+ VERIFY(str == pLiteral);
+ VERIFY(str.validate());
+ VERIFY(str.IsSSO());
+ }
+
+ EA_CONSTEXPR_IF(sizeof(typename StringType::value_type) == 4)
+ {
+ // we can fit 2 characters on 32 bit system with 4 byte chars
+ const auto* pLiteral = LITERAL("aa");
+ SSOFailocatorString str(pLiteral);
+
+ VERIFY(EA::StdC::Strlen(pLiteral) == 2);
+ VERIFY(str == pLiteral);
+ VERIFY(str.validate());
+ VERIFY(str.IsSSO());
+ }
+ }
+ }
+
+ // basic_string();
+ {
+ StringType str;
+ VERIFY(str.empty());
+ VERIFY(str.length() == 0);
+ VERIFY(str.validate());
+ }
+
+ // explicit basic_string(const allocator_type& allocator);
+ {
+ typename StringType::allocator_type alloc;
+ StringType str(alloc);
+ VERIFY(str.validate());
+ }
+
+ // basic_string(const value_type* p, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ {
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"), 26);
+ VERIFY(str[5] == LITERAL('f'));
+ VERIFY(!str.empty());
+ VERIFY(str.length() == 26);
+ VERIFY(str.validate());
+ }
+
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(str[5] == LITERAL('f'));
+ VERIFY(!str.empty());
+ VERIFY(str.length() == 26);
+ VERIFY(str.validate());
+ }
+ }
+
+ // basic_string(const this_type& x, size_type position, size_type n = npos);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ StringType str2(str1, 3, 3);
+ VERIFY(str2 == LITERAL("def"));
+ VERIFY(str2.size() == 3);
+ VERIFY(str2.length() == 3);
+ VERIFY(str2.capacity() >= 3); // SSO buffer size
+
+ StringType str3(str1, 25, 3);
+ VERIFY(str3 == LITERAL("z"));
+ VERIFY(str3.size() == 1);
+ VERIFY(str3.length() == 1);
+ VERIFY(str3.capacity() >= 1); // SSO buffer size
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ VERIFY(str3.validate());
+ }
+
+ // EASTL_STRING_EXPLICIT basic_string(const value_type* p, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ {
+ auto* pLiteral = LITERAL("abcdefghijklmnopqrstuvwxyz");
+ StringType str(pLiteral);
+ VERIFY(str == pLiteral);
+ }
+
+ // basic_string(size_type n, value_type c, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ {
+ StringType str(32, LITERAL('a'));
+ VERIFY(!str.empty());
+ VERIFY(str.size() == 32);
+ VERIFY(str.length() == 32);
+ VERIFY(str == LITERAL("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"));
+
+ VERIFY(str.validate());
+ }
+
+ // basic_string(const this_type& x);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str2(str1);
+
+ VERIFY(str1 == str2);
+ VERIFY(str1.size() == str2.size());
+ VERIFY(str1.empty() == str2.empty());
+ VERIFY(str1.length() == str2.length());
+ VERIFY(EA::StdC::Memcmp(str1.data(), str2.data(), str1.size()) == 0);
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+
+ // basic_string(const value_type* pBegin, const value_type* pEnd, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ auto* pStart = str1.data() + 5;
+ auto* pEnd = str1.data() + 20;
+
+ StringType str(pStart, pEnd);
+ VERIFY(str == LITERAL("fghijklmnopqrst"));
+ VERIFY(!str.empty());
+ VERIFY(str.size() == 15);
+ }
+
+ // basic_string(CtorDoNotInitialize, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ {
+ StringType str(typename StringType::CtorDoNotInitialize(), 42);
+ VERIFY(str.size() == 0);
+ VERIFY(str.length() == 0);
+ VERIFY(str.capacity() == 42);
+ }
+
+ // basic_string(CtorSprintf, const value_type* pFormat, ...);
+ {
+ #if EASTL_SNPRINTF_TESTS_ENABLED
+ {
+ StringType str(typename StringType::CtorSprintf(), LITERAL("Hello, %d"), 42);
+ VERIFY(str == LITERAL("Hello, 42"));
+ VERIFY(str.validate());
+ }
+
+ {
+ StringType str(typename StringType::CtorSprintf(), LITERAL("Hello, %d %d %d %d %d %d %d %d %d"), 42, 42, 42, 42, 42, 42, 42, 42, 42);
+ VERIFY(str == LITERAL("Hello, 42 42 42 42 42 42 42 42 42"));
+ VERIFY(str.validate());
+ }
+ #endif
+ }
+
+ // basic_string(std::initializer_list<value_type> init, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ {
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ StringType str({'a','b','c','d','e','f'});
+ VERIFY(str == LITERAL("abcdef"));
+ VERIFY(!str.empty());
+ VERIFY(str.length() == 6);
+ VERIFY(str.size() == 6);
+ VERIFY(str.validate());
+ #endif
+ }
+
+ // basic_string(this_type&& x);
+ // basic_string(this_type&& x, const allocator_type& allocator);
+ { // test heap string
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str2(eastl::move(str1));
+
+ VERIFY(str1 != LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(str2 == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str1.empty());
+ VERIFY(!str2.empty());
+
+ VERIFY(str1.length() == 0);
+ VERIFY(str2.length() == 26);
+
+ VERIFY(str1.size() == 0);
+ VERIFY(str2.size() == 26);
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+ { // test sso string
+ StringType str1(LITERAL("a"));
+ StringType str2(eastl::move(str1));
+
+ VERIFY(str1 != LITERAL("a"));
+ VERIFY(str2 == LITERAL("a"));
+
+ VERIFY(str1.empty());
+ VERIFY(!str2.empty());
+
+ VERIFY(str1.length() == 0);
+ VERIFY(str2.length() == 1);
+
+ VERIFY(str1.size() == 0);
+ VERIFY(str2.size() == 1);
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+
+ // basic_string(const view_type& sv, const allocator_type& allocator);
+ // basic_string(const view_type& sv, size_type position, size_type n, const allocator_type& allocator);
+ {
+ { // test string_view
+ typename StringType::view_type sv(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str(sv);
+
+ VERIFY(str == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(!str.empty());
+ VERIFY(str.length() == 26);
+ VERIFY(str.size() == 26);
+ VERIFY(str.validate());
+ }
+
+ { // test string_view substring
+ typename StringType::view_type sv(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str(sv, 2, 22);
+
+ VERIFY(str == LITERAL("cdefghijklmnopqrstuvwx"));
+ VERIFY(!str.empty());
+ VERIFY(str.length() == 22);
+ VERIFY(str.size() == 22);
+ VERIFY(str.validate());
+ }
+ }
+
+ // template <typename OtherCharType>
+ // basic_string(CtorConvert, const OtherCharType* p, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str(typename StringType::CtorConvert(), EA_CHAR8("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str(typename StringType::CtorConvert(), EA_CHAR16("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str(typename StringType::CtorConvert(), EA_CHAR32("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str(typename StringType::CtorConvert(), EA_WCHAR("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // template <typename OtherCharType>
+ // basic_string(CtorConvert, const OtherCharType* p, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str(typename StringType::CtorConvert(), EA_CHAR8("123456789"), 4);
+ VERIFY(str == LITERAL("1234"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str(typename StringType::CtorConvert(), EA_CHAR16("123456789"), 4);
+ VERIFY(str == LITERAL("1234"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str(typename StringType::CtorConvert(), EA_CHAR32("123456789"), 4);
+ VERIFY(str == LITERAL("1234"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str(typename StringType::CtorConvert(), EA_WCHAR("123456789"), 4);
+ VERIFY(str == LITERAL("1234"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // template <typename OtherStringType>
+ // basic_string(CtorConvert, const OtherStringType& x);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str(typename StringType::CtorConvert(), eastl::basic_string<char8_t, typename StringType::allocator_type>(EA_CHAR8("123456789")));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str(typename StringType::CtorConvert(), eastl::basic_string<char16_t, typename StringType::allocator_type>(EA_CHAR16("123456789")));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str(typename StringType::CtorConvert(), eastl::basic_string<char32_t, typename StringType::allocator_type>(EA_CHAR32("123456789")));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str(typename StringType::CtorConvert(), eastl::basic_string<wchar_t, typename StringType::allocator_type>(EA_WCHAR("123456789")));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // const allocator_type& get_allocator() const EA_NOEXCEPT;
+ // allocator_type& get_allocator() EA_NOEXCEPT;
+ // void set_allocator(const allocator_type& allocator);
+ {
+ }
+
+ // this_type& operator=(const this_type& x);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str1_copy(LITERAL(""));
+
+ VERIFY(str1_copy.empty());
+
+ str1_copy = str1;
+
+ VERIFY(str1 == str1_copy);
+ VERIFY(!str1_copy.empty());
+ VERIFY(str1.validate());
+ VERIFY(str1_copy.validate());
+ }
+
+ // this_type& operator=(const value_type* p);
+ {
+ StringType str;
+ str = LITERAL("abcdefghijklmnopqrstuvwxyz");
+
+ VERIFY(str[5] == LITERAL('f'));
+ VERIFY(str == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(!str.empty());
+ VERIFY(str.length() == 26);
+ VERIFY(str.validate());
+ }
+
+ // this_type& operator=(value_type c);
+ {
+ StringType str;
+ str = LITERAL('a');
+
+ VERIFY(str == LITERAL("a"));
+ VERIFY(!str.empty());
+ VERIFY(str.length() == 1);
+ VERIFY(str.size() == 1);
+ VERIFY(str.validate());
+ }
+
+ // this_type& operator=(std::initializer_list<value_type> ilist);
+ {
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ StringType str = {'a','b','c','d','e','f'};
+
+ VERIFY(str == LITERAL("abcdef"));
+ VERIFY(!str.empty());
+ VERIFY(str.length() == 6);
+ VERIFY(str.size() == 6);
+ VERIFY(str.validate());
+ #endif
+ }
+
+ // this_type& operator=(this_type&& x);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str2 = eastl::move(str1);
+
+ VERIFY(str1 != LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(str2 == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str1.empty());
+ VERIFY(!str2.empty());
+
+ VERIFY(str1.length() == 0);
+ VERIFY(str2.length() == 26);
+
+ VERIFY(str1.size() == 0);
+ VERIFY(str2.size() == 26);
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+ {
+ StringType str1(LITERAL("a"));
+ StringType str2 = eastl::move(str1);
+
+ VERIFY(str1 != LITERAL("a"));
+ VERIFY(str2 == LITERAL("a"));
+
+ VERIFY(str1.empty());
+ VERIFY(!str2.empty());
+
+ VERIFY(str1.length() == 0);
+ VERIFY(str2.length() == 1);
+
+ VERIFY(str1.size() == 0);
+ VERIFY(str2.size() == 1);
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+
+ // this_type& operator=(value_type* p);
+ //
+ // template <typename OtherCharType>
+ // this_type& operator=(const OtherCharType* p);
+ //
+ // template <typename OtherStringType>
+ // this_type& operator=(const OtherStringType& x);
+ {
+ #if EASTL_OPERATOR_EQUALS_OTHER_ENABLED
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = LITERAL("123456789");
+ VERIFY(str == LITERAL("123456789");
+ VERIFY(str.validate());
+ }
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = EA_CHAR8("123456789");
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = EA_CHAR16("123456789");
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = EA_CHAR32("123456789");
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = EA_WCHAR("123456789");
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = eastl::basic_string<char8_t>(EA_CHAR8("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = eastl::basic_string<char16_t>(EA_CHAR16("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = eastl::basic_string<char32_t>(EA_CHAR32("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str = eastl::basic_string<wchar_t>(EA_WCHAR("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+ #endif
+ }
+
+ // void swap(this_type& x);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str2;
+
+ str1.swap(str2);
+
+ VERIFY(str1 != LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(str2 == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str1.empty());
+ VERIFY(!str2.empty());
+
+ VERIFY(str1.length() == 0);
+ VERIFY(str2.length() == 26);
+ VERIFY(str1.size() == 0);
+ VERIFY(str2.size() == 26);
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+
+ // this_type& assign(const this_type& x);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str2;
+
+ str2.assign(str1);
+
+ VERIFY(str1 == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(str2 == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(!str1.empty());
+ VERIFY(!str2.empty());
+
+ VERIFY(str1.length() == 26);
+ VERIFY(str2.length() == 26);
+ VERIFY(str1.size() == 26);
+ VERIFY(str2.size() == 26);
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+
+ // this_type& assign(const this_type& x, size_type position, size_type n);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str2(LITERAL("123456789"));
+
+ str1.assign(str2, 3, 3);
+
+ VERIFY(str1 == LITERAL("456"));
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+
+ // this_type& assign(const value_type* p, size_type n);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign(LITERAL("123456789"), 5);
+
+ VERIFY(str == LITERAL("12345"));
+ VERIFY(str.validate());
+ }
+
+ // this_type& assign(const value_type* p);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign(LITERAL("123"));
+
+ VERIFY(str == LITERAL("123"));
+ VERIFY(str.validate());
+ }
+
+ // this_type& assign(size_type n, value_type c);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign(32, LITERAL('c'));
+
+ VERIFY(str == LITERAL("cccccccccccccccccccccccccccccccc"));
+ VERIFY(str.validate());
+ }
+
+ // this_type& assign(const value_type* pBegin, const value_type* pEnd);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ auto* pLiteral = LITERAL("0123456789");
+ auto* pBegin = pLiteral + 4;
+ auto* pEnd = pLiteral + 7;
+
+ str.assign(pBegin, pEnd);
+
+ VERIFY(str == LITERAL("456"));
+ VERIFY(str.validate());
+ }
+
+ // this_type& assign(this_type&& x);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str2;
+
+ str1.assign(eastl::move(str2));
+
+ VERIFY(str1 != LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(str2 == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str1.empty());
+ VERIFY(!str2.empty());
+
+ VERIFY(str1.length() == 0);
+ VERIFY(str2.length() == 26);
+ VERIFY(str1.size() == 0);
+ VERIFY(str2.size() == 26);
+
+ VERIFY(str1.validate());
+ VERIFY(str2.validate());
+ }
+
+ // this_type& assign(std::initializer_list<value_type>);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign({'1','2','3'});
+
+ VERIFY(str == LITERAL("123"));
+ VERIFY(str.validate());
+ }
+
+ // template <typename OtherCharType>
+ // this_type& assign_convert(const OtherCharType* p);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign_convert(EA_CHAR8("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign_convert(EA_CHAR16("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign_convert(EA_CHAR32("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign_convert(EA_WCHAR("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // template <typename OtherCharType>
+ // this_type& assign_convert(const OtherCharType* p, size_type n);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign_convert(EA_CHAR8("123456789"), 3);
+ VERIFY(str == LITERAL("123"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign_convert(EA_CHAR16("123456789"), 3);
+ VERIFY(str == LITERAL("123"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign_convert(EA_CHAR32("123456789"), 3);
+ VERIFY(str == LITERAL("123"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.assign_convert(EA_WCHAR("123456789"), 3);
+ VERIFY(str == LITERAL("123"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // template <typename OtherStringType>
+ // this_type& assign_convert(const OtherStringType& x);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ eastl::basic_string<char8_t> str2(EA_CHAR8("123456789"));
+
+ str.assign_convert(str2);
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ eastl::basic_string<char16_t> str2(EA_CHAR16("123456789"));
+
+ str.assign_convert(str2);
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ eastl::basic_string<char32_t> str2(EA_CHAR32("123456789"));
+
+ str.assign_convert(str2);
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ eastl::basic_string<wchar_t> str2(EA_WCHAR("123456789"));
+
+ str.assign_convert(str2);
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // iterator begin() EA_NOEXCEPT;
+ // const_iterator begin() const EA_NOEXCEPT;
+ // const_iterator cbegin() const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ auto iBegin = str.begin();
+
+ VERIFY(*iBegin++ == LITERAL('a'));
+ VERIFY(*iBegin++ == LITERAL('b'));
+ VERIFY(*iBegin++ == LITERAL('c'));
+ VERIFY(*iBegin++ == LITERAL('d'));
+ VERIFY(*iBegin++ == LITERAL('e'));
+ VERIFY(*iBegin++ == LITERAL('f'));
+ VERIFY(*(str.begin() + 25) == LITERAL('z'));
+
+ }
+
+ // iterator end() EA_NOEXCEPT;
+ // const_iterator end() const EA_NOEXCEPT;
+ // const_iterator cend() const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ auto iEnd = str.end()-1;
+
+ VERIFY(*iEnd-- == LITERAL('z'));
+ VERIFY(*iEnd-- == LITERAL('y'));
+ VERIFY(*iEnd-- == LITERAL('x'));
+ VERIFY(*iEnd-- == LITERAL('w'));
+ VERIFY(*iEnd-- == LITERAL('v'));
+ VERIFY(*iEnd-- == LITERAL('u'));
+ VERIFY(*(str.end() - 26) == LITERAL('a'));
+ }
+
+ // reverse_iterator rbegin() EA_NOEXCEPT;
+ // const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ // const_reverse_iterator crbegin() const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ auto iRBegin = str.rbegin();
+
+ VERIFY(*iRBegin++ == LITERAL('z'));
+ VERIFY(*iRBegin++ == LITERAL('y'));
+ VERIFY(*iRBegin++ == LITERAL('x'));
+ VERIFY(*iRBegin++ == LITERAL('w'));
+ VERIFY(*iRBegin++ == LITERAL('v'));
+ VERIFY(*iRBegin++ == LITERAL('u'));
+ VERIFY(*(str.rbegin() + 25) == LITERAL('a'));
+ }
+
+ // reverse_iterator rend() EA_NOEXCEPT;
+ // const_reverse_iterator rend() const EA_NOEXCEPT;
+ // const_reverse_iterator crend() const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ auto iREnd = str.rend() - 1;
+
+ VERIFY(*iREnd-- == LITERAL('a'));
+ VERIFY(*iREnd-- == LITERAL('b'));
+ VERIFY(*iREnd-- == LITERAL('c'));
+ VERIFY(*iREnd-- == LITERAL('d'));
+ VERIFY(*iREnd-- == LITERAL('e'));
+ VERIFY(*iREnd-- == LITERAL('f'));
+ VERIFY(*(str.rend() - 26) == LITERAL('z'));
+ }
+
+ // bool empty() const EA_NOEXCEPT;
+ // size_type size() const EA_NOEXCEPT;
+ // size_type length() const EA_NOEXCEPT;
+ // size_type capacity() const EA_NOEXCEPT;
+ // void resize(size_type n, value_type c);
+ // void resize(size_type n);
+ // void set_capacity(size_type n = npos);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(!str.empty());
+ VERIFY(str.size() == 26);
+ VERIFY(str.length() == 26);
+ VERIFY(str.capacity() >= 26);
+
+ str.assign(LITERAL(""));
+ VERIFY(str.empty());
+ VERIFY(str.size() == 0);
+ VERIFY(str.length() == 0);
+ VERIFY(str.capacity() >= 26); // should not free existing capacity
+
+ str.resize(0);
+ VERIFY(str.empty());
+ VERIFY(str.size() == 0);
+ VERIFY(str.length() == 0);
+ VERIFY(str.capacity() >= 26); // should not free existing capacity
+
+ str.set_capacity(0);
+ // VERIFY(str.capacity() == 0); // frees existing capacity, but has a minimun of SSO capacity
+
+ str.resize(32, LITERAL('c'));
+ VERIFY(!str.empty());
+ VERIFY(str.size() == 32);
+ VERIFY(str.length() == 32);
+ VERIFY(str.capacity() >= 32);
+ VERIFY(str == LITERAL("cccccccccccccccccccccccccccccccc"));
+ }
+
+ // void shrink_to_fit
+ {
+ SSOStringType str(LITERAL("a"));
+ str.reserve(100);
+ VERIFY(str.capacity() == 100);
+ str.shrink_to_fit();
+ // string should shrink to SSO
+ VERIFY(str.IsSSO());
+
+ str = LITERAL("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); // 32 characters
+ str.reserve(100);
+ VERIFY(str.capacity() == 100);
+ str.shrink_to_fit();
+ // string should shrink but still be heap
+ VERIFY(str.capacity() == 32);
+ VERIFY(!str.IsSSO());
+ }
+
+ // void set_capacity(n)
+ {
+ const auto *pLiteral32 = LITERAL("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
+ const auto *pLiteral31 = LITERAL("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
+ const auto *pLiteral1 = LITERAL("a");
+ const auto *pLiteral2 = LITERAL("aa");
+
+ SSOStringType str = pLiteral32;
+ // set_capacity(0) - deallocate and reset to SSO;
+ {
+ // heap -> sso
+ VERIFY(!str.IsSSO());
+ str.set_capacity(0);
+ VERIFY(str.IsSSO());
+ VERIFY(str == LITERAL(""));
+ }
+ {
+ // sso -> sso
+ str = pLiteral1;
+ VERIFY(str.IsSSO());
+ str.set_capacity(0);
+ VERIFY(str.IsSSO());
+ VERIFY(str == LITERAL(""));
+ }
+
+ // set_capacity(npos) - set capacity equal to current size - should realloc
+ {
+ // heap -> heap
+ str = pLiteral32;
+ str.reserve(100);
+ VERIFY(!str.IsSSO());
+ VERIFY(str.capacity() == 100);
+ str.set_capacity(StringType::npos);
+ VERIFY(!str.IsSSO());
+ VERIFY(str.capacity() == 32);
+ VERIFY(str == pLiteral32);
+ }
+ {
+ // heap -> sso
+ str = pLiteral1;
+ str.reserve(100);
+ VERIFY(!str.IsSSO());
+ VERIFY(str.capacity() == 100);
+ str.set_capacity(StringType::npos);
+ VERIFY(str.IsSSO());
+ VERIFY(str == pLiteral1);
+ }
+ {
+ // sso -> sso
+ str = pLiteral1;
+ VERIFY(str.IsSSO());
+ str.set_capacity(StringType::npos);
+ VERIFY(str.IsSSO());
+ VERIFY(str == pLiteral1);
+ }
+
+ // set_capacity(n > capacity) - set capacity greater than out current capacity
+ {
+ // heap -> heap
+ str = pLiteral32;
+ VERIFY(!str.IsSSO());
+ auto nSavedCap = str.capacity();
+ str.set_capacity(nSavedCap + 1);
+ VERIFY(!str.IsSSO());
+ VERIFY(str == pLiteral32);
+ VERIFY(str.capacity() > nSavedCap);
+ }
+ {
+ // sso -> heap
+ str.set_capacity(0); // reset to sso
+ str = pLiteral1;
+ VERIFY(str.IsSSO());
+ auto nSavedCap = str.capacity();
+ str.set_capacity(nSavedCap + 1);
+ VERIFY(!str.IsSSO());
+ VERIFY(str == pLiteral1);
+ VERIFY(str.capacity() > nSavedCap);
+ }
+ {
+ // sso -> sso
+ str.set_capacity(0); // reset to sso
+ str = pLiteral1;
+ VERIFY(str.IsSSO());
+ auto nSavedCap = str.capacity();
+ str.set_capacity(str.size() + 1);
+ VERIFY(str.IsSSO());
+ VERIFY(str == pLiteral1);
+ VERIFY(str.capacity() == nSavedCap);
+ }
+
+ // set_capacity(n < size) - set capacity less than current size, str should truncate
+ {
+ // sso -> sso
+ str = pLiteral2;
+ VERIFY(str.IsSSO());
+ str.set_capacity(1);
+ VERIFY(str.IsSSO());
+ VERIFY(str == pLiteral1);
+ }
+ {
+ // heap -> sso
+ str = pLiteral32;
+ VERIFY(!str.IsSSO());
+ str.set_capacity(1);
+ VERIFY(str.IsSSO());
+ VERIFY(str == pLiteral1);
+ }
+ {
+ // heap -> heap
+ str = pLiteral32;
+ VERIFY(!str.IsSSO());
+ str.set_capacity(31);
+ VERIFY(!str.IsSSO());
+ VERIFY(str == pLiteral31);
+ }
+ }
+
+ // void reserve(size_type = 0);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(!str.empty());
+ VERIFY(str.size() == 26);
+ VERIFY(str.length() == 26);
+ VERIFY(str.capacity() >= 26);
+
+ // verifies that we allocate memory
+ str.reserve(64);
+ VERIFY(!str.empty());
+ VERIFY(str.size() == 26);
+ VERIFY(str.length() == 26);
+ VERIFY(str.capacity() >= 64);
+
+ // verifies that we do not free memory
+ str.reserve(32);
+ VERIFY(!str.empty());
+ VERIFY(str.size() == 26);
+ VERIFY(str.length() == 26);
+ VERIFY(str.capacity() >= 64);
+ }
+
+ // void force_size(size_type n);
+ {
+ // force_size does not write terminating null, meant to set size when using external
+ // string writing mnethods like strcpy or sprintf
+ StringType str(LITERAL("aaa"));
+ VERIFY(str.size() == 3);
+ str.force_size(0);
+ VERIFY(str.size() == 0);
+ str.reserve(4); // 32 bit platform with char32_t can only hold 2 characters
+ str.force_size(4);
+ VERIFY(str.size() == 4);
+ str[4] = '0';
+ str = LITERAL("aaa");
+ VERIFY(str.size() == 3);
+ }
+
+ // const value_type* data() const EA_NOEXCEPT;
+ // const value_type* c_str() const EA_NOEXCEPT;
+ {
+ const StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ const typename StringType::value_type* pData = str.data();
+ const typename StringType::value_type* pCStr = str.c_str();
+
+ VERIFY(pData != nullptr);
+ VERIFY(pCStr != nullptr);
+ VERIFY(pData == pCStr);
+ VERIFY(EA::StdC::Memcmp(pData, pCStr, str.size()) == 0);
+ }
+
+ // value_type* data() EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ typename StringType::value_type* pData = str.data();
+
+ VERIFY(pData != nullptr);
+ VERIFY(EA::StdC::Memcmp(pData, LITERAL("abcdefghijklmnopqrstuvwxyz"), str.size()) == 0);
+ }
+
+ // reference operator[](size_type n);
+ // const_reference operator[](size_type n) const;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str[0] == LITERAL('a'));
+ VERIFY(str[14] == LITERAL('o'));
+ VERIFY(str[25] == LITERAL('z'));
+ }
+
+ // reference at(size_type n);
+ // const_reference at(size_type n) const;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str.at(0) == LITERAL('a'));
+ VERIFY(str.at(14) == LITERAL('o'));
+ VERIFY(str.at(25) == LITERAL('z'));
+ }
+
+ // reference front();
+ // const_reference front() const;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(str.front() == LITERAL('a'));
+ }
+
+ // reference back();
+ // const_reference back() const;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(str.back() == LITERAL('z'));
+ }
+
+ // this_type& operator+=(const this_type& x);
+ // this_type& operator+=(const value_type* p);
+ // this_type& operator+=(value_type c);
+ {
+ StringType str1(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ StringType str2(LITERAL("123"));
+ str1 += str2;
+ str1 += LITERAL("456");
+ str1 += LITERAL('7');
+
+ VERIFY(str1 == LITERAL("abcdefghijklmnopqrstuvwxyz1234567"));
+ }
+
+ // this_type& append(const this_type& x);
+ // this_type& append(const this_type& x, size_type position, size_type n);
+ // this_type& append(const value_type* p, size_type n);
+ // this_type& append(const value_type* p);
+ // this_type& append(size_type n, value_type c);
+ // this_type& append(const value_type* pBegin, const value_type* pEnd);
+ {
+ const StringType src(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ StringType str;
+ str.append(StringType(LITERAL("abcd"))); // "abcd"
+ str.append(src, 4, 4); // "abcdefgh"
+ str.append(src.data() + 8, 4); // "abcdefghijkl"
+ str.append(LITERAL("mnop")); // "abcdefghijklmnop"
+ str.append(1, LITERAL('q')); // "abcdefghijklmnopq"
+ str.append(src.data() + 17, src.data() + 26); // "abcdefghijklmnopqrstuvwxyz"
+
+ VERIFY(str == src);
+ }
+
+ // this_type& append_sprintf_va_list(const value_type* pFormat, va_list arguments);
+ // this_type& append_sprintf(const value_type* pFormat, ...);
+ {
+ #if EASTL_SNPRINTF_TESTS_ENABLED
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.append_sprintf(LITERAL("Hello, %d"), 42);
+
+ VERIFY(str == LITERAL("abcdefghijklmnopqrstuvwxyzHello, 42"));
+ VERIFY(str.validate());
+ #endif
+ }
+
+ // template <typename OtherCharType>
+ // this_type& append_convert(const OtherCharType* p);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str;
+ str.append_convert(EA_CHAR8("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str;
+ str.append_convert(EA_CHAR16("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str;
+ str.append_convert(EA_CHAR32("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str;
+ str.append_convert(EA_WCHAR("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // template <typename OtherCharType>
+ // this_type& append_convert(const OtherCharType* p, size_type n);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str;
+ str.append_convert(EA_CHAR8("123456789"), 5);
+ VERIFY(str == LITERAL("12345"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str;
+ str.append_convert(EA_CHAR16("123456789"), 5);
+ VERIFY(str == LITERAL("12345"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str;
+ str.append_convert(EA_CHAR32("123456789"), 5);
+ VERIFY(str == LITERAL("12345"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str;
+ str.append_convert(EA_WCHAR("123456789"), 5);
+ VERIFY(str == LITERAL("12345"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // template <typename OtherStringType>
+ // this_type& append_convert(const OtherStringType& x);
+ {
+ {
+ #if defined(EA_CHAR8)
+ StringType str;
+ str.append_convert(eastl::u8string(EA_CHAR8("123456789")));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR16)
+ StringType str;
+ str.append_convert(eastl::string16(EA_CHAR16("123456789")));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_CHAR32)
+ StringType str;
+ str.append_convert(eastl::string32(EA_CHAR32("123456789")));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ {
+ #if defined(EA_WCHAR)
+ StringType str;
+ str.append_convert(eastl::wstring(EA_WCHAR("123456789")));
+ VERIFY(str == LITERAL("123456789"));
+ VERIFY(str.validate());
+ #endif
+ }
+ }
+
+ // void push_back(value_type c);
+ {
+ StringType str;
+ const StringType src(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ eastl::for_each(eastl::begin(src), eastl::end(src), [&str](const typename StringType::value_type& c)
+ { str.push_back(c); });
+
+ VERIFY(str == src);
+ VERIFY(str.validate());
+ }
+
+ // void pop_back();
+ {
+ StringType str(LITERAL("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+
+ str.pop_back(); VERIFY(str == LITERAL("12345678"));
+ str.pop_back(); VERIFY(str == LITERAL("1234567"));
+ str.pop_back(); VERIFY(str == LITERAL("123456"));
+ str.pop_back(); VERIFY(str == LITERAL("12345"));
+ str.pop_back(); VERIFY(str == LITERAL("1234"));
+ str.pop_back(); VERIFY(str == LITERAL("123"));
+ str.pop_back(); VERIFY(str == LITERAL("12"));
+ str.pop_back(); VERIFY(str == LITERAL("1"));
+ str.pop_back(); VERIFY(str == LITERAL(""));
+
+ VERIFY(str.validate());
+ }
+
+ // this_type& insert(size_type position, const this_type& x);
+ // this_type& insert(size_type position, const this_type& x, size_type beg, size_type n);
+ // this_type& insert(size_type position, const value_type* p, size_type n);
+ // this_type& insert(size_type position, const value_type* p);
+ // this_type& insert(size_type position, size_type n, value_type c);
+ // iterator insert(const_iterator p, value_type c);
+ // iterator insert(const_iterator p, size_type n, value_type c);
+ // iterator insert(const_iterator p, const value_type* pBegin, const value_type* pEnd);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ str.insert((typename StringType::size_type)0, (typename StringType::size_type)1, LITERAL('1')); // todo: elminiate the cast to disambiguate
+ VERIFY(str == LITERAL("1abcdefghijklmnopqrstuvwxyz"));
+
+ str.insert(2, LITERAL("234"));
+ VERIFY(str == LITERAL("1a234bcdefghijklmnopqrstuvwxyz"));
+
+ str.insert(15, StringType(LITERAL("567")));
+ VERIFY(str == LITERAL("1a234bcdefghijk567lmnopqrstuvwxyz"));
+
+ str.insert(30, StringType(LITERAL(" is an example of a substring")), 1, 14);
+ VERIFY(str == LITERAL("1a234bcdefghijk567lmnopqrstuvwis an example xyz"));
+
+ {
+ StringType strSSO;
+ auto nSSOCap = strSSO.capacity();
+ StringType strCheck;
+ strCheck.append(nSSOCap, LITERAL('a'));
+
+ strSSO.append(nSSOCap - 1, LITERAL('a'));
+
+ strSSO.insert(strSSO.size() - 1, LITERAL("a"));
+ VERIFY(strSSO.validate());
+ VERIFY(strSSO == strCheck);
+ }
+
+ {
+ StringType strSSO;
+ auto nSSOCap = strSSO.capacity();
+
+ // 32 bit platform with char32_t can only hold 2 characters in SSO
+ if (nSSOCap - 2 > 0)
+ {
+ StringType strCheck;
+ strCheck.append(nSSOCap, LITERAL('a'));
+
+ strSSO.append(nSSOCap - 2, LITERAL('a'));
+
+ strSSO.insert(strSSO.size() - 1, LITERAL("aa"));
+ VERIFY(strSSO.validate());
+ VERIFY(strSSO == strCheck);
+ }
+ }
+ }
+
+ // iterator insert(const_iterator p, std::initializer_list<value_type>);
+ {
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ StringType str;
+ str.insert(str.begin(), {'a','b','c'});
+ str.insert(str.end(), {'d','e','f'});
+ str.insert(str.begin() + 3, {'1','2','3'});
+
+ VERIFY(str == LITERAL("abc123def"));
+ VERIFY(str.validate());
+ #endif
+ }
+
+ // insert(const_iterator p, value_type c)
+ {
+ StringType str = LITERAL("aaa");
+ auto it = str.insert(str.end(), 'b');
+ VERIFY(*it == LITERAL('b'));
+ VERIFY(str == LITERAL("aaab"));
+ it = str.insert(str.begin(), 'c');
+ VERIFY(*it == LITERAL('c'));
+ VERIFY(str == LITERAL("caaab"));
+ it = str.insert(str.begin() + 2, 'd');
+ VERIFY(*it == LITERAL('d'));
+ VERIFY(str == LITERAL("cadaab"));
+ }
+
+ // this_type& erase(size_type position = 0, size_type n = npos);
+ // iterator erase(const_iterator p);
+ // iterator erase(const_iterator pBegin, const_iterator pEnd);
+ // reverse_iterator erase(reverse_iterator position);
+ // reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ str.erase(0,5);
+ VERIFY(str == LITERAL("fghijklmnopqrstuvwxyz"));
+
+ str.erase(5,10);
+ VERIFY(str == LITERAL("fghijuvwxyz"));
+
+ str.erase(str.find(LITERAL('v')));
+ VERIFY(str == LITERAL("fghiju"));
+
+ str.erase(str.find(LITERAL('g')), str.find(LITERAL('i')));
+ VERIFY(str == LITERAL("fju"));
+
+ typename StringType::const_iterator it = str.begin() + 1; // 'j'
+ str.erase(it);
+ VERIFY(str == LITERAL("fu"));
+
+ }
+
+ // void clear() EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("123456789"));
+ VERIFY(str == LITERAL("123456789"));
+
+ str.clear();
+ VERIFY(str == LITERAL(""));
+ VERIFY(str.empty());
+ VERIFY(str.validate());
+ }
+
+
+ // pointer detach() EA_NOEXCEPT;
+ {
+ {
+ // Heap
+ auto* pLiteral = LITERAL("abcdefghijklmnopqrstuvwxyz");
+ StringType str(pLiteral);
+ const auto sz = str.size() + 1; // +1 for null-terminator
+
+ auto* pDetach = str.detach();
+
+ VERIFY(pDetach != nullptr);
+ VERIFY(EA::StdC::Strcmp(pDetach, pLiteral) == 0);
+ VERIFY(pDetach != pLiteral);
+ VERIFY(str.empty());
+ VERIFY(str.size() == 0);
+
+ str.get_allocator().deallocate(pDetach, sz);
+ }
+
+ {
+ // SSO
+ auto* pLiteral = LITERAL("a");
+ StringType str(pLiteral);
+ const auto sz = str.size() + 1; // +1 for null-terminator
+
+ auto* pDetach = str.detach();
+
+ VERIFY(pDetach != nullptr);
+ VERIFY(EA::StdC::Strcmp(pDetach, pLiteral) == 0);
+ VERIFY(pDetach != pLiteral);
+ VERIFY(str.empty());
+ VERIFY(str.size() == 0);
+
+ str.get_allocator().deallocate(pDetach, sz);
+ }
+
+ {
+ // SSO, empty string
+ auto* pLiteral = LITERAL("");
+ StringType str(pLiteral);
+ const auto sz = str.size() + 1; // +1 for null-terminator
+
+ auto* pDetach = str.detach();
+
+ VERIFY(pDetach != nullptr);
+ VERIFY(EA::StdC::Strcmp(pDetach, pLiteral) == 0);
+ VERIFY(pDetach != pLiteral);
+ VERIFY(str.empty());
+ VERIFY(str.size() == 0);
+
+ str.get_allocator().deallocate(pDetach, sz);
+ }
+
+ {
+ // SSO, empty string via default ctor
+ StringType str;
+ const auto sz = str.size() + 1; // +1 for null-terminator
+
+ auto* pDetach = str.detach();
+
+ VERIFY(pDetach != nullptr);
+ VERIFY(pDetach[0] == 0);
+ VERIFY(str.empty());
+ VERIFY(str.size() == 0);
+
+ str.get_allocator().deallocate(pDetach, sz);
+ }
+ }
+
+ // this_type& replace(size_type position, size_type n, const this_type& x);
+ // this_type& replace(size_type pos1, size_type n1, const this_type& x, size_type pos2, size_type n2);
+ // this_type& replace(size_type position, size_type n1, const value_type* p, size_type n2);
+ // this_type& replace(size_type position, size_type n1, const value_type* p);
+ // this_type& replace(size_type position, size_type n1, size_type n2, value_type c);
+ // this_type& replace(const_iterator first, const_iterator last, const this_type& x);
+ // this_type& replace(const_iterator first, const_iterator last, const value_type* p, size_type n);
+ // this_type& replace(const_iterator first, const_iterator last, const value_type* p);
+ // this_type& replace(const_iterator first, const_iterator last, size_type n, value_type c);
+ // this_type& replace(const_iterator first, const_iterator last, const value_type* pBegin, const value_type* pEnd);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ str.replace(5, 10, StringType(LITERAL("123")));
+ VERIFY(str == LITERAL("abcde123pqrstuvwxyz"));
+
+ str.replace(13, 1, StringType(LITERAL("0123456789")), 4, 6 );
+ VERIFY(str == LITERAL("abcde123pqrst456789vwxyz"));
+
+ str.replace(24, 1, LITERAL("0123456789"));
+ VERIFY(str == LITERAL("abcde123pqrst456789vwxyz0123456789"));
+
+ str.replace(16, 4, 4, LITERAL('@'));
+ VERIFY(str == LITERAL("abcde123pqrst456@@@@wxyz0123456789"));
+ }
+
+ // size_type copy(value_type* p, size_type n, size_type position = 0) const;
+ {
+ typename StringType::value_type buf[64];
+
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.copy(buf, 10, 10);
+
+ VERIFY(EA::StdC::Memcmp(buf, LITERAL("klmnopqrst"), 10) == 0);
+ }
+
+ // size_type find(const this_type& x, size_type position = 0) const EA_NOEXCEPT;
+ // size_type find(const value_type* p, size_type position = 0) const;
+ // size_type find(const value_type* p, size_type position, size_type n) const;
+ // size_type find(value_type c, size_type position = 0) const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str.find(StringType(LITERAL("d"))) != StringType::npos);
+ VERIFY(str.find(StringType(LITERAL("tuv"))) != StringType::npos);
+ VERIFY(str.find(StringType(LITERAL("123r"))) == StringType::npos);
+
+ VERIFY(str.find(LITERAL("d")) != StringType::npos);
+ VERIFY(str.find(LITERAL("tuv")) != StringType::npos);
+ VERIFY(str.find(LITERAL("123r")) == StringType::npos);
+
+ VERIFY(str.find(LITERAL("d"), 0) != StringType::npos);
+ VERIFY(str.find(LITERAL("tuv"), 2) != StringType::npos);
+ VERIFY(str.find(LITERAL("123r"), 2) == StringType::npos);
+
+ VERIFY(str.find(LITERAL('d'), 0) != StringType::npos);
+ VERIFY(str.find(LITERAL('t'), 2) != StringType::npos);
+ VERIFY(str.find(LITERAL('1'), 2) == StringType::npos);
+ }
+
+ // size_type rfind(const this_type& x, size_type position = npos) const EA_NOEXCEPT;
+ // size_type rfind(const value_type* p, size_type position = npos) const;
+ // size_type rfind(const value_type* p, size_type position, size_type n) const;
+ // size_type rfind(value_type c, size_type position = npos) const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str.rfind(StringType(LITERAL("d"))) != StringType::npos);
+ VERIFY(str.rfind(StringType(LITERAL("tuv"))) != StringType::npos);
+ VERIFY(str.rfind(StringType(LITERAL("123r"))) == StringType::npos);
+
+ VERIFY(str.rfind(LITERAL("d")) != StringType::npos);
+ VERIFY(str.rfind(LITERAL("tuv")) != StringType::npos);
+ VERIFY(str.rfind(LITERAL("123r")) == StringType::npos);
+
+ VERIFY(str.rfind(LITERAL("d"), 20) != StringType::npos);
+ VERIFY(str.rfind(LITERAL("tuv"), 20) != StringType::npos);
+ VERIFY(str.rfind(LITERAL("123r"), 20) == StringType::npos);
+
+ VERIFY(str.rfind(LITERAL('d'), 20) != StringType::npos);
+ VERIFY(str.rfind(LITERAL('t'), 20) != StringType::npos);
+ VERIFY(str.rfind(LITERAL('1'), 20) == StringType::npos);
+ }
+
+ // size_type find_first_of(const this_type& x, size_type position = 0) const EA_NOEXCEPT;
+ // size_type find_first_of(const value_type* p, size_type position = 0) const;
+ // size_type find_first_of(const value_type* p, size_type position, size_type n) const;
+ // size_type find_first_of(value_type c, size_type position = 0) const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(str.find_first_of(StringType(LITERAL("aaa"))) == 0);
+ VERIFY(str.find_first_of(LITERAL("aab")) == 0);
+ VERIFY(str.find_first_of(LITERAL("baab")) == 0);
+ VERIFY(str.find_first_of(LITERAL("ceg")) == 10);
+ VERIFY(str.find_first_of(LITERAL("eeef"), 1, 2) == 18);
+ VERIFY(str.find_first_of(LITERAL("eeef"), 1, 4) == 18);
+ VERIFY(str.find_first_of(LITERAL('g')) == 26);
+ VERIFY(str.find_first_of(LITERAL('$')) == StringType::npos);
+ }
+
+ // size_type find_last_of(const this_type& x, size_type position = npos) const EA_NOEXCEPT;
+ // size_type find_last_of(const value_type* p, size_type position = npos) const;
+ // size_type find_last_of(const value_type* p, size_type position, size_type n) const;
+ // size_type find_last_of(value_type c, size_type position = npos) const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(str.find_last_of(StringType(LITERAL("aaa"))) == 4);
+ VERIFY(str.find_last_of(LITERAL("aab")) == 9);
+ VERIFY(str.find_last_of(LITERAL("baab")) == 9);
+ VERIFY(str.find_last_of(LITERAL("ceg")) == 27);
+ // VERIFY(str.find_last_of(LITERAL("eeef"), 1, 2) == StringType::npos); // todo: FIX ME
+ // VERIFY(str.find_last_of(LITERAL("eeef"), 1, 4) == StringType::npos); // todo: FIX ME
+ VERIFY(str.find_last_of(LITERAL('g')) == 27);
+ VERIFY(str.find_last_of(LITERAL('$')) == StringType::npos);
+ }
+
+ // size_type find_first_not_of(const this_type& x, size_type position = 0) const EA_NOEXCEPT;
+ // size_type find_first_not_of(const value_type* p, size_type position = 0) const;
+ // size_type find_first_not_of(const value_type* p, size_type position, size_type n) const;
+ // size_type find_first_not_of(value_type c, size_type position = 0) const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(str.find_first_not_of(StringType(LITERAL("abcdfg"))) == 18);
+ VERIFY(str.find_first_not_of(LITERAL("abcdfg")) == 18);
+ // VERIFY(str.find_first_not_of(LITERAL("abcdfg"), 2, 2) == 0); // todo: FIX ME
+ // VERIFY(str.find_first_not_of(LITERAL("abcdfg"), 0, 2) == 10); // todo: FIX ME
+ VERIFY(str.find_first_not_of(LITERAL('a')) == 5);
+ }
+
+ // size_type find_last_not_of(const this_type& x, size_type position = npos) const EA_NOEXCEPT;
+ // size_type find_last_not_of(const value_type* p, size_type position = npos) const;
+ // size_type find_last_not_of(const value_type* p, size_type position, size_type n) const;
+ // size_type find_last_not_of(value_type c, size_type position = npos) const EA_NOEXCEPT;
+ {
+ StringType str(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(str.find_last_not_of(StringType(LITERAL("a"))) == 28);
+ VERIFY(str.find_last_not_of(StringType(LITERAL("abcdfg"))) == 28);
+ VERIFY(str.find_last_not_of(StringType(LITERAL("abcdfgh"))) == 22);
+ VERIFY(str.find_last_not_of(LITERAL("abcdfgh")) == 22);
+ // VERIFY(str.find_last_not_of(LITERAL("abcdfg"), 2, 2) == 0); // todo: FIX ME
+ // VERIFY(str.find_last_not_of(LITERAL("abcdfg"), 0, 2) == 10); // todo: FIX ME
+ VERIFY(str.find_last_not_of(LITERAL('a')) == 28);
+ }
+
+ // this_type substr(size_type position = 0, size_type n = npos) const;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ auto substring = str.substr(0, 6);
+ VERIFY(substring == LITERAL("abcdef"));
+
+ substring = str.substr(0, 0);
+ VERIFY(substring == LITERAL(""));
+
+ substring = str.substr(16, 0);
+ VERIFY(substring == LITERAL(""));
+
+ substring = str.substr(16, 42);
+ VERIFY(substring == LITERAL("qrstuvwxyz"));
+ }
+
+ // int compare(const this_type& x) const EA_NOEXCEPT;
+ // int compare(size_type pos1, size_type n1, const this_type& x) const;
+ // int compare(size_type pos1, size_type n1, const this_type& x, size_type pos2, size_type n2) const;
+ // int compare(const value_type* p) const;
+ // int compare(size_type pos1, size_type n1, const value_type* p) const;
+ // int compare(size_type pos1, size_type n1, const value_type* p, size_type n2) const;
+ // static int compare(const value_type* pBegin1, const value_type* pEnd1, const value_type* pBegin2, const value_type* pEnd2);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str.compare(StringType(LITERAL("abcdefghijklmnopqrstuvwxyz"))) == 0);
+ VERIFY(str.compare(StringType(LITERAL("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))) != 0);
+ VERIFY(str.compare(StringType(LITERAL("abcdefghijklmnopqrstuvwxyz123"))) != 0);
+ VERIFY(str.compare(LITERAL("abcdefghijklmnopqrstuvwxyz")) == 0);
+ VERIFY(str.compare(LITERAL("abcdefghijklmnopqrstuvwxyz123")) != 0);
+ VERIFY(str.compare(LITERAL("ABCDEFGHIJKLMNOPQRSTUVWXYZ123")) != 0);
+ }
+
+ // int comparei(const this_type& x) const EA_NOEXCEPT;
+ // int comparei(const value_type* p) const;
+ // static int comparei(const value_type* pBegin1, const value_type* pEnd1, const value_type* pBegin2, const value_type* pEnd2);
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str.comparei(StringType(LITERAL("abcdefghijklmnopqrstuvwxyz"))) == 0);
+ VERIFY(str.comparei(StringType(LITERAL("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))) == 0);
+ VERIFY(str.comparei(StringType(LITERAL("abcdefghijklmnopqrstuvwxyz123"))) != 0);
+ VERIFY(str.comparei(LITERAL("abcdefghijklmnopqrstuvwxyz")) == 0);
+ VERIFY(str.comparei(LITERAL("ABCDEFGHIJKLMNOPQRSTUVWXYZ")) == 0);
+ VERIFY(str.comparei(LITERAL("abcdefghijklmnopqrstuvwxyz123")) != 0);
+ }
+
+ // void make_lower();
+ {
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.make_lower();
+ VERIFY(str == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ }
+ {
+ StringType str(LITERAL("ABCDEFGHIJKLMNOPQRSTUVWXYZ"));
+ str.make_lower();
+ VERIFY(str == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ }
+ {
+ StringType str(LITERAL("123456789~!@#$%^&*()_+"));
+ str.make_lower();
+ VERIFY(str == LITERAL("123456789~!@#$%^&*()_+"));
+ }
+ }
+
+ // void make_upper();
+ {
+ {
+ StringType str(LITERAL("ABCDEFGHIJKLMNOPQRSTUVWXYZ"));
+ str.make_upper();
+ VERIFY(str == LITERAL("ABCDEFGHIJKLMNOPQRSTUVWXYZ"));
+ }
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ str.make_upper();
+ VERIFY(str == LITERAL("ABCDEFGHIJKLMNOPQRSTUVWXYZ"));
+ }
+ {
+ StringType str(LITERAL("123456789~!@#$%^&*()_+"));
+ str.make_upper();
+ VERIFY(str == LITERAL("123456789~!@#$%^&*()_+"));
+ }
+ }
+
+ // void ltrim();
+ // void rtrim();
+ // void trim();
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ {
+ StringType rstr(LITERAL("abcdefghijklmnopqrstuvwxyz \t \t\t\t "));
+ rstr.ltrim();
+ VERIFY(str != rstr);
+ }
+ {
+ StringType lstr(LITERAL(" \t abcdefghijklmnopqrstuvwxyz"));
+ lstr.ltrim();
+ VERIFY(str == lstr);
+ }
+ {
+ StringType rstr(LITERAL("abcdefghijklmnopqrstuvwxyz \t\t\t "));
+ rstr.rtrim();
+ VERIFY(str == rstr);
+ }
+ {
+ StringType lstr(LITERAL(" \t abcdefghijklmnopqrstuvwxyz"));
+ lstr.rtrim();
+ VERIFY(str != lstr);
+ }
+ {
+ StringType lrstr(LITERAL(" \t abcdefghijklmnopqrstuvwxyz \t "));
+ lrstr.trim();
+ VERIFY(str == lrstr);
+ }
+ {
+ auto* pLiteral = LITERAL("abcdefghijklmn opqrstuvwxyz");
+ StringType mstr(pLiteral);
+ mstr.trim();
+ VERIFY(mstr == pLiteral);
+ }
+ }
+
+ // void ltrim("a");
+ // void rtrim("b");
+ // void trim("?");
+ {
+ StringType expected(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ {
+ const auto source = LITERAL("abcdefghijklmnopqrstuvwxyz ");
+
+ StringType rstr(source);
+ rstr.ltrim(LITERAL(" "));
+ VERIFY(rstr == source);
+
+ rstr.rtrim(LITERAL(" "));
+ VERIFY(expected == rstr);
+ }
+
+ {
+ const auto source = LITERAL("abcdefghijklmnopqrstuvwxyz \t \t\t\t ");
+
+ StringType rstr(source);
+ rstr.ltrim(LITERAL(" \t"));
+ VERIFY(rstr == source);
+
+ rstr.rtrim(LITERAL(" \t"));
+ VERIFY(expected == rstr);
+ }
+
+ {
+ const auto source = LITERAL(" \t \t\t\t abcdefghijklmnopqrstuvwxyz");
+
+ StringType rstr(source);
+ rstr.rtrim(LITERAL(" \t"));
+ VERIFY(rstr == source);
+
+ rstr.ltrim(LITERAL(" \t"));
+ VERIFY(expected == rstr);
+ }
+
+ {
+ const auto source = LITERAL("$$$%$$$$$$%$$$$$$$$$%$$$$$$$$abcdefghijklmnopqrstuvwxyz*********@*****************@******");
+ StringType rstr(source);
+ rstr.trim(LITERAL("^("));
+ VERIFY(rstr == source);
+ }
+
+ {
+ const auto source = LITERAL("$$$%$$$$$$%$$$$$$$$$%$$$$$$$$abcdefghijklmnopqrstuvwxyz*********@*****************@******");
+ StringType rstr(source);
+ rstr.rtrim(LITERAL("@*"));
+
+ VERIFY(expected != rstr);
+ VERIFY(rstr == LITERAL("$$$%$$$$$$%$$$$$$$$$%$$$$$$$$abcdefghijklmnopqrstuvwxyz"));
+
+ rstr.ltrim(LITERAL("$%"));
+ VERIFY(expected == rstr);
+ }
+
+ {
+ const auto source = LITERAL("abcdefghijklmnopqrstuvwxyz**********************************");
+ StringType rstr(source);
+ rstr.ltrim(LITERAL("*"));
+ VERIFY(expected != source);
+ }
+
+ {
+ const auto source = LITERAL(" ? abcdefghijklmnopqrstuvwxyz**********************************");
+ StringType rstr(source);
+ rstr.trim(LITERAL("*? "));
+ VERIFY(expected != source);
+ }
+ }
+
+ // this_type left(size_type n) const;
+ // this_type right(size_type n) const;
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ auto lstr = str.left(6);
+ VERIFY(lstr == LITERAL("abcdef"));
+
+ auto rstr = str.right(8);
+ VERIFY(rstr == LITERAL("stuvwxyz"));
+ }
+
+ // this_type& sprintf_va_list(const value_type* pFormat, va_list arguments);
+ // this_type& sprintf(const value_type* pFormat, ...);
+ {
+ #if EASTL_SNPRINTF_TESTS_ENABLED
+ StringType str(LITERAL(""));
+
+ str.sprintf(LITERAL("Hello, %d"), 42);
+ VERIFY(str == LITERAL("Hello, 42"));
+ #endif
+ }
+
+ // void force_size(size_type n);
+ {
+ StringType str(LITERAL(""));
+ str.reserve(10);
+
+ auto p = const_cast<typename StringType::value_type*>(str.data());
+ p[0] = 'a';
+ p[1] = 'a';
+ p[2] = 'a';
+ p[3] = '\0';
+
+ str.force_size(3);
+
+ VERIFY(str.size() == 3);
+ VERIFY(str.validate());
+ VERIFY(!str.empty());
+ }
+
+ // test basic_string implicit conversion to basic_string_view
+ // eastl::string implicitly converts to eastl::string_view.
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ [&](basic_string_view<typename StringType::value_type> sv) // simulate api that requires eastl::string_view.
+ {
+ VERIFY(sv.compare(LITERAL("abcdefghijklmnopqrstuvwxyz")) == 0);
+ }(str);
+ }
+
+ // test constructing a eastl::basic_string from an eastl::basic_string_view
+ {
+ using StringViewType = basic_string_view<typename StringType::value_type>;
+ StringViewType sv = LITERAL("abcdefghijklmnopqrstuvwxyz");
+
+ {
+ StringType str(sv);
+ VERIFY(str == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ }
+
+ {
+ StringType str(sv, typename StringType::allocator_type("test"));
+ VERIFY(str == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ }
+
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ VERIFY(sv == str);
+ }
+ }
+
+ // test assigning from an eastl::basic_string_view
+ {
+ using StringViewType = basic_string_view<typename StringType::value_type>;
+ StringViewType sv = LITERAL("abcdefghijklmnopqrstuvwxyz");
+
+ {
+ StringType str;
+ str = sv; // force call to 'operator='
+ VERIFY(str == LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ }
+ }
+
+ // test eastl::erase
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ auto numErased = eastl::erase(str, LITERAL('a'));
+ VERIFY(numErased == 1);
+ numErased = eastl::erase(str, LITERAL('f'));
+ VERIFY(numErased == 1);
+ numErased = eastl::erase(str, LITERAL('l'));
+ VERIFY(numErased == 1);
+ numErased = eastl::erase(str, LITERAL('w'));
+ VERIFY(numErased == 1);
+ numErased = eastl::erase(str, LITERAL('y'));
+ VERIFY(numErased == 1);
+ VERIFY(str == LITERAL("bcdeghijkmnopqrstuvxz"));
+ }
+
+ // test eastl::erase_if
+ {
+ StringType str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+ auto numErased = eastl::erase_if(str, [](auto c) { return c == LITERAL('a') || c == LITERAL('v'); });
+ VERIFY(str == LITERAL("bcdefghijklmnopqrstuwxyz"));
+ VERIFY(numErased == 2);
+ }
+
+ // template<> struct hash<eastl::string>;
+ // template<> struct hash<eastl::wstring>;
+ // template<> struct hash<eastl::u16string>;
+ // template<> struct hash<eastl::u32string>;
+ {
+ // NOTE(rparolin): This is required because the string tests inject custom allocators to assist in debugging.
+ // These custom string types require their own hashing specializations which we emulate by constructing a custom
+ // hashing functor that defers to the eastl::basic_string<CharT> hash implementation; effectively ignoring the
+ // custom allocator.
+ auto LocalHash = [](auto s) -> size_t {
+ using UserStringType = decltype(s);
+ using TargetType = eastl::basic_string<typename UserStringType::value_type>;
+
+ TargetType t(s.data());
+ return eastl::hash<TargetType>{}(t);
+ };
+
+ StringType sw1(LITERAL("Hello, World"));
+ StringType sw2(LITERAL("Hello, World"), 5);
+ StringType sw3(LITERAL("Hello"));
+
+ VERIFY(LocalHash(sw1) != LocalHash(sw2));
+ VERIFY(LocalHash(sw2) == LocalHash(sw3));
+ }
+
+ // test <=> operator
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ StringType sw1(LITERAL("Test String "));
+ StringType sw2(LITERAL("Test String 1"));
+ StringType sw3(LITERAL("Test String 2"));
+ StringType sw4(LITERAL("abcdef"));
+
+ VERIFY((sw1 <=> sw2) != 0);
+ VERIFY((sw1 <=> sw3) != 0);
+ VERIFY((sw2 <=> sw3) != 0);
+ VERIFY((sw1 <=> sw2) < 0);
+ VERIFY((sw1 <=> sw3) < 0);
+ VERIFY((sw2 <=> sw2) == 0);
+ VERIFY((sw2 <=> sw3) < 0);
+ VERIFY((sw2 <=> sw4) < 0);
+ VERIFY((sw4 <=> sw2) > 0);
+ VERIFY((sw4 <=> sw3) > 0);
+ VERIFY((sw3 <=> sw2) > 0);
+ }
+ #endif
+
+ return nErrorCount;
+}
+
+// Required to prevent manual undef of macros when 'TestString.inl' preprocessed at the top of the unit test cpp file.
+#undef TEST_STRING_NAME
+#undef LITERAL
+
diff --git a/EASTL/test/source/TestStringHashMap.cpp b/EASTL/test/source/TestStringHashMap.cpp
new file mode 100644
index 0000000..be7e1f6
--- /dev/null
+++ b/EASTL/test/source/TestStringHashMap.cpp
@@ -0,0 +1,303 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/string_hash_map.h>
+#include <EAStdC/EAString.h>
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::string_hash_map<int>;
+template class eastl::string_hash_map<Align32>;
+
+static const char* strings[] = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t"};
+static const size_t kStringCount = 10; // This is intentionally half the length of strings, so that we can test with strings that are not inserted to the map.
+
+
+int TestStringHashMap()
+{
+ int nErrorCount = 0;
+
+ { // Test declarations
+ string_hash_map<int> stringHashMap;
+
+ string_hash_map<int> stringHashMap2(stringHashMap);
+ EATEST_VERIFY(stringHashMap2.size() == stringHashMap.size());
+ EATEST_VERIFY(stringHashMap2 == stringHashMap);
+
+
+ // allocator_type& get_allocator();
+ // void set_allocator(const allocator_type& allocator);
+ string_hash_map<int>::allocator_type& allocator = stringHashMap.get_allocator();
+ stringHashMap.set_allocator(EASTLAllocatorType());
+ stringHashMap.set_allocator(allocator);
+ // To do: Try to find something better to test here.
+
+
+ // const key_equal& key_eq() const;
+ // key_equal& key_eq();
+ string_hash_map<int> hs;
+ const string_hash_map<int> hsc;
+
+ const string_hash_map<int>::key_equal& ke = hsc.key_eq();
+ hs.key_eq() = ke;
+
+
+ // const char* get_name() const;
+ // void set_name(const char* pName);
+ #if EASTL_NAME_ENABLED
+ stringHashMap.get_allocator().set_name("test");
+ const char* pName = stringHashMap.get_allocator().get_name();
+ EATEST_VERIFY(equal(pName, pName + 5, "test"));
+ #endif
+ }
+
+
+ {
+ string_hash_map<int> stringHashMap;
+
+ // Clear a newly constructed, already empty container.
+ stringHashMap.clear(true);
+ EATEST_VERIFY(stringHashMap.validate());
+ EATEST_VERIFY(stringHashMap.size() == 0);
+ EATEST_VERIFY(stringHashMap.bucket_count() == 1);
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ stringHashMap.insert(strings[i], i);
+
+ EATEST_VERIFY(stringHashMap.validate());
+ EATEST_VERIFY(stringHashMap.size() == kStringCount);
+
+ stringHashMap.clear(true);
+ EATEST_VERIFY(stringHashMap.validate());
+ EATEST_VERIFY(stringHashMap.size() == 0);
+ EATEST_VERIFY(stringHashMap.bucket_count() == 1);
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ stringHashMap.insert(strings[i], i);
+ EATEST_VERIFY(stringHashMap.validate());
+ EATEST_VERIFY(stringHashMap.size() == kStringCount);
+
+ stringHashMap.clear(true);
+ EATEST_VERIFY(stringHashMap.validate());
+ EATEST_VERIFY(stringHashMap.size() == 0);
+ EATEST_VERIFY(stringHashMap.bucket_count() == 1);
+ }
+
+
+ { // Test string_hash_map
+
+ // size_type size() const
+ // bool empty() const
+ // insert_return_type insert(const value_type& value);
+ // insert_return_type insert(const value_type& value, hash_code_t c, node_type* pNodeNew = NULL);
+ // iterator insert(const_iterator, const value_type& value);
+ // iterator find(const key_type& k);
+ // const_iterator find(const key_type& k) const;
+ // size_type count(const key_type& k) const;
+
+ typedef string_hash_map<int> StringHashMapInt;
+
+ StringHashMapInt stringHashMap;
+
+ EATEST_VERIFY(stringHashMap.empty());
+ EATEST_VERIFY(stringHashMap.size() == 0);
+ EATEST_VERIFY(stringHashMap.count(strings[0]) == 0);
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ stringHashMap.insert(strings[i], i);
+
+ EATEST_VERIFY(!stringHashMap.empty());
+ EATEST_VERIFY(stringHashMap.size() == kStringCount);
+ EATEST_VERIFY(stringHashMap.count(strings[0]) == 1);
+
+ int j = 0;
+ for (StringHashMapInt::iterator it = stringHashMap.begin(); it != stringHashMap.end(); ++it, ++j)
+ {
+ int value = (*it).second;
+ EATEST_VERIFY(value < (int)kStringCount);
+ }
+
+ for(int i = 0; i < (int)kStringCount * 2; i++)
+ {
+ StringHashMapInt::iterator it = stringHashMap.find(strings[i]);
+
+ if (i < (int)kStringCount)
+ {
+ EATEST_VERIFY(it != stringHashMap.end());
+ const char* k = it->first;
+ int v = it->second;
+ EATEST_VERIFY(EA::StdC::Strcmp(k, strings[i]) == 0);
+ EATEST_VERIFY(v == i);
+ }
+ else
+ EATEST_VERIFY(it == stringHashMap.end());
+ }
+
+ StringHashMapInt::insert_return_type result = stringHashMap.insert("EASTLTEST");
+ EATEST_VERIFY(result.second == true);
+ result = stringHashMap.insert("EASTLTEST");
+ EATEST_VERIFY(result.second == false);
+ result.first->second = 0;
+
+ // iterator erase(const_iterator);
+ size_t nExpectedSize = stringHashMap.size();
+
+ StringHashMapInt::iterator itD = stringHashMap.find("d");
+ EATEST_VERIFY(itD != stringHashMap.end());
+
+ // erase the element and verify that the size has decreased
+ stringHashMap.erase(itD);
+ nExpectedSize--;
+ EATEST_VERIFY(stringHashMap.size() == nExpectedSize);
+
+ // verify that erased element is gone
+ itD = stringHashMap.find(strings[3]);
+ EATEST_VERIFY(itD == stringHashMap.end());
+
+ // iterator erase(const char*)
+ StringHashMapInt::size_type n = stringHashMap.erase(strings[4]);
+ nExpectedSize--;
+ EATEST_VERIFY(n == 1);
+ EATEST_VERIFY(stringHashMap.size() == nExpectedSize);
+
+
+ // mapped_type& operator[](const key_type& key)
+ stringHashMap.clear();
+
+ int x = stringHashMap["A"]; // A default-constructed int (i.e. 0) should be returned.
+ EATEST_VERIFY(x == 0);
+
+ stringHashMap["B"] = 1;
+ x = stringHashMap["B"];
+ EATEST_VERIFY(x == 1); // Verify that the value we assigned is returned and a default-constructed value is not returned.
+
+ stringHashMap["A"] = 10; // Overwrite our previous 0 with 10.
+ stringHashMap["B"] = 11;
+ x = stringHashMap["A"];
+ EATEST_VERIFY(x == 10); // Verify the value is as expected.
+ x = stringHashMap["B"];
+ EATEST_VERIFY(x == 11);
+
+ }
+
+
+ {
+ // string_hash_map(const allocator_type& allocator);
+ // string_hash_map& operator=(const this_type& x);
+ // bool validate() const;
+
+ string_hash_map<int> stringHashMap1(EASTLAllocatorType("TestStringHashMap"));
+ string_hash_map<int> stringHashMap2(stringHashMap1);
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ {
+ stringHashMap1.insert(strings[i], i);
+ }
+
+ stringHashMap2 = stringHashMap1;
+ string_hash_map<int> stringHashMap3(stringHashMap1);
+
+ EATEST_VERIFY(stringHashMap1.validate());
+ EATEST_VERIFY(stringHashMap2.validate());
+ EATEST_VERIFY(stringHashMap3.validate());
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ {
+ EATEST_VERIFY(stringHashMap1[strings[i]] == stringHashMap2[strings[i]]);
+ EATEST_VERIFY(stringHashMap1[strings[i]] == stringHashMap3[strings[i]]);
+ }
+
+ }
+
+ // pair<iterator, bool> insert_or_assign(const char* key, const T& value);
+ {
+ {
+ string_hash_map<int> m;
+
+ m.insert_or_assign("hello", 0);
+ EATEST_VERIFY(m["hello"] == 0);
+
+ m.insert_or_assign("hello", 42);
+ EATEST_VERIFY(m["hello"] == 42);
+
+ m.insert_or_assign("hello", 43);
+ EATEST_VERIFY(m["hello"] == 43);
+
+ m.insert_or_assign("hello", 1143);
+ EATEST_VERIFY(m["hello"] == 1143);
+
+ EATEST_VERIFY(m.size() == 1);
+ m.clear();
+ EATEST_VERIFY(m.size() == 0);
+ }
+
+ {
+ string_hash_map<int> m;
+ m.insert_or_assign("hello", 0);
+ m.insert_or_assign("hello2", 0);
+
+ EATEST_VERIFY(m.size() == 2);
+ m.clear();
+ EATEST_VERIFY(m.size() == 0);
+ }
+
+ {
+ string_hash_map<int> m;
+ m.insert_or_assign("hello", 0);
+ m.insert_or_assign("hello2", 0);
+
+ EATEST_VERIFY(m.size() == 2);
+ m.clear(true);
+ EATEST_VERIFY(m.size() == 0);
+ }
+
+ {
+ string_hash_map<int> m;
+ m.insert_or_assign("hello", 0);
+ m.insert_or_assign("hello2", 0);
+
+ EATEST_VERIFY(m.size() == 2);
+ m.clear(false);
+ EATEST_VERIFY(m.size() == 0);
+ }
+
+ {
+ string_hash_map<TestObject> m;
+
+ m.insert_or_assign("hello", TestObject(42));
+ EATEST_VERIFY(m["hello"].mX == 42);
+
+ m.insert_or_assign("hello", TestObject(43));
+ EATEST_VERIFY(m["hello"].mX == 43);
+
+ EATEST_VERIFY(m.size() == 1);
+ }
+
+ {
+ typedef string_hash_map<TestObject, hash<const char*>, str_equal_to<const char*>, CountingAllocator> counting_string_hash_map;
+ counting_string_hash_map m;
+ EATEST_VERIFY(CountingAllocator::getActiveAllocationCount() == 0);
+
+ m.insert_or_assign("hello", TestObject(42));
+ EATEST_VERIFY(CountingAllocator::getActiveAllocationCount() == 3);
+ EATEST_VERIFY(m["hello"].mX == 42);
+ EATEST_VERIFY(CountingAllocator::getActiveAllocationCount() == 3);
+
+ m.insert_or_assign("hello", TestObject(43));
+ EATEST_VERIFY(CountingAllocator::getActiveAllocationCount() == 3);
+ EATEST_VERIFY(m["hello"].mX == 43);
+ EATEST_VERIFY(CountingAllocator::getActiveAllocationCount() == 3);
+
+ EATEST_VERIFY(m.size() == 1);
+ }
+ EATEST_VERIFY(CountingAllocator::getActiveAllocationCount() == 0);
+ }
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestStringMap.cpp b/EASTL/test/source/TestStringMap.cpp
new file mode 100644
index 0000000..4499fa9
--- /dev/null
+++ b/EASTL/test/source/TestStringMap.cpp
@@ -0,0 +1,207 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/string_map.h>
+#include <EAStdC/EAString.h>
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::string_map<int>;
+template class eastl::string_map<Align32>;
+
+static const char* strings[] = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t" };
+static const size_t kStringCount = 10; // This is intentionally half the length of strings, so that we can test with strings that are not inserted to the map.
+
+
+int TestStringMap()
+{
+ int nErrorCount = 0;
+
+ { // Test declarations
+ string_map<int> stringMap;
+
+ string_map<int> stringMap2(stringMap);
+ EATEST_VERIFY(stringMap2.size() == stringMap.size());
+ EATEST_VERIFY(stringMap2 == stringMap);
+
+
+ // allocator_type& get_allocator();
+ // void set_allocator(const allocator_type& allocator);
+ string_map<int>::allocator_type& allocator = stringMap.get_allocator();
+ stringMap.set_allocator(EASTLAllocatorType());
+ stringMap.set_allocator(allocator);
+ // To do: Try to find something better to test here.
+
+
+ // const char* get_name() const;
+ // void set_name(const char* pName);
+#if EASTL_NAME_ENABLED
+ stringMap.get_allocator().set_name("test");
+ const char* pName = stringMap.get_allocator().get_name();
+ EATEST_VERIFY(equal(pName, pName + 5, "test"));
+#endif
+ }
+
+
+ {
+ string_map<int> stringMap;
+
+ // Clear a newly constructed, already empty container.
+ stringMap.clear();
+ EATEST_VERIFY(stringMap.validate());
+ EATEST_VERIFY(stringMap.size() == 0);
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ stringMap.insert(strings[i], i);
+
+ EATEST_VERIFY(stringMap.validate());
+ EATEST_VERIFY(stringMap.size() == kStringCount);
+
+ stringMap.clear();
+ EATEST_VERIFY(stringMap.validate());
+ EATEST_VERIFY(stringMap.size() == 0);
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ stringMap.insert(strings[i], i);
+ EATEST_VERIFY(stringMap.validate());
+ EATEST_VERIFY(stringMap.size() == kStringCount);
+
+ stringMap.clear();
+ EATEST_VERIFY(stringMap.validate());
+ EATEST_VERIFY(stringMap.size() == 0);
+ }
+
+
+ { // Test string_map
+
+ // size_type size() const
+ // bool empty() const
+ // insert_return_type insert(const value_type& value);
+ // insert_return_type insert(const value_type& value, hash_code_t c, node_type* pNodeNew = NULL);
+ // iterator insert(const_iterator, const value_type& value);
+ // iterator find(const key_type& k);
+ // const_iterator find(const key_type& k) const;
+ // size_type count(const key_type& k) const;
+
+ typedef string_map<int> StringMapInt;
+
+ StringMapInt stringMap;
+
+ EATEST_VERIFY(stringMap.empty());
+ EATEST_VERIFY(stringMap.size() == 0);
+ EATEST_VERIFY(stringMap.count(strings[0]) == 0);
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ stringMap.insert(strings[i], i);
+
+ EATEST_VERIFY(!stringMap.empty());
+ EATEST_VERIFY(stringMap.size() == kStringCount);
+ EATEST_VERIFY(stringMap.count(strings[0]) == 1);
+
+ int j = 0;
+ for (StringMapInt::iterator it = stringMap.begin(); it != stringMap.end(); ++it, ++j)
+ {
+ int value = (*it).second;
+ EATEST_VERIFY(value < (int)kStringCount);
+ }
+
+ for (int i = 0; i < (int)kStringCount * 2; i++)
+ {
+ StringMapInt::iterator it = stringMap.find(strings[i]);
+
+ if (i < (int)kStringCount)
+ {
+ EATEST_VERIFY(it != stringMap.end());
+ const char* k = (*it).first;
+ int v = (*it).second;
+ EATEST_VERIFY(EA::StdC::Strcmp(k, strings[i]) == 0);
+ EATEST_VERIFY(v == i);
+ }
+ else
+ EATEST_VERIFY(it == stringMap.end());
+ }
+
+ StringMapInt::insert_return_type result = stringMap.insert("EASTLTEST");
+ EATEST_VERIFY(result.second == true);
+ result = stringMap.insert("EASTLTEST");
+ EATEST_VERIFY(result.second == false);
+ result.first->second = 0;
+
+ // iterator erase(const_iterator);
+ size_t nExpectedSize = stringMap.size();
+
+ StringMapInt::iterator itD = stringMap.find("d");
+ EATEST_VERIFY(itD != stringMap.end());
+
+ // erase the element and verify that the size has decreased
+ stringMap.erase(itD);
+ nExpectedSize--;
+ EATEST_VERIFY(stringMap.size() == nExpectedSize);
+
+ // verify that erased element is gone
+ itD = stringMap.find(strings[3]);
+ EATEST_VERIFY(itD == stringMap.end());
+
+ // iterator erase(const char*)
+ StringMapInt::size_type n = stringMap.erase(strings[4]);
+ nExpectedSize--;
+ EATEST_VERIFY(n == 1);
+ EATEST_VERIFY(stringMap.size() == nExpectedSize);
+
+
+ // mapped_type& operator[](const key_type& key)
+ stringMap.clear();
+
+ int x = stringMap["A"]; // A default-constructed int (i.e. 0) should be returned.
+ EATEST_VERIFY(x == 0);
+
+ stringMap["B"] = 1;
+ x = stringMap["B"];
+ EATEST_VERIFY(x == 1); // Verify that the value we assigned is returned and a default-constructed value is not returned.
+
+ stringMap["A"] = 10; // Overwrite our previous 0 with 10.
+ stringMap["B"] = 11;
+ x = stringMap["A"];
+ EATEST_VERIFY(x == 10); // Verify the value is as expected.
+ x = stringMap["B"];
+ EATEST_VERIFY(x == 11);
+
+ }
+
+
+ {
+ // string_map(const allocator_type& allocator);
+ // string_map& operator=(const this_type& x);
+ // bool validate() const;
+
+ string_map<int> stringMap1(EASTLAllocatorType("TestStringMap"));
+ string_map<int> stringMap2(stringMap1);
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ {
+ stringMap1.insert(strings[i], i);
+ }
+
+ stringMap2 = stringMap1;
+ string_map<int> stringMap3(stringMap1);
+
+ EATEST_VERIFY(stringMap1.validate());
+ EATEST_VERIFY(stringMap2.validate());
+ EATEST_VERIFY(stringMap3.validate());
+
+ for (int i = 0; i < (int)kStringCount; i++)
+ {
+ EATEST_VERIFY(stringMap1[strings[i]] == stringMap2[strings[i]]);
+ EATEST_VERIFY(stringMap1[strings[i]] == stringMap3[strings[i]]);
+ }
+
+ }
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestStringView.cpp b/EASTL/test/source/TestStringView.cpp
new file mode 100644
index 0000000..23e6e51
--- /dev/null
+++ b/EASTL/test/source/TestStringView.cpp
@@ -0,0 +1,115 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EABase/eabase.h>
+#include <EASTL/numeric_limits.h>
+#include <EASTL/string.h>
+#include <EASTL/string_view.h>
+
+// Verify char8_t support is present if the test build requested it.
+#if defined(EASTL_EXPECT_CHAR8T_SUPPORT) && !EA_CHAR8_UNIQUE
+static_assert(false, "Building with char8_t tests enabled, but EA_CHAR8_UNIQUE evaluates to false.");
+#endif
+
+// this mess is required inorder to inject string literal string conversion macros into the unit tests
+#define TEST_STRING_NAME TestBasicStringView
+#define LITERAL(x) x
+#include "TestStringView.inl"
+
+#define TEST_STRING_NAME TestBasicStringViewW
+#define LITERAL(x) EA_WCHAR(x)
+#include "TestStringView.inl"
+
+#define TEST_STRING_NAME TestBasicStringView8
+#define LITERAL(x) EA_CHAR8(x)
+#include "TestStringView.inl"
+
+#define TEST_STRING_NAME TestBasicStringView16
+#define LITERAL(x) EA_CHAR16(x)
+#include "TestStringView.inl"
+
+#define TEST_STRING_NAME TestBasicStringView32
+#define LITERAL(x) EA_CHAR32(x)
+#include "TestStringView.inl"
+
+
+int TestStringView()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ nErrorCount += TestBasicStringView<eastl::basic_string_view<char>>();
+ nErrorCount += TestBasicStringView<eastl::string_view>();
+
+ nErrorCount += TestBasicStringViewW<eastl::basic_string_view<wchar_t>>();
+ nErrorCount += TestBasicStringViewW<eastl::wstring_view>();
+
+#if EA_CHAR8_UNIQUE
+ nErrorCount += TestBasicStringView8<eastl::basic_string_view<char8_t>>();
+ nErrorCount += TestBasicStringView8<eastl::u8string_view>();
+#endif
+
+ nErrorCount += TestBasicStringView16<eastl::basic_string_view<char16_t>>();
+ nErrorCount += TestBasicStringView16<eastl::u16string_view>();
+
+#if EA_CHAR32_NATIVE
+ nErrorCount += TestBasicStringView32<eastl::basic_string_view<char32_t>>();
+ nErrorCount += TestBasicStringView32<eastl::u32string_view>();
+#endif
+
+
+ // constexpr string_view operator "" sv(const char* str, size_t len) noexcept;
+ // constexpr u8string_view operator "" sv(const char8_t* str, size_t len) noexcept;
+ // constexpr u16string_view operator "" sv(const char16_t* str, size_t len) noexcept;
+ // constexpr u32string_view operator "" sv(const char32_t* str, size_t len) noexcept;
+ // constexpr wstring_view operator "" sv(const wchar_t* str, size_t len) noexcept;
+ #if EASTL_USER_LITERALS_ENABLED
+ {
+ VERIFY("cplusplus"_sv.compare("cplusplus") == 0);
+ VERIFY(L"cplusplus"_sv.compare(L"cplusplus") == 0);
+ VERIFY(u"cplusplus"_sv.compare(u"cplusplus") == 0);
+ VERIFY(U"cplusplus"_sv.compare(U"cplusplus") == 0);
+ VERIFY(u8"cplusplus"_sv.compare(u8"cplusplus") == 0);
+
+ static_assert(eastl::is_same_v<decltype("abcdef"_sv), eastl::string_view>, "string_view literal type mismatch");
+ static_assert(eastl::is_same_v<decltype(u8"abcdef"_sv), eastl::u8string_view>, "string_view literal type mismatch");
+ static_assert(eastl::is_same_v<decltype(u"abcdef"_sv), eastl::u16string_view>, "string_view literal type mismatch");
+ static_assert(eastl::is_same_v<decltype(U"abcdef"_sv), eastl::u32string_view>, "string_view literal type mismatch");
+ static_assert(eastl::is_same_v<decltype(L"abcdef"_sv), eastl::wstring_view>, "string_view literal type mismatch");
+
+
+ VERIFY("cplusplus"sv.compare("cplusplus") == 0);
+ VERIFY(L"cplusplus"sv.compare(L"cplusplus") == 0);
+ VERIFY(u"cplusplus"sv.compare(u"cplusplus") == 0);
+ VERIFY(U"cplusplus"sv.compare(U"cplusplus") == 0);
+ VERIFY(u8"cplusplus"sv.compare(u8"cplusplus") == 0);
+
+ static_assert(eastl::is_same_v<decltype("abcdef"sv), eastl::string_view>, "string_view literal type mismatch");
+ static_assert(eastl::is_same_v<decltype(u8"abcdef"sv), eastl::u8string_view>, "string_view literal type mismatch");
+ static_assert(eastl::is_same_v<decltype(u"abcdef"sv), eastl::u16string_view>, "string_view literal type mismatch");
+ static_assert(eastl::is_same_v<decltype(U"abcdef"sv), eastl::u32string_view>, "string_view literal type mismatch");
+ static_assert(eastl::is_same_v<decltype(L"abcdef"sv), eastl::wstring_view>, "string_view literal type mismatch");
+ }
+ #endif
+
+
+ // strlen(char_t) compatibility
+ {
+ auto* pStr = "Hello, World";
+ string_view sw(pStr, strlen(pStr));
+ VERIFY(sw.size() == strlen(pStr));
+ }
+
+ // strlen(wchar_t) compatibility
+ {
+ auto* pStr = L"Hello, World";
+ wstring_view sw(pStr, wcslen(pStr));
+ VERIFY(sw.size() == wcslen(pStr));
+ }
+
+
+ return nErrorCount;
+}
+
diff --git a/EASTL/test/source/TestStringView.inl b/EASTL/test/source/TestStringView.inl
new file mode 100644
index 0000000..cd4214e
--- /dev/null
+++ b/EASTL/test/source/TestStringView.inl
@@ -0,0 +1,599 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+template<typename StringViewT>
+int TEST_STRING_NAME()
+{
+ using StringT = eastl::basic_string<typename StringViewT::value_type>;
+
+ int nErrorCount = 0;
+ {
+ // EA_CONSTEXPR basic_string_view()
+ {
+ StringViewT sw;
+ VERIFY(sw.empty());
+ VERIFY(sw.data() == nullptr);
+ VERIFY(sw.size() == 0);
+ VERIFY(sw.size() == sw.length());
+ }
+
+ // User-reported regression: constructing string_view from a nullptr, NULL, 0
+ {
+ {
+ StringViewT sw(nullptr);
+ VERIFY(sw.empty());
+ VERIFY(sw.data() == nullptr);
+ VERIFY(sw.size() == 0);
+ VERIFY(sw.size() == sw.length());
+ }
+ {
+ StringViewT sw(0);
+ VERIFY(sw.empty());
+ VERIFY(sw.data() == nullptr);
+ VERIFY(sw.size() == 0);
+ VERIFY(sw.size() == sw.length());
+ }
+ {
+ StringViewT sw(NULL);
+ VERIFY(sw.empty());
+ VERIFY(sw.data() == nullptr);
+ VERIFY(sw.size() == 0);
+ VERIFY(sw.size() == sw.length());
+ }
+ }
+
+ // EA_CONSTEXPR basic_string_view(const basic_string_view& other) = default;
+ {
+ auto* pLiteral = LITERAL("Hello, World");
+ StringViewT sw1(pLiteral);
+ StringViewT sw2(sw1);
+ VERIFY(sw1.size() == sw2.size());
+ VERIFY(eastl::Compare(sw1.data(), sw2.data(), sw1.size()) == 0);
+ }
+
+ // EA_CONSTEXPR basic_string_view(const T* s, size_type count)
+ {
+ {
+ StringViewT sw(LITERAL("Hello, World"), 12);
+ VERIFY(!sw.empty());
+ VERIFY(sw.data() != nullptr);
+ VERIFY(sw.size() == 12);
+ VERIFY(sw.size() == sw.length());
+ }
+
+ {
+ StringViewT sw(LITERAL("Hello, World"), 5);
+ VERIFY(!sw.empty());
+ VERIFY(sw.data() != nullptr);
+ VERIFY(sw.size() == 5);
+ VERIFY(sw.size() == sw.length());
+ VERIFY(eastl::Compare(sw.data(), LITERAL("Hello"), sw.size()) == 0);
+ }
+ }
+
+ // EA_CONSTEXPR basic_string_view(const T* s)
+ {
+ auto* pLiteral = LITERAL("Vancouver, Canada");
+ StringViewT sw(pLiteral);
+ VERIFY(!sw.empty());
+ VERIFY(sw.data() != nullptr);
+ VERIFY(sw.size() == 17);
+ VERIFY(sw.size() == sw.length());
+ VERIFY(eastl::Compare(sw.data(), pLiteral, sw.size()) == 0);
+ }
+
+ // basic_string_view& operator=(const basic_string_view& view) = default;
+ {
+ auto* pLiteral = LITERAL("Hello, World");
+ StringViewT sw1(pLiteral);
+ StringViewT sw2;
+ VERIFY(!sw1.empty());
+ VERIFY(sw2.empty());
+
+ sw2 = sw1;
+
+ VERIFY(!sw1.empty());
+ VERIFY(!sw2.empty());
+ VERIFY(sw1.size() == sw2.size());
+ VERIFY(eastl::Compare(sw1.data(), pLiteral, sw1.size()) == 0);
+ VERIFY(eastl::Compare(sw2.data(), pLiteral, sw2.size()) == 0);
+ }
+
+ {
+ // EA_CONSTEXPR const_iterator begin() const EA_NOEXCEPT
+ // EA_CONSTEXPR const_iterator cbegin() const EA_NOEXCEPT
+ StringViewT sw(LITERAL("abcdefg"));
+ {
+ auto i = sw.begin();
+ auto ci = sw.cbegin();
+
+ VERIFY(*i++ == LITERAL('a'));
+ VERIFY(*i++ == LITERAL('b'));
+
+ VERIFY(*ci++ == LITERAL('a'));
+ VERIFY(*ci++ == LITERAL('b'));
+ }
+
+ // EA_CONSTEXPR const_iterator end() const EA_NOEXCEPT
+ // EA_CONSTEXPR const_iterator cend() const EA_NOEXCEPT
+ {
+ auto i = sw.end();
+ auto ci = sw.cend();
+
+ VERIFY(*i-- == LITERAL('\0'));
+ VERIFY(*i-- == LITERAL('g'));
+
+ VERIFY(*ci-- == LITERAL('\0'));
+ VERIFY(*ci-- == LITERAL('g'));
+ }
+
+ // EA_CONSTEXPR const_reverse_iterator rbegin() const EA_NOEXCEPT
+ // EA_CONSTEXPR const_reverse_iterator crbegin() const EA_NOEXCEPT
+ {
+ auto i = sw.rbegin();
+ auto ci = sw.crbegin();
+
+ VERIFY(*i++ == LITERAL('g'));
+ VERIFY(*i++ == LITERAL('f'));
+
+ VERIFY(*ci++ == LITERAL('g'));
+ VERIFY(*ci++ == LITERAL('f'));
+ }
+
+ // EA_CONSTEXPR const_reverse_iterator rend() const EA_NOEXCEPT
+ // EA_CONSTEXPR const_reverse_iterator crend() const EA_NOEXCEPT
+ {
+ auto i = sw.rend();
+ i--;
+
+ auto ci = sw.crend();
+ ci--;
+
+ VERIFY(*i-- == LITERAL('a'));
+ VERIFY(*i-- == LITERAL('b'));
+
+ VERIFY(*ci-- == LITERAL('a'));
+ VERIFY(*ci-- == LITERAL('b'));
+ }
+ }
+
+ // EA_CONSTEXPR const_pointer data() const
+ {
+ auto* pLiteral = LITERAL("Vancouver, Canada");
+ StringViewT sw(pLiteral);
+ VERIFY(sw.data() != nullptr);
+ VERIFY(eastl::Compare(sw.data(), pLiteral, sw.size()) == 0);
+ VERIFY(eastl::Compare(sw.data() + 11, LITERAL("Canada"), 6) == 0);
+ }
+
+ // EA_CONSTEXPR const_reference front() const
+ // EA_CONSTEXPR const_reference back() const
+ {
+ {
+ StringViewT sw(LITERAL("Vancouver, Canada"));
+ VERIFY(sw.front() == LITERAL('V'));
+ VERIFY(sw.back() == LITERAL('a'));
+
+ }
+ {
+ StringViewT sw(LITERAL("Canada"));
+ VERIFY(sw.front() == LITERAL('C'));
+ VERIFY(sw.back() == LITERAL('a'));
+ }
+ }
+
+ // EA_CONSTEXPR const_reference operator[](size_type pos) const
+ {
+ StringViewT sw(LITERAL("Vancouver"));
+ VERIFY(sw[0] == LITERAL('V'));
+ VERIFY(sw[1] == LITERAL('a'));
+ VERIFY(sw[2] == LITERAL('n'));
+ VERIFY(sw[3] == LITERAL('c'));
+ VERIFY(sw[4] == LITERAL('o'));
+ VERIFY(sw[5] == LITERAL('u'));
+ VERIFY(sw[6] == LITERAL('v'));
+ VERIFY(sw[7] == LITERAL('e'));
+ VERIFY(sw[8] == LITERAL('r'));
+ }
+
+ // EA_CONSTEXPR size_type size() const EA_NOEXCEPT
+ // EA_CONSTEXPR size_type length() const EA_NOEXCEPT
+ // EA_CONSTEXPR size_type max_size() const EA_NOEXCEPT
+ // EA_CONSTEXPR bool empty() const EA_NOEXCEPT
+ {
+ StringViewT sw(LITERAL("http://en.cppreference.com/w/cpp/header/string_view"));
+ VERIFY(sw.size() == 51);
+ VERIFY(sw.length() == 51);
+ VERIFY(sw.max_size() == eastl::numeric_limits<typename StringViewT::size_type>::max());
+ VERIFY(!sw.empty());
+ }
+
+ // EA_CONSTEXPR void swap(basic_string_view& v)
+ {
+ auto* pV = LITERAL("Vancouver");
+ auto* pC = LITERAL("Canada");
+ StringViewT sw1(pV);
+ StringViewT sw2(pC);
+ sw1.swap(sw2);
+ VERIFY(eastl::Compare(sw1.data(), pC, sw1.size()) == 0);
+ VERIFY(eastl::Compare(sw2.data(), pV, sw2.size()) == 0);
+ }
+
+ // EA_CONSTEXPR void remove_prefix(size_type n)
+ // EA_CONSTEXPR void remove_suffix(size_type n)
+ {
+ StringViewT sw(LITERAL("Vancouver"));
+ sw.remove_prefix(3);
+ VERIFY(eastl::Compare(sw.data(), LITERAL("couver"), sw.size()) == 0);
+ VERIFY(sw.size() == 6);
+
+ sw.remove_prefix(3);
+ VERIFY(eastl::Compare(sw.data(), LITERAL("ver"), sw.size()) == 0);
+ VERIFY(sw.size() == 3);
+
+ sw.remove_suffix(1);
+ VERIFY(eastl::Compare(sw.data(), LITERAL("ve"), sw.size()) == 0);
+ VERIFY(sw.size() == 2);
+
+ sw.remove_suffix(1);
+ VERIFY(eastl::Compare(sw.data(), LITERAL("v"), sw.size()) == 0);
+ VERIFY(sw.size() == 1);
+
+ sw.remove_suffix(1);
+ VERIFY(eastl::Compare(sw.data(), LITERAL(""), sw.size()) == 0);
+ VERIFY(sw.size() == 0);
+ }
+
+ // size_type copy(T* s, size_type n, size_type pos = 0) const;
+ {
+ typename StringViewT::value_type buf[256];
+ StringViewT sw(LITERAL("**Hello, World"));
+ auto cnt = sw.copy(buf, 5, 2);
+ VERIFY(eastl::Compare(buf, LITERAL("Hello"), 5) == 0);
+ VERIFY(cnt == 5);
+ }
+
+ // EA_CONSTEXPR basic_string_view substr(size_type pos = 0, size_type n = npos) const;
+ {
+ StringViewT sw(LITERAL("**Hello, World"));
+ auto sw2 = sw.substr(2, 5);
+ VERIFY(eastl::Compare(sw2.data(), LITERAL("Hello"), sw2.size()) == 0);
+ }
+
+ // EA_CONSTEXPR int compare(basic_string_view s) const EA_NOEXCEPT;
+ {
+ {
+ VERIFY(StringViewT(LITERAL("A")).compare(StringViewT(LITERAL("A"))) == 0);
+ VERIFY(StringViewT(LITERAL("a")).compare(StringViewT(LITERAL("a"))) == 0);
+ VERIFY(StringViewT(LITERAL("A")).compare(StringViewT(LITERAL("a"))) != 0);
+ VERIFY(StringViewT(LITERAL("A")).compare(StringViewT(LITERAL("a"))) < 0);
+ VERIFY(StringViewT(LITERAL("A")).compare(StringViewT(LITERAL("A"))) <= 0);
+ VERIFY(StringViewT(LITERAL("a")).compare(StringViewT(LITERAL("A"))) > 0);
+ VERIFY(StringViewT(LITERAL("A")).compare(StringViewT(LITERAL("A"))) >= 0);
+ }
+
+ {
+ VERIFY(StringViewT(LITERAL("Aa")).compare(StringViewT(LITERAL("A"))) > 0);
+ VERIFY(StringViewT(LITERAL("A")).compare(StringViewT(LITERAL("Aa"))) < 0);
+ }
+
+ {
+ StringViewT sw1(LITERAL("Hello, World"));
+ StringViewT sw2(LITERAL("Hello, WWorld"));
+ StringViewT sw3(LITERAL("Hello, Wzorld"));
+ VERIFY(sw1.compare(sw1) == 0);
+ VERIFY(sw1.compare(sw2) > 0);
+ VERIFY(sw1.compare(sw3) < 0);
+ }
+ }
+
+ // EA_CONSTEXPR int compare(size_type pos1, size_type n1, basic_string_view s) const;
+ {
+ StringViewT sw1(LITERAL("*** Hello ***"));
+ StringViewT sw2(LITERAL("Hello"));
+ VERIFY(sw1.compare(4, 5, sw2) == 0);
+ }
+
+ // EA_CONSTEXPR int compare(size_type pos1, size_type n1, basic_string_view s, size_type pos2, size_type n2) const;
+ {
+ StringViewT sw(LITERAL("Vancouver"));
+ VERIFY(sw.compare(0, 3, StringViewT(LITERAL("Van")), 0, 3) == 0);
+ VERIFY(sw.compare(6, 3, StringViewT(LITERAL("ver")), 0, 3) == 0);
+ VERIFY(sw.compare(0, 3, StringViewT(LITERAL("Tan")), 0, 3) != 0);
+ }
+
+ // EA_CONSTEXPR int compare(const T* s) const;
+ {
+ StringViewT sw(LITERAL("Hello"));
+ VERIFY(sw.compare(LITERAL("Vancouver")) != 0);
+ VERIFY(sw.compare(LITERAL("Vancouver!")) != 0);
+ VERIFY(sw.compare(LITERAL("Hello")) == 0);
+ }
+
+ // EA_CONSTEXPR int compare(size_type pos1, size_type n1, const T* s) const;
+ {
+ StringViewT sw(LITERAL("*** Hello"));
+ VERIFY(sw.compare(4, 5, LITERAL("Hello")) == 0);
+ VERIFY(sw.compare(4, 5, LITERAL("Hello 555")) != 0);
+ VERIFY(sw.compare(4, 5, LITERAL("hello")) != 0);
+ }
+
+ // EA_CONSTEXPR int compare(size_type pos1, size_type n1, const T* s, size_type n2) const;
+ {
+ StringViewT sw(LITERAL("*** Hello ***"));
+ VERIFY(sw.compare(4, 5, LITERAL("Hello"), 5) == 0);
+ VERIFY(sw.compare(0, 1, LITERAL("*"), 1) == 0);
+ VERIFY(sw.compare(0, 2, LITERAL("**"), 1) != 0);
+ VERIFY(sw.compare(0, 2, LITERAL("**"), 2) == 0);
+ VERIFY(sw.compare(0, 2, LITERAL("^^"), 2) != 0);
+ }
+
+
+ // EA_CONSTEXPR size_type find(basic_string_view s, size_type pos = 0) const EA_NOEXCEPT;
+ {
+ StringViewT sw(LITERAL("*** Hello ***"));
+ VERIFY(sw.find(StringViewT(LITERAL("Hello"))) != StringViewT::npos);
+ VERIFY(sw.find(StringViewT(LITERAL("ell"))) != StringViewT::npos);
+ VERIFY(sw.find(StringViewT(LITERAL("FailToFindMe"))) == StringViewT::npos);
+ }
+
+ // EA_CONSTEXPR size_type find(T c, size_type pos = 0) const EA_NOEXCEPT;
+ {
+ StringViewT sw(LITERAL("*** Hello ***"));
+ VERIFY(sw.find(LITERAL("H")) == 4);
+ VERIFY(sw.find(LITERAL("e")) == 5);
+ VERIFY(sw.find(LITERAL("l")) == 6);
+ VERIFY(sw.find(LITERAL("o")) == 8);
+ VERIFY(sw.find(LITERAL("&")) == StringViewT::npos);
+ VERIFY(sw.find(LITERAL("@")) == StringViewT::npos);
+ }
+
+ // EA_CONSTEXPR size_type find(const T* s, size_type pos, size_type n) const;
+ {
+ StringViewT sw(LITERAL("Hello, Vancouver"));
+ VERIFY(sw.find(LITERAL("Hello"), 0, 3) != StringViewT::npos);
+ VERIFY(sw.find(LITERAL("Hello"), 3, 3) == StringViewT::npos);
+ VERIFY(sw.find(LITERAL("Vancouv"), 7, 7) != StringViewT::npos);
+ }
+
+ // EA_CONSTEXPR size_type find(const T* s, size_type pos = 0) const;
+ {
+ StringViewT sw(LITERAL("Hello, Vancouver"));
+ VERIFY(sw.find(LITERAL("Hello"), 0) != StringViewT::npos);
+ VERIFY(sw.find(LITERAL("Hello"), 3) == StringViewT::npos);
+ VERIFY(sw.find(LITERAL("Vancouv"), 7) != StringViewT::npos);
+ }
+
+
+ // EA_CONSTEXPR size_type rfind(basic_string_view s, size_type pos = npos) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type rfind(T c, size_type pos = npos) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type rfind(const T* s, size_type pos, size_type n) const;
+ // EA_CONSTEXPR size_type rfind(const T* s, size_type pos = npos) const;
+ {
+ StringViewT str(LITERAL("abcdefghijklmnopqrstuvwxyz"));
+
+ VERIFY(str.rfind(StringViewT(LITERAL("d"))) != StringViewT::npos);
+ VERIFY(str.rfind(StringViewT(LITERAL("tuv"))) != StringViewT::npos);
+ VERIFY(str.rfind(StringViewT(LITERAL("123r"))) == StringViewT::npos);
+
+ VERIFY(str.rfind(LITERAL("d")) != StringViewT::npos);
+ VERIFY(str.rfind(LITERAL("tuv")) != StringViewT::npos);
+ VERIFY(str.rfind(LITERAL("123r")) == StringViewT::npos);
+
+ VERIFY(str.rfind(LITERAL("d"), str.length()) != StringViewT::npos);
+ VERIFY(str.rfind(LITERAL("tuv"), str.length() - 2) != StringViewT::npos);
+ VERIFY(str.rfind(LITERAL("123r"), str.length() - 2) == StringViewT::npos);
+
+ VERIFY(str.rfind(LITERAL('d'), str.length() - 0) != StringViewT::npos);
+ VERIFY(str.rfind(LITERAL('t'), str.length() - 2) != StringViewT::npos);
+ VERIFY(str.rfind(LITERAL('1'), str.length() - 2) == StringViewT::npos);
+ }
+
+ // EA_CONSTEXPR size_type find_first_of(basic_string_view s, size_type pos = 0) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type find_first_of(T c, size_type pos = 0) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type find_first_of(const T* s, size_type pos, size_type n) const;
+ // EA_CONSTEXPR size_type find_first_of(const T* s, size_type pos = 0) const;
+ {
+ StringViewT str(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(str.find_first_of(StringViewT(LITERAL("aaa"))) == 0);
+ VERIFY(str.find_first_of(LITERAL("aab")) == 0);
+ VERIFY(str.find_first_of(LITERAL("baab")) == 0);
+ VERIFY(str.find_first_of(LITERAL("ceg")) == 10);
+ VERIFY(str.find_first_of(LITERAL("eeef"), 1, 2) == 18);
+ VERIFY(str.find_first_of(LITERAL("eeef"), 1, 4) == 18);
+ VERIFY(str.find_first_of(LITERAL('g')) == 26);
+ VERIFY(str.find_first_of(LITERAL('$')) == StringViewT::npos);
+ VERIFY(str.find_first_of(StringViewT(LITERAL(" a"), 1)) == StringViewT::npos);
+ }
+
+ // EA_CONSTEXPR size_type find_last_of(basic_string_view s, size_type pos = npos) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type find_last_of(T c, size_type pos = npos) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type find_last_of(const T* s, size_type pos, size_type n) const;
+ // EA_CONSTEXPR size_type find_last_of(const T* s, size_type pos = npos) const;
+ {
+ StringViewT str(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(str.find_last_of(StringViewT(LITERAL("aaa"))) == 4);
+ VERIFY(str.find_last_of(LITERAL("aab")) == 9);
+ VERIFY(str.find_last_of(LITERAL("baab")) == 9);
+ VERIFY(str.find_last_of(LITERAL("ceg")) == 27);
+ // VERIFY(str.find_last_of(LITERAL("eeef"), 1, 2) == StringViewT::npos); // todo: FIX ME
+ // VERIFY(str.find_last_of(LITERAL("eeef"), 1, 4) == StringViewT::npos); // todo: FIX ME
+ VERIFY(str.find_last_of(LITERAL('g')) == 27);
+ VERIFY(str.find_last_of(LITERAL('$')) == StringViewT::npos);
+ }
+
+ // EA_CONSTEXPR size_type find_first_not_of(basic_string_view s, size_type pos = 0) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type find_first_not_of(T c, size_type pos = 0) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type find_first_not_of(const T* s, size_type pos, size_type n) const;
+ // EA_CONSTEXPR size_type find_first_not_of(const T* s, size_type pos = 0) const;
+ {
+ StringViewT str(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(str.find_first_not_of(StringViewT(LITERAL("abcdfg"))) == 18);
+ VERIFY(str.find_first_not_of(LITERAL("abcdfg")) == 18);
+ // VERIFY(str.find_first_not_of(LITERAL("abcdfg"), 2, 2) == 0); // todo: FIX ME
+ // VERIFY(str.find_first_not_of(LITERAL("abcdfg"), 0, 2) == 10); // todo: FIX ME
+ VERIFY(str.find_first_not_of(LITERAL('a')) == 5);
+ }
+
+
+ // EA_CONSTEXPR size_type find_last_not_of(basic_string_view s, size_type pos = npos) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type find_last_not_of(T c, size_type pos = npos) const EA_NOEXCEPT;
+ // EA_CONSTEXPR size_type find_last_not_of(const T* s, size_type pos, size_type n) const;
+ // EA_CONSTEXPR size_type find_last_not_of(const T* s, size_type pos = npos) const;
+ {
+ StringViewT str(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(str.find_last_not_of(StringViewT(LITERAL("a"))) == 28);
+ VERIFY(str.find_last_not_of(StringViewT(LITERAL("abcdfg"))) == 28);
+ VERIFY(str.find_last_not_of(StringViewT(LITERAL("abcdfgh"))) == 22);
+ VERIFY(str.find_last_not_of(LITERAL("abcdfgh")) == 22);
+ // VERIFY(str.find_last_not_of(LITERAL("abcdfg"), 2, 2) == 0); // todo: FIX ME
+ // VERIFY(str.find_last_not_of(LITERAL("abcdfg"), 0, 2) == 10); // todo: FIX ME
+ VERIFY(str.find_last_not_of(LITERAL('a')) == 28);
+ }
+
+ // template <class CharT, class Traits>
+ // constexpr bool operator==(basic_string_view<CharT, Traits> lhs, basic_string_view<CharT, Traits> rhs);
+ // template <class CharT, class Traits>
+ // constexpr bool operator!=(basic_string_view<CharT, Traits> lhs, basic_string_view<CharT, Traits> rhs);
+ // template <class CharT, class Traits>
+ // constexpr bool operator<(basic_string_view<CharT, Traits> lhs, basic_string_view<CharT, Traits> rhs);
+ // template <class CharT, class Traits>
+ // constexpr bool operator<=(basic_string_view<CharT, Traits> lhs, basic_string_view<CharT, Traits> rhs);
+ // template <class CharT, class Traits>
+ // constexpr bool operator>(basic_string_view<CharT, Traits> lhs, basic_string_view<CharT, Traits> rhs);
+ // template <class CharT, class Traits>
+ // constexpr bool operator>=(basic_string_view<CharT, Traits> lhs, basic_string_view<CharT, Traits> rhs);
+ {
+ StringViewT sw1(LITERAL("AAAAABBBBBCCCDDDDDEEEEEFFFGGH"));
+ StringViewT sw2(LITERAL("aaaaabbbbbcccdddddeeeeefffggh"));
+
+ VERIFY(sw1 == StringViewT(LITERAL("AAAAABBBBBCCCDDDDDEEEEEFFFGGH")));
+ VERIFY(sw1 != StringViewT(LITERAL("abcdefghijklmnopqrstuvwxyz")));
+ VERIFY(sw1 < sw2);
+ VERIFY(sw1 <= sw2);
+ VERIFY(sw2 > sw1);
+ VERIFY(sw2 >= sw1);
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ VERIFY((sw1 <=> StringViewT(LITERAL("AAAAABBBBBCCCDDDDDEEEEEFFFGGH"))) == 0);
+ VERIFY((sw1 <=> StringViewT(LITERAL("abcdefghijklmnopqrstuvwxyz"))) != 0);
+ VERIFY((sw1 <=> sw2) < 0);
+ VERIFY((sw1 <=> sw2) <= 0);
+ VERIFY((sw2 <=> sw1) > 0);
+ VERIFY((sw2 <=> sw1) >= 0);
+#endif
+ }
+
+ {
+ auto s = LITERAL("Hello, World");
+ StringViewT sv(s);
+
+ VERIFY(s == sv);
+ VERIFY(sv == s);
+
+ VERIFY(s <= sv);
+ VERIFY(sv <= s);
+ VERIFY(s >= sv);
+ VERIFY(sv >= s);
+ VERIFY(!(s != sv));
+ VERIFY(!(sv != s));
+ VERIFY(!(s < sv));
+ VERIFY(!(sv < s));
+ VERIFY(!(s > sv));
+ VERIFY(!(sv > s));
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ VERIFY((s <=> sv) == 0);
+ VERIFY((sv <=> s) == 0);
+
+ VERIFY((s <=> sv) <= 0);
+ VERIFY((sv <=> s) <= 0);
+ VERIFY((s <=> sv) >= 0);
+ VERIFY((sv <=> s) >= 0);
+ VERIFY(!((s <=> sv) != 0));
+ VERIFY(!((sv <=> s) != 0));
+ VERIFY(!((s <=> sv) > 0));
+ VERIFY(!((sv <=> s) < 0));
+#endif
+ }
+
+ // Regression comparison operators should work between basic_string_view and basic_string.
+ // The idea is that type_identity_t on some overloads will force basic_string::operator basic_string_view() to kick in.
+ {
+ StringT s(LITERAL("Hello, Stockholm"));
+ StringViewT sv(s);
+
+ VERIFY(s == sv);
+ VERIFY(sv == s);
+
+ // All the operators bellow used to not work.
+ VERIFY(s <= sv);
+ VERIFY(sv <= s);
+ VERIFY(s >= sv);
+ VERIFY(sv >= s);
+ VERIFY(!(s != sv));
+ VERIFY(!(sv != s));
+ VERIFY(!(s < sv));
+ VERIFY(!(sv < s));
+ VERIFY(!(s > sv));
+ VERIFY(!(sv > s));
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ VERIFY((s <=> sv) == 0);
+ VERIFY((sv <=> s) == 0);
+
+ VERIFY((s <=> sv) <= 0);
+ VERIFY((sv <=> s) <= 0);
+ VERIFY((s <=> sv) >= 0);
+ VERIFY((sv <=> s) >= 0);
+ VERIFY(!((s <=> sv) != 0));
+ VERIFY(!((sv <=> s) != 0));
+ VERIFY(!((s <=> sv) > 0));
+ VERIFY(!((sv <=> s) < 0));
+#endif
+ }
+
+ // template<> struct hash<std::string_view>;
+ // template<> struct hash<std::wstring_view>;
+ // template<> struct hash<std::u16string_view>;
+ // template<> struct hash<std::u32string_view>;
+ {
+ StringViewT sw1(LITERAL("Hello, World"));
+ StringViewT sw2(LITERAL("Hello, World"), 5);
+ StringViewT sw3(LITERAL("Hello"));
+ auto s = LITERAL("Hello");
+
+ VERIFY(eastl::hash<StringViewT>{}(sw1) != eastl::hash<StringViewT>{}(sw2));
+ VERIFY(eastl::hash<StringViewT>{}(sw2) == eastl::hash<StringViewT>{}(sw3));
+ VERIFY(eastl::hash<StringViewT>{}(sw3) == eastl::hash<decltype(s)>{}(s));
+ }
+ }
+
+ {
+ StringViewT sw1(LITERAL("AAAAABBBBBCCCDDDDDEEEEEFFFGGH"));
+
+ VERIFY( sw1.starts_with(LITERAL('A')));
+ VERIFY(!sw1.starts_with(LITERAL('X')));
+ VERIFY( sw1.starts_with(LITERAL("AAAA")));
+ VERIFY( sw1.starts_with(StringViewT(LITERAL("AAAA"))));
+ VERIFY(!sw1.starts_with(LITERAL("AAAB")));
+
+ VERIFY( sw1.ends_with(LITERAL('H')));
+ VERIFY(!sw1.ends_with(LITERAL('X')));
+ VERIFY( sw1.ends_with(LITERAL("FGGH")));
+ VERIFY( sw1.ends_with(StringViewT(LITERAL("FGGH"))));
+ VERIFY(!sw1.ends_with(LITERAL("FGGH$")));
+ }
+
+ return nErrorCount;
+}
+
+// Required to prevent manual undef of macros when 'TestString.inl' preprocessed at the top of the unit test cpp file.
+#undef TEST_STRING_NAME
+#undef LITERAL
+
diff --git a/EASTL/test/source/TestTuple.cpp b/EASTL/test/source/TestTuple.cpp
new file mode 100644
index 0000000..6a7647e
--- /dev/null
+++ b/EASTL/test/source/TestTuple.cpp
@@ -0,0 +1,587 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+
+EA_DISABLE_VC_WARNING(4623 4625 4413 4510)
+
+#include <EASTL/tuple.h>
+#include <EASTL/unique_ptr.h>
+
+#if EASTL_TUPLE_ENABLED
+
+namespace TestTupleInternal
+{
+
+struct DefaultConstructibleType
+{
+ static const int defaultVal = 0x1EE7C0DE;
+ DefaultConstructibleType() : mVal(defaultVal) {}
+ int mVal;
+};
+
+struct OperationCountingType
+{
+ OperationCountingType() : mVal() { ++mDefaultConstructorCalls; }
+ OperationCountingType(int x) : mVal(x) { ++mIntConstructorCalls; }
+ OperationCountingType(const OperationCountingType& x) : mVal(x.mVal) { ++mCopyConstructorCalls; }
+ OperationCountingType(OperationCountingType&& x) : mVal(x.mVal)
+ {
+ ++mMoveConstructorCalls;
+ x.mVal = 0;
+ }
+ OperationCountingType& operator=(const OperationCountingType& x)
+ {
+ mVal = x.mVal;
+ ++mCopyAssignmentCalls;
+ return *this;
+ }
+ OperationCountingType& operator=(OperationCountingType&& x)
+ {
+ mVal = x.mVal;
+ x.mVal = 0;
+ ++mMoveAssignmentCalls;
+ return *this;
+ }
+ ~OperationCountingType() { ++mDestructorCalls; }
+
+ int mVal;
+
+ static void ResetCounters()
+ {
+ mDefaultConstructorCalls = 0;
+ mIntConstructorCalls = 0;
+ mCopyConstructorCalls = 0;
+ mMoveConstructorCalls = 0;
+ mCopyAssignmentCalls = 0;
+ mMoveAssignmentCalls = 0;
+ mDestructorCalls = 0;
+ }
+
+ static int mDefaultConstructorCalls;
+ static int mIntConstructorCalls;
+ static int mCopyConstructorCalls;
+ static int mMoveConstructorCalls;
+ static int mCopyAssignmentCalls;
+ static int mMoveAssignmentCalls;
+ static int mDestructorCalls;
+};
+
+int OperationCountingType::mDefaultConstructorCalls = 0;
+int OperationCountingType::mIntConstructorCalls = 0;
+int OperationCountingType::mCopyConstructorCalls = 0;
+int OperationCountingType::mMoveConstructorCalls = 0;
+int OperationCountingType::mCopyAssignmentCalls = 0;
+int OperationCountingType::mMoveAssignmentCalls = 0;
+int OperationCountingType::mDestructorCalls = 0;
+
+} // namespace TestTupleInternal
+
+int TestTuple()
+{
+ using namespace eastl;
+ using namespace TestTupleInternal;
+
+ int nErrorCount = 0;
+
+ static_assert(tuple_size<tuple<int>>::value == 1, "tuple_size<tuple<T>> test failed.");
+ static_assert(tuple_size<const tuple<int>>::value == 1, "tuple_size<const tuple<T>> test failed.");
+ static_assert(tuple_size<const tuple<const int>>::value == 1, "tuple_size<const tuple<const T>> test failed.");
+ static_assert(tuple_size<volatile tuple<int>>::value == 1, "tuple_size<volatile tuple<T>> test failed.");
+ static_assert(tuple_size<const volatile tuple<int>>::value == 1, "tuple_size<const volatile tuple<T>> test failed.");
+ static_assert(tuple_size<tuple<int, float, bool>>::value == 3, "tuple_size<tuple<T, T, T>> test failed.");
+
+ static_assert(is_same<tuple_element_t<0, tuple<int>>, int>::value, "tuple_element<I, T> test failed.");
+ static_assert(is_same<tuple_element_t<1, tuple<float, int>>, int>::value, "tuple_element<I, T> test failed.");
+ static_assert(is_same<tuple_element_t<1, tuple<float, const int>>, const int>::value, "tuple_element<I, T> test failed.");
+ static_assert(is_same<tuple_element_t<1, tuple<float, volatile int>>, volatile int>::value, "tuple_element<I, T> test failed.");
+ static_assert(is_same<tuple_element_t<1, tuple<float, const volatile int>>, const volatile int>::value, "tuple_element<I, T> test failed.");
+ static_assert(is_same<tuple_element_t<1, tuple<float, int&>>, int&>::value, "tuple_element<I, T> test failed.");
+
+ {
+ tuple<int> aSingleElementTuple(1);
+ EATEST_VERIFY(get<0>(aSingleElementTuple) == 1);
+ get<0>(aSingleElementTuple) = 2;
+ EATEST_VERIFY(get<0>(aSingleElementTuple) == 2);
+ get<int>(aSingleElementTuple) = 3;
+ EATEST_VERIFY(get<int>(aSingleElementTuple) == 3);
+
+ const tuple<int> aConstSingleElementTuple(3);
+ EATEST_VERIFY(get<0>(aConstSingleElementTuple) == 3);
+ EATEST_VERIFY(get<int>(aConstSingleElementTuple) == 3);
+
+ tuple<DefaultConstructibleType> aDefaultConstructedTuple;
+ EATEST_VERIFY(get<0>(aDefaultConstructedTuple).mVal == DefaultConstructibleType::defaultVal);
+
+ OperationCountingType::ResetCounters();
+ tuple<OperationCountingType> anOperationCountingTuple;
+ EATEST_VERIFY(OperationCountingType::mDefaultConstructorCalls == 1 &&
+ get<0>(anOperationCountingTuple).mVal == 0);
+ get<0>(anOperationCountingTuple).mVal = 1;
+ tuple<OperationCountingType> anotherOperationCountingTuple(anOperationCountingTuple);
+ EATEST_VERIFY(OperationCountingType::mDefaultConstructorCalls == 1 &&
+ OperationCountingType::mCopyConstructorCalls == 1 &&
+ get<0>(anotherOperationCountingTuple).mVal == 1);
+ get<0>(anOperationCountingTuple).mVal = 2;
+ anotherOperationCountingTuple = anOperationCountingTuple;
+ EATEST_VERIFY(
+ OperationCountingType::mDefaultConstructorCalls == 1 && OperationCountingType::mCopyConstructorCalls == 1 &&
+ OperationCountingType::mCopyAssignmentCalls == 1 && get<0>(anotherOperationCountingTuple).mVal == 2);
+
+ OperationCountingType::ResetCounters();
+ tuple<OperationCountingType> yetAnotherOperationCountingTuple(OperationCountingType(5));
+ EATEST_VERIFY(
+ OperationCountingType::mMoveConstructorCalls == 1 && OperationCountingType::mDefaultConstructorCalls == 0 &&
+ OperationCountingType::mCopyConstructorCalls == 0 && get<0>(yetAnotherOperationCountingTuple).mVal == 5);
+ }
+
+ EATEST_VERIFY(OperationCountingType::mDestructorCalls == 4);
+
+ {
+ // Test constructor
+ tuple<int, float, bool> aTuple(1, 1.0f, true);
+ EATEST_VERIFY(get<0>(aTuple) == 1);
+ EATEST_VERIFY(get<1>(aTuple) == 1.0f);
+ EATEST_VERIFY(get<2>(aTuple) == true);
+ EATEST_VERIFY(get<int>(aTuple) == 1);
+ EATEST_VERIFY(get<float>(aTuple) == 1.0f);
+ EATEST_VERIFY(get<bool>(aTuple) == true);
+
+ get<1>(aTuple) = 2.0f;
+ EATEST_VERIFY(get<1>(aTuple) == 2.0f);
+
+ // Test copy constructor
+ tuple<int, float, bool> anotherTuple(aTuple);
+ EATEST_VERIFY(get<0>(anotherTuple) == 1 && get<1>(anotherTuple) == 2.0f && get<2>(anotherTuple) == true);
+
+ // Test copy assignment
+ tuple<int, float, bool> yetAnotherTuple(2, 3.0f, true);
+ EATEST_VERIFY(get<0>(yetAnotherTuple) == 2 && get<1>(yetAnotherTuple) == 3.0f &&
+ get<2>(yetAnotherTuple) == true);
+ yetAnotherTuple = anotherTuple;
+ EATEST_VERIFY(get<0>(yetAnotherTuple) == 1 && get<1>(yetAnotherTuple) == 2.0f &&
+ get<2>(yetAnotherTuple) == true);
+
+ // Test converting 'copy' constructor (from a tuple of different type whose members are each convertible)
+ tuple<double, double, bool> aDifferentTuple(aTuple);
+ EATEST_VERIFY(get<0>(aDifferentTuple) == 1.0 && get<1>(aDifferentTuple) == 2.0 &&
+ get<2>(aDifferentTuple) == true);
+
+ // Test converting assignment operator (from a tuple of different type whose members are each convertible)
+ tuple<double, double, bool> anotherDifferentTuple;
+ EATEST_VERIFY(get<0>(anotherDifferentTuple) == 0.0 && get<1>(anotherDifferentTuple) == 0.0 &&
+ get<2>(anotherDifferentTuple) == false);
+ anotherDifferentTuple = anotherTuple;
+ EATEST_VERIFY(get<0>(anotherDifferentTuple) == 1.0 && get<1>(anotherDifferentTuple) == 2.0 &&
+ get<2>(anotherDifferentTuple) == true);
+
+ // Test default initialization (built in types should be value initialized rather than default initialized)
+ tuple<int, float, bool> aDefaultInitializedTuple;
+ EATEST_VERIFY(get<0>(aDefaultInitializedTuple) == 0 && get<1>(aDefaultInitializedTuple) == 0.0f &&
+ get<2>(aDefaultInitializedTuple) == false);
+ }
+
+ {
+ // Test some other cases with typed-getter
+ tuple<double, double, bool> aTupleWithRepeatedType(1.0f, 2.0f, true);
+ EATEST_VERIFY(get<bool>(aTupleWithRepeatedType) == true);
+
+ tuple<double, bool, double> anotherTupleWithRepeatedType(1.0f, true, 2.0f);
+ EATEST_VERIFY(get<bool>(anotherTupleWithRepeatedType) == true);
+
+ tuple<bool, double, double> yetAnotherTupleWithRepeatedType(true, 1.0f, 2.0f);
+ EATEST_VERIFY(get<bool>(anotherTupleWithRepeatedType) == true);
+
+ struct floatOne { float val; };
+ struct floatTwo { float val; };
+ tuple<floatOne, floatTwo> aTupleOfStructs({ 1.0f }, { 2.0f } );
+ EATEST_VERIFY(get<floatOne>(aTupleOfStructs).val == 1.0f);
+ EATEST_VERIFY(get<floatTwo>(aTupleOfStructs).val == 2.0f);
+
+ const tuple<double, double, bool> aConstTuple(aTupleWithRepeatedType);
+ const bool& constRef = get<bool>(aConstTuple);
+ EATEST_VERIFY(constRef == true);
+
+ const bool&& constRval = get<bool>(eastl::move(aTupleWithRepeatedType));
+ EATEST_VERIFY(constRval == true);
+ }
+
+ {
+ tuple<int, float> aTupleWithDefaultInit(1, {});
+
+ // tuple construction from pair
+ pair<int, float> aPair(1, 2.0f);
+ tuple<int, float> aTuple(aPair);
+ EATEST_VERIFY(get<0>(aTuple) == 1 && get<1>(aTuple) == 2.0f);
+ tuple<double, double> anotherTuple(aPair);
+ EATEST_VERIFY(get<0>(anotherTuple) == 1.0 && get<1>(anotherTuple) == 2.0);
+ anotherTuple = make_pair(2, 3);
+ EATEST_VERIFY(get<0>(anotherTuple) == 2.0 && get<1>(anotherTuple) == 3.0);
+
+ // operators: ==, !=, <
+ anotherTuple = aTuple;
+ EATEST_VERIFY(aTuple == anotherTuple);
+ EATEST_VERIFY(!(aTuple < anotherTuple) && !(anotherTuple < aTuple));
+ tuple<double, double> aDefaultInitTuple;
+ EATEST_VERIFY(aTuple != aDefaultInitTuple);
+ EATEST_VERIFY(aDefaultInitTuple < aTuple);
+
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ EATEST_VERIFY((aTuple <=> anotherTuple) == 0);
+ EATEST_VERIFY((aTuple <=> anotherTuple) >= 0);
+ EATEST_VERIFY((anotherTuple <=> aTuple) >= 0);
+ EATEST_VERIFY((aTuple <=> aDefaultInitTuple) != 0);
+ EATEST_VERIFY((aDefaultInitTuple <=> aTuple) < 0);
+ #endif
+
+ tuple<int, int, int> lesserTuple(1, 2, 3);
+ tuple<int, int, int> greaterTuple(1, 2, 4);
+ EATEST_VERIFY(lesserTuple < greaterTuple && !(greaterTuple < lesserTuple) && greaterTuple > lesserTuple &&
+ !(lesserTuple > greaterTuple));
+
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ EATEST_VERIFY((lesserTuple <=> greaterTuple) != 0);
+ EATEST_VERIFY((lesserTuple <=> greaterTuple) < 0);
+ EATEST_VERIFY((lesserTuple <=> greaterTuple) <= 0);
+ EATEST_VERIFY((greaterTuple <=> lesserTuple) > 0);
+ EATEST_VERIFY((greaterTuple <=> lesserTuple) >= 0);
+ #endif
+
+ tuple<int, float, TestObject> valTup(2, 2.0f, TestObject(2));
+ tuple<int&, float&, TestObject&> refTup(valTup);
+ tuple<const int&, const float&, const TestObject&> constRefTup(valTup);
+
+ EATEST_VERIFY(get<0>(refTup) == get<0>(valTup));
+ EATEST_VERIFY(get<1>(refTup) == get<1>(valTup));
+ EATEST_VERIFY(refTup == valTup);
+ EATEST_VERIFY(get<0>(refTup) == get<0>(constRefTup));
+ EATEST_VERIFY(get<1>(refTup) == get<1>(constRefTup));
+ EATEST_VERIFY(constRefTup == valTup);
+ EATEST_VERIFY(constRefTup == refTup);
+
+ // swap
+ swap(lesserTuple, greaterTuple);
+ EATEST_VERIFY(get<2>(lesserTuple) == 4 && get<2>(greaterTuple) == 3);
+ swap(greaterTuple, lesserTuple);
+ EATEST_VERIFY(lesserTuple < greaterTuple);
+ }
+
+ {
+ // Test construction of tuple containing a move only type
+ static_assert(is_constructible<MoveOnlyType, MoveOnlyType>::value, "is_constructible type trait giving confusing answers.");
+ static_assert(is_constructible<MoveOnlyType, MoveOnlyType&&>::value, "is_constructible type trait giving wrong answers.");
+ static_assert(is_constructible<MoveOnlyType&&, MoveOnlyType&&>::value, "is_constructible type trait giving bizarre answers.");
+ tuple<MoveOnlyType> aTupleWithMoveOnlyMember(1);
+ EATEST_VERIFY(get<0>(aTupleWithMoveOnlyMember).mVal == 1);
+ get<0>(aTupleWithMoveOnlyMember) = MoveOnlyType(2);
+ EATEST_VERIFY(get<0>(aTupleWithMoveOnlyMember).mVal == 2);
+
+ tuple<const MoveOnlyType&> aTupleWithRefToMoveOnlyMember(aTupleWithMoveOnlyMember);
+ EATEST_VERIFY(get<0>(aTupleWithRefToMoveOnlyMember).mVal == 2);
+
+ tuple<const MoveOnlyType&> aTupleWithConstRefToGetMoveOnly(get<0>(aTupleWithMoveOnlyMember));
+ EATEST_VERIFY(get<0>(aTupleWithConstRefToGetMoveOnly).mVal == 2);
+
+ tuple<MoveOnlyType&> aTupleWithRefToGetMoveOnly(get<0>(aTupleWithMoveOnlyMember));
+ EATEST_VERIFY(get<0>(aTupleWithRefToGetMoveOnly).mVal == 2);
+ }
+
+ {
+ // Test construction of tuple containing r-value references
+ int x = 42;
+ TestObject object{1337};
+
+ tuple<int&&, TestObject&&> aTupleWithRValueReference(eastl::move(x), eastl::move(object));
+ static_assert(is_same<decltype(get<0>(aTupleWithRValueReference)), int&>::value, "wrong return type for get when using r-value reference.");
+ static_assert(is_same<decltype(get<1>(aTupleWithRValueReference)), TestObject&>::value, "wrong return type for get when using r-value reference.");
+ EATEST_VERIFY(get<0>(aTupleWithRValueReference) == 42);
+ EATEST_VERIFY(get<1>(aTupleWithRValueReference).mX == 1337);
+
+ static_assert(!is_constructible<decltype(aTupleWithRValueReference), int&, TestObject&>::value, "it shouldn't be possible to assign r-value references with l-values.");
+ }
+
+ {
+ // Tuple helpers
+
+ // make_tuple
+ auto makeTuple = make_tuple(1, 2.0, true);
+ EATEST_VERIFY(get<0>(makeTuple) == 1 && get<1>(makeTuple) == 2.0 && get<2>(makeTuple) == true);
+
+ // TODO: reference_wrapper implementation needs to be finished to enable this code
+ {
+ int a = 2;
+ float b = 3.0f;
+ auto makeTuple2 = make_tuple(ref(a), b);
+ get<0>(makeTuple2) = 3;
+ get<1>(makeTuple2) = 4.0f;
+ EATEST_VERIFY(get<0>(makeTuple2) == 3 && get<1>(makeTuple2) == 4.0f && a == 3 && b == 3.0f);
+ }
+
+ // forward_as_tuple
+ {
+ auto forwardTest = [](tuple<MoveOnlyType&&, MoveOnlyType&&> x) -> tuple<MoveOnlyType, MoveOnlyType>
+ {
+ return tuple<MoveOnlyType, MoveOnlyType>(move(x));
+ };
+
+ tuple<MoveOnlyType, MoveOnlyType> aMovableTuple(
+ forwardTest(forward_as_tuple(MoveOnlyType(1), MoveOnlyType(2))));
+
+ EATEST_VERIFY(get<0>(aMovableTuple).mVal == 1 && get<1>(aMovableTuple).mVal == 2);
+ }
+
+ {
+ // tie
+ int a = 0;
+ double b = 0.0f;
+ static_assert(is_assignable<const Internal::ignore_t&, int>::value, "ignore_t not assignable");
+ static_assert(Internal::TupleAssignable<tuple<const Internal::ignore_t&>, tuple<int>>::value, "Not assignable");
+ tie(a, ignore, b) = make_tuple(1, 3, 5);
+ EATEST_VERIFY(a == 1 && b == 5.0f);
+
+ // tuple_cat
+ auto tcatRes = tuple_cat(make_tuple(1, 2.0f), make_tuple(3.0, true));
+ EATEST_VERIFY(get<0>(tcatRes) == 1 && get<1>(tcatRes) == 2.0f && get<2>(tcatRes) == 3.0 &&
+ get<3>(tcatRes) == true);
+
+ auto tcatRes2 = tuple_cat(make_tuple(1, 2.0f), make_tuple(3.0, true), make_tuple(5u, '6'));
+ EATEST_VERIFY(get<0>(tcatRes2) == 1 && get<1>(tcatRes2) == 2.0f && get<2>(tcatRes2) == 3.0 &&
+ get<3>(tcatRes2) == true && get<4>(tcatRes2) == 5u && get<5>(tcatRes2) == '6');
+
+ auto aCattedRefTuple = tuple_cat(make_tuple(1), tie(a, ignore, b));
+ get<1>(aCattedRefTuple) = 2;
+ EATEST_VERIFY(a == 2);
+ }
+
+ {
+ // Empty tuple
+ tuple<> emptyTuple;
+ EATEST_VERIFY(tuple_size<decltype(emptyTuple)>::value == 0);
+ emptyTuple = make_tuple();
+ auto anotherEmptyTuple = make_tuple();
+ swap(anotherEmptyTuple, emptyTuple);
+ }
+ }
+
+ // test piecewise_construct
+ {
+ {
+ struct local
+ {
+ local() = default;
+ local(int a, int b) : mA(a), mB(b) {}
+
+ int mA = 0;
+ int mB = 0;
+ };
+
+ auto t = make_tuple(42, 43);
+
+ eastl::pair<local, local> p(eastl::piecewise_construct, t, t);
+
+ EATEST_VERIFY(p.first.mA == 42);
+ EATEST_VERIFY(p.second.mA == 42);
+ EATEST_VERIFY(p.first.mB == 43);
+ EATEST_VERIFY(p.second.mB == 43);
+ }
+
+ {
+ struct local
+ {
+ local() = default;
+ local(int a, int b, int c, int d) : mA(a), mB(b), mC(c), mD(d) {}
+
+ int mA = 0;
+ int mB = 0;
+ int mC = 0;
+ int mD = 0;
+ };
+
+ auto t = make_tuple(42, 43, 44, 45);
+
+ eastl::pair<local, local> p(eastl::piecewise_construct, t, t);
+
+ EATEST_VERIFY(p.first.mA == 42);
+ EATEST_VERIFY(p.second.mA == 42);
+
+ EATEST_VERIFY(p.first.mB == 43);
+ EATEST_VERIFY(p.second.mB == 43);
+
+ EATEST_VERIFY(p.first.mC == 44);
+ EATEST_VERIFY(p.second.mC == 44);
+
+ EATEST_VERIFY(p.first.mD == 45);
+ EATEST_VERIFY(p.second.mD == 45);
+ }
+
+ {
+ struct local1
+ {
+ local1() = default;
+ local1(int a) : mA(a) {}
+ int mA = 0;
+ };
+
+ struct local2
+ {
+ local2() = default;
+ local2(char a) : mA(a) {}
+ char mA = 0;
+ };
+
+ auto t1 = make_tuple(42);
+ auto t2 = make_tuple('a');
+
+ eastl::pair<local1, local2> p(eastl::piecewise_construct, t1, t2);
+
+ EATEST_VERIFY(p.first.mA == 42);
+ EATEST_VERIFY(p.second.mA == 'a');
+ }
+ }
+
+ // apply
+ {
+ // test with tuples
+ {
+ {
+ auto result = eastl::apply([](int i) { return i; }, make_tuple(1));
+ EATEST_VERIFY(result == 1);
+ }
+
+ {
+ auto result = eastl::apply([](int i, int j) { return i + j; }, make_tuple(1, 2));
+ EATEST_VERIFY(result == 3);
+ }
+
+
+ {
+ auto result = eastl::apply([](int i, int j, int k, int m) { return i + j + k + m; }, make_tuple(1, 2, 3, 4));
+ EATEST_VERIFY(result == 10);
+ }
+ }
+
+ // test with pair
+ {
+ auto result = eastl::apply([](int i, int j) { return i + j; }, make_pair(1, 2));
+ EATEST_VERIFY(result == 3);
+ }
+
+ // test with array
+ {
+ // TODO(rparolin):
+ // eastl::array requires eastl::get support before we can support unpacking eastl::arrays with eastl::apply.
+ //
+ // {
+ // auto result = eastl::apply([](int i) { return i; }, eastl::array<int, 1>{1});
+ // EATEST_VERIFY(result == 1);
+ // }
+ // {
+ // auto result = eastl::apply([](int i, int j) { return i + j; }, eastl::array<int, 2>{1,2});
+ // EATEST_VERIFY(result == 3);
+ // }
+ // {
+ // auto result = eastl::apply([](int i, int j, int k, int m) { return i + j + k + m; }, eastl::array<int, 4>{1, 2, 3, 4});
+ // EATEST_VERIFY(result == 10);
+ // }
+ }
+ }
+
+ // Compilation test to make sure that the conditionally-explicit cast works
+ {
+ eastl::tuple<int, float, TestObject> arrayTup[] = {
+ {1, 1.0f, TestObject(1)},
+ {2, 2.0f, TestObject(2)},
+ {3, 3.0f, TestObject(3)},
+ {4, 4.0f, TestObject(4)}
+ };
+ (void)arrayTup;
+
+#if false
+ // the following code should not compile with conditionally-explicit behaviour (but does with fully implicit behaviour)
+ eastl::tuple<eastl::vector<float>, float> arrayOfArrayTup[] = {
+ {1.0f, 1.0f},
+ {2.0f, 2.0f}
+ };
+
+ eastl::tuple<eastl::vector<int>, float> arrayOfArrayTup2[] = {
+ {1, 1.0f},
+ {2, 2.0f}
+ };
+#endif
+ }
+
+ // Compilation test to make sure that we can handle reference to forward-declared types
+ {
+ struct ForwardDeclared;
+
+ auto fill_tuple = [](ForwardDeclared& f) {
+ eastl::tuple<ForwardDeclared&, const ForwardDeclared&> t{f, f};
+ return t;
+ };
+
+ struct ForwardDeclared
+ {
+ int x;
+ };
+
+ ForwardDeclared f{666};
+ auto t = fill_tuple(f);
+
+ EATEST_VERIFY(get<0>(t).x == 666);
+ EATEST_VERIFY(get<1>(t).x == 666);
+ }
+
+ #ifndef EA_COMPILER_NO_STRUCTURED_BINDING
+ // tuple structured bindings test
+ {
+ eastl::tuple<int, int, int> t = {1,2,3};
+ auto [x,y,z] = t;
+ EATEST_VERIFY(x == 1);
+ EATEST_VERIFY(y == 2);
+ EATEST_VERIFY(z == 3);
+ }
+
+ { // const unpacking test
+ eastl::tuple<int, int, int> t = {1,2,3};
+ const auto [x,y,z] = t;
+ EATEST_VERIFY(x == 1);
+ EATEST_VERIFY(y == 2);
+ EATEST_VERIFY(z == 3);
+ }
+ #endif
+
+ // user regression for tuple_cat
+ {
+ void* empty = nullptr;
+ auto t = eastl::make_tuple(empty, true);
+ auto tc = eastl::tuple_cat(eastl::make_tuple("asd", 1), t);
+
+ static_assert(eastl::is_same_v<decltype(tc), eastl::tuple<const char*, int, void*, bool>>, "type mismatch");
+
+ EATEST_VERIFY(eastl::string("asd") == eastl::get<0>(tc));
+ EATEST_VERIFY(eastl::get<1>(tc) == 1);
+ EATEST_VERIFY(eastl::get<2>(tc) == nullptr);
+ EATEST_VERIFY(eastl::get<3>(tc) == true);
+ }
+
+ // user reported regression that exercises type_traits trying to pull out the element_type from "fancy pointers"
+ {
+ auto up = eastl::make_unique<int[]>(100);
+ auto t = eastl::make_tuple(eastl::move(up));
+
+ using ResultTuple_t = decltype(t);
+ static_assert(eastl::is_same_v<ResultTuple_t, eastl::tuple<eastl::unique_ptr<int[]>>>);
+ static_assert(eastl::is_same_v<eastl::tuple_element_t<0, ResultTuple_t>, eastl::unique_ptr<int[]>>);
+ }
+
+ return nErrorCount;
+}
+
+#else
+
+int TestTuple() { return 0; }
+
+#endif // EASTL_TUPLE_ENABLED
+
+EA_RESTORE_VC_WARNING()
diff --git a/EASTL/test/source/TestTupleVector.cpp b/EASTL/test/source/TestTupleVector.cpp
new file mode 100644
index 0000000..8a83803
--- /dev/null
+++ b/EASTL/test/source/TestTupleVector.cpp
@@ -0,0 +1,1540 @@
+/////////////////////////////////////////////////////////////////////////////
+// TestTupleVector.cpp
+//
+// Copyright (c) 2018, Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+
+#include <EASTL/bonus/tuple_vector.h>
+
+#include <EASTL/sort.h>
+
+using namespace eastl;
+
+int TestTupleVector()
+{
+ int nErrorCount = 0;
+
+ // Test push-backs and accessors
+ {
+ tuple_vector<int> singleElementVec;
+ EATEST_VERIFY(singleElementVec.size() == 0);
+ EATEST_VERIFY(singleElementVec.capacity() == 0);
+ EATEST_VERIFY(singleElementVec.empty() == true);
+ EATEST_VERIFY(singleElementVec.validate());
+ singleElementVec.push_back_uninitialized();
+ singleElementVec.push_back(5);
+ EATEST_VERIFY(singleElementVec.size() == 2);
+ EATEST_VERIFY(singleElementVec.capacity() > 0);
+ EATEST_VERIFY(singleElementVec.get<0>()[1] == 5);
+ EATEST_VERIFY(singleElementVec.get<int>()[1] == 5);
+ EATEST_VERIFY(singleElementVec.empty() == false);
+ EATEST_VERIFY(singleElementVec.validate());
+
+ tuple_vector<int, float, bool> complexVec;
+ complexVec.reserve(5);
+ {
+ // need to call an overload of push_back that specifically grabs lvalue candidates - providing constants tend to prefer rvalue path
+ int intArg = 3;
+ float floatArg = 2.0f;
+ bool boolArg = true;
+ complexVec.push_back(intArg, floatArg, boolArg);
+ }
+ complexVec.push_back(1, 4.0f, false);
+ complexVec.push_back(2, 1.0f, true);
+ {
+ tuple<int, float, bool> complexTup(4, 3.0f, false);
+ complexVec.push_back(complexTup);
+ }
+ complexVec.push_back();
+ EATEST_VERIFY(complexVec.capacity() == 5);
+ EATEST_VERIFY(*(complexVec.get<0>()) == 3);
+ EATEST_VERIFY(complexVec.get<float>()[1] == 4.0f);
+ EATEST_VERIFY(complexVec.get<2>()[2] == complexVec.get<bool>()[2]);
+ EATEST_VERIFY(complexVec.validate());
+
+ tuple<int, float, bool> defaultComplexTup;
+ EATEST_VERIFY(complexVec.at(4) == defaultComplexTup);
+
+ tuple<int*, float*, bool*> complexPtrTuple = complexVec.data();
+ EATEST_VERIFY(get<0>(complexPtrTuple) != nullptr);
+ EATEST_VERIFY(get<2>(complexPtrTuple)[2] == complexVec.get<2>()[2]);
+
+ tuple<int&, float&, bool&> complexRefTuple = complexVec.at(2);
+ tuple<int&, float&, bool&> complexRefTupleBracket = complexVec[2];
+ tuple<int&, float&, bool&> complexRefTupleFront = complexVec.front();
+ tuple<int&, float&, bool&> complexRefTupleBack = complexVec.back();
+ EATEST_VERIFY(get<2>(complexRefTuple) == complexVec.get<2>()[2]);
+ EATEST_VERIFY(get<1>(complexRefTupleBracket) == 1.0f);
+ EATEST_VERIFY(get<1>(complexRefTupleFront) == 2.0f);
+ EATEST_VERIFY(get<1>(complexRefTupleBack) == 0.0f);
+
+ // verify the equivalent accessors for the const container exist/compile
+ {
+ const tuple_vector<int, float, bool>& constVec = complexVec;
+
+ EATEST_VERIFY(constVec.size() == 5);
+ EATEST_VERIFY(constVec.capacity() >= constVec.size());
+ EATEST_VERIFY(constVec.empty() == false);
+ EATEST_VERIFY(constVec.get<1>() == constVec.get<float>());
+
+ tuple<const int*, const float*, const bool*> constPtrTuple = constVec.data();
+ EATEST_VERIFY(get<0>(constPtrTuple) != nullptr);
+ EATEST_VERIFY(get<2>(constPtrTuple)[2] == constVec.get<2>()[2]);
+
+ tuple<const int&, const float&, const bool&> constRefTuple = constVec.at(2);
+ tuple<const int&, const float&, const bool&> constRefTupleBracket = constVec[2];
+ tuple<const int&, const float&, const bool&> constRefTupleFront = constVec.front();
+ tuple<const int&, const float&, const bool&> constRefTupleBack = constVec.back();
+ EATEST_VERIFY(get<2>(constRefTuple) == constVec.get<2>()[2]);
+ EATEST_VERIFY(get<1>(constRefTupleBracket) == 1.0f);
+ EATEST_VERIFY(get<1>(constRefTupleFront) == 2.0f);
+ EATEST_VERIFY(get<1>(constRefTupleBack) == 0.0f);
+
+ // check that return types of const-version of begin and cbegin (etc) match
+ static_assert(eastl::is_same<decltype(constVec.begin()), decltype(constVec.cbegin())>::value, "error");
+ static_assert(eastl::is_same<decltype(constVec.end()), decltype(constVec.cend())>::value, "error");
+ static_assert(eastl::is_same<decltype(constVec.rbegin()), decltype(constVec.crbegin())>::value, "error");
+ static_assert(eastl::is_same<decltype(constVec.rend()), decltype(constVec.crend())>::value, "error");
+
+ // check that return type of non-const version of begin and cbegin (etc) do _not_ match
+ static_assert(!eastl::is_same<decltype(complexVec.begin()), decltype(complexVec.cbegin())>::value, "error");
+ static_assert(!eastl::is_same<decltype(complexVec.end()), decltype(complexVec.cend())>::value, "error");
+ static_assert(!eastl::is_same<decltype(complexVec.rbegin()), decltype(complexVec.crbegin())>::value, "error");
+ static_assert(!eastl::is_same<decltype(complexVec.rend()), decltype(complexVec.crend())>::value, "error");
+
+ }
+ }
+
+ // test the memory layouts work for aligned structures
+ {
+ struct EA_ALIGN(16) AlignTestVec4
+ {
+ float a[4];
+ AlignTestVec4() : a{1.0f, 2.0f, 3.0f, 4.0f} {}
+ };
+
+ struct AlignTestByte3
+ {
+ char a[3];
+ AlignTestByte3() : a{1, 2, 3} {}
+ };
+
+ struct EA_ALIGN(8) AlignTestFourByte
+ {
+ int a[5];
+ AlignTestFourByte() : a{-1, -2, -3, -4, -5} {}
+ };
+
+ tuple_vector<bool, AlignTestVec4, AlignTestByte3, AlignTestFourByte> alignElementVec;
+ alignElementVec.push_back();
+ alignElementVec.push_back();
+ alignElementVec.push_back();
+ alignElementVec.push_back();
+ alignElementVec.push_back();
+
+ EATEST_VERIFY((uintptr_t)alignElementVec.get<AlignTestVec4>() % 16 == 0);
+ EATEST_VERIFY((uintptr_t)alignElementVec.get<AlignTestFourByte>() % 8 == 0);
+ }
+
+ // Test resize and various modifications
+ {
+ TestObject::Reset();
+
+ tuple_vector<bool, TestObject, float> testVec;
+ typedef tuple_vector<bool, TestObject, float>::size_type tuple_vector_size_type;
+ testVec.reserve(10);
+ for (int i = 0; i < 10; ++i)
+ {
+ testVec.push_back(i % 3 == 0, TestObject(i), (float)i);
+ }
+ testVec.pop_back();
+ EATEST_VERIFY(testVec.size() == 9);
+
+ // test resize that does destruction of objects
+ testVec.resize(5);
+ EATEST_VERIFY(testVec.size() == 5);
+ EATEST_VERIFY(TestObject::sTOCount == 5);
+ EATEST_VERIFY(testVec.capacity() == 10);
+
+ // test resize that does default construction of objects
+ testVec.resize(10);
+ EATEST_VERIFY(testVec.size() == 10);
+ EATEST_VERIFY(TestObject::sTOCount == 10);
+ EATEST_VERIFY(testVec.capacity() == 10);
+
+ // test resize that does default construction of objects and grows the vector
+ testVec.resize(15);
+ EATEST_VERIFY(testVec.size() == 15);
+ EATEST_VERIFY(TestObject::sTOCount == 15);
+ EATEST_VERIFY(testVec.capacity() > 10);
+ EATEST_VERIFY(testVec.validate());
+
+ // test resize with args that does destruction of objects
+ auto testVecCapacity = testVec.capacity();
+ testVec.resize(5, true, TestObject(5), 5.0f);
+ EATEST_VERIFY(testVec.size() == 5);
+ EATEST_VERIFY(TestObject::sTOCount == 5);
+ EATEST_VERIFY(testVec.capacity() == testVecCapacity);
+
+ // test resize with args that does construction of objects
+ testVec.resize(15, true, TestObject(5), 5.0f);
+ EATEST_VERIFY(testVec.size() == 15);
+ EATEST_VERIFY(TestObject::sTOCount == 15);
+ EATEST_VERIFY(testVec.capacity() == testVecCapacity);
+
+ // test resize with args that does construction of objects and grows the vector
+ auto newTestVecSize = testVecCapacity + 5;
+ testVec.resize(newTestVecSize, true, TestObject(5), 5.0f);
+ EATEST_VERIFY(testVec.size() == newTestVecSize);
+ EATEST_VERIFY(static_cast<tuple_vector_size_type>(TestObject::sTOCount) == newTestVecSize);
+ EATEST_VERIFY(testVec.capacity() > newTestVecSize);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 5; i < newTestVecSize; ++i)
+ {
+ EATEST_VERIFY(testVec.get<0>()[i] == true);
+ EATEST_VERIFY(testVec.get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(testVec.get<2>()[i] == 5.0f);
+ }
+
+ {
+ tuple<bool, TestObject, float> resizeTup(true, TestObject(10), 10.0f);
+ typedef tuple_vector<bool, TestObject, float>::size_type tuple_vector_size_type;
+
+ // test resize with tuple that does destruction of objects
+ testVecCapacity = testVec.capacity();
+ EATEST_VERIFY(testVecCapacity >= 15); // check for next two resizes to make sure we don't grow vec
+
+ testVec.resize(20, resizeTup);
+ EATEST_VERIFY(testVec.size() == 20);
+ EATEST_VERIFY(TestObject::sTOCount == 20 + 1);
+ EATEST_VERIFY(testVec.capacity() == testVecCapacity);
+
+ // test resize with tuple that does construction of objects
+ testVec.resize(25, resizeTup);
+ EATEST_VERIFY(testVec.size() == 25);
+ EATEST_VERIFY(TestObject::sTOCount == 25 + 1);
+ EATEST_VERIFY(testVec.capacity() == testVecCapacity);
+
+ // test resize with tuple that does construction of objects and grows the vector
+ newTestVecSize = testVecCapacity + 5;
+ testVec.resize(newTestVecSize, resizeTup);
+ EATEST_VERIFY(testVec.size() == newTestVecSize);
+ EATEST_VERIFY(static_cast<tuple_vector_size_type>(TestObject::sTOCount) == newTestVecSize + 1);
+ EATEST_VERIFY(testVec.capacity() > newTestVecSize);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 5; i < 20; ++i)
+ {
+ EATEST_VERIFY(testVec.get<0>()[i] == true);
+ EATEST_VERIFY(testVec.get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(testVec.get<2>()[i] == 5.0f);
+ }
+ for (unsigned int i = 20; i < testVecCapacity; ++i)
+ {
+ EATEST_VERIFY(testVec.get<0>()[i] == get<0>(resizeTup));
+ EATEST_VERIFY(testVec.get<1>()[i] == get<1>(resizeTup));
+ EATEST_VERIFY(testVec.get<2>()[i] == get<2>(resizeTup));
+ }
+ }
+
+ // test other modifiers
+ testVec.pop_back();
+ EATEST_VERIFY(testVec.size() == newTestVecSize - 1);
+ EATEST_VERIFY(static_cast<decltype(testVec)::size_type>(TestObject::sTOCount) == newTestVecSize - 1); // down 2 from last sTOCount check - resizeTup dtor and pop_back
+
+ EATEST_VERIFY(testVec.capacity() > newTestVecSize);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == testVec.size());
+ EATEST_VERIFY(testVec.validate());
+
+ testVec.clear();
+ EATEST_VERIFY(testVec.empty());
+ EATEST_VERIFY(testVec.validate());
+ EATEST_VERIFY(TestObject::IsClear());
+
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 0);
+ EATEST_VERIFY(testVec.validate());
+ TestObject::Reset();
+ }
+
+ // Test insert
+ {
+ TestObject::Reset();
+
+ // test insert with n values and lvalue args
+ {
+ tuple_vector<bool, TestObject, float> testVec;
+ bool boolArg = true;
+ TestObject toArg = TestObject(0);
+ float floatArg = 0.0f;
+ testVec.reserve(10);
+
+ // test insert on empty vector that doesn't cause growth
+ toArg = TestObject(3);
+ floatArg = 3.0f;
+ auto insertIter = testVec.insert(testVec.begin(), 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(insertIter == testVec.begin());
+
+ // test insert to end of vector that doesn't cause growth
+ toArg = TestObject(5);
+ floatArg = 5.0f;
+ insertIter = testVec.insert(testVec.end(), 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 6);
+ EATEST_VERIFY(insertIter == testVec.begin() + 3);
+
+ // test insert to middle of vector that doesn't cause growth
+ toArg = TestObject(4);
+ floatArg = 4.0f;
+ testVec.insert(testVec.begin() + 3, 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 9);
+ EATEST_VERIFY(testVec.capacity() == 10);
+
+ // test insert to end of vector that causes growth
+ toArg = TestObject(6);
+ floatArg = 6.0f;
+ testVec.insert(testVec.end(), 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 12);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 12);
+
+ // test insert to beginning of vector that causes growth
+ toArg = TestObject(1);
+ floatArg = 1.0f;
+ testVec.insert(testVec.begin(), 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 15);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 15);
+
+ // test insert to middle of vector that causes growth
+ toArg = TestObject(2);
+ floatArg = 2.0f;
+ testVec.insert(testVec.begin() + 3, 3, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 18);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 18);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.get<1>()[i] == TestObject(i / 3 + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with lvalue args
+ {
+ tuple_vector<bool, TestObject, float> testVec;
+ bool boolArg = true;
+ TestObject toArg = TestObject(0);
+ float floatArg = 0.0f;
+ testVec.reserve(3);
+
+ // test insert on empty vector that doesn't cause growth
+ toArg = TestObject(3);
+ floatArg = 3.0f;
+ testVec.insert(testVec.begin(), boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test insert to end of vector that doesn't cause growth
+ toArg = TestObject(5);
+ floatArg = 5.0f;
+ testVec.insert(testVec.end(), boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test insert to middle of vector that doesn't cause growth
+ toArg = TestObject(4);
+ floatArg = 4.0f;
+ testVec.insert(testVec.begin() + 1, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3);
+
+ // test insert to end of vector that causes growth
+ toArg = TestObject(6);
+ floatArg = 6.0f;
+ testVec.insert(testVec.end(), boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 4);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 4);
+
+ // test insert to beginning of vector that causes growth
+ toArg = TestObject(1);
+ floatArg = 1.0f;
+ testVec.insert(testVec.begin(), boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 5);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 5);
+
+ // test insert to middle of vector that causes growth
+ toArg = TestObject(2);
+ floatArg = 2.0f;
+ testVec.insert(testVec.begin() + 1, boolArg, toArg, floatArg);
+ EATEST_VERIFY(testVec.size() == 6);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 6);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.get<1>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with n and tuple
+ {
+ tuple_vector<bool, TestObject, float> testVec;
+ tuple<bool, TestObject, float> testTup;
+ testVec.reserve(10);
+
+ // test insert on empty vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(3), 3.0f);
+ testVec.insert(testVec.begin(), 3, testTup);
+ EATEST_VERIFY(testVec.size() == 3);
+
+ // test insert to end of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(5), 5.0f);
+ testVec.insert(testVec.end(), 3, testTup);
+ EATEST_VERIFY(testVec.size() == 6);
+
+ // test insert to middle of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(4), 4.0f);
+ testVec.insert(testVec.begin() + 3, 3, testTup);
+ EATEST_VERIFY(testVec.size() == 9);
+ EATEST_VERIFY(testVec.capacity() == 10);
+
+ // test insert to end of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(6), 6.0f);
+ testVec.insert(testVec.end(), 3, testTup);
+ EATEST_VERIFY(testVec.size() == 12);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 12);
+
+ // test insert to beginning of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(1), 1.0f);
+ testVec.insert(testVec.begin(), 3, testTup);
+ EATEST_VERIFY(testVec.size() == 15);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 15);
+
+ // test insert to middle of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(2), 2.0f);
+ testVec.insert(testVec.begin() + 3, 3, testTup);
+ EATEST_VERIFY(testVec.size() == 18);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 18);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.get<1>()[i] == TestObject(i / 3 + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with tuple
+ {
+ tuple_vector<bool, TestObject, float> testVec;
+ tuple<bool, TestObject, float> testTup;
+ testVec.reserve(3);
+
+ // test insert on empty vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(3), 3.0f);
+ testVec.insert(testVec.begin(), testTup);
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test insert to end of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(5), 5.0f);
+ testVec.insert(testVec.end(), testTup);
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test insert to middle of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(4), 4.0f);
+ testVec.insert(testVec.begin() + 1, testTup);
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3);
+
+ // test insert to end of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(6), 6.0f);
+ testVec.insert(testVec.end(), 1, testTup);
+ EATEST_VERIFY(testVec.size() == 4);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 4);
+
+ // test insert to beginning of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(1), 1.0f);
+ testVec.insert(testVec.begin(), 1, testTup);
+ EATEST_VERIFY(testVec.size() == 5);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 5);
+
+ // test insert to middle of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(2), 2.0f);
+ testVec.insert(testVec.begin() + 1, 1, testTup);
+ EATEST_VERIFY(testVec.size() == 6);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 6);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.get<1>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with initList
+ {
+ tuple_vector<bool, TestObject, float> testVec;
+ tuple<bool, TestObject, float> testTup;
+ testVec.reserve(10);
+
+ // test insert on empty vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(3), 3.0f);
+ testVec.insert(testVec.begin(), {
+ {true, TestObject(3), 3.0f},
+ testTup,
+ {true, TestObject(3), 3.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 3);
+
+ // test insert to end of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(5), 5.0f);
+ testVec.insert(testVec.end(), {
+ {true, TestObject(5), 5.0f},
+ testTup,
+ {true, TestObject(5), 5.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 6);
+
+ // test insert to middle of vector that doesn't cause growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(4), 4.0f);
+ testVec.insert(testVec.begin() + 3, {
+ {true, TestObject(4), 4.0f},
+ testTup,
+ {true, TestObject(4), 4.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 9);
+ EATEST_VERIFY(testVec.capacity() == 10);
+
+ // test insert to end of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(6), 6.0f);
+ testVec.insert(testVec.end(), {
+ {true, TestObject(6), 6.0f},
+ testTup,
+ {true, TestObject(6), 6.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 12);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 12);
+
+ // test insert to beginning of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(1), 1.0f);
+ testVec.insert(testVec.begin(), {
+ {true, TestObject(1), 1.0f},
+ testTup,
+ {true, TestObject(1), 1.0f}
+ });
+ EATEST_VERIFY(testVec.size() == 15);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 15);
+
+ // test insert to middle of vector that causes growth
+ testTup = tuple<bool, TestObject, float>(true, TestObject(2), 2.0f);
+ testVec.insert(testVec.begin() + 3, {
+ {true, TestObject(2), 2.0f},
+ testTup,
+ {true, TestObject(2), 2.0f
+ } });
+ EATEST_VERIFY(testVec.size() == 18);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 18);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.get<1>()[i] == TestObject(i / 3 + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with rvalue args
+ {
+ tuple_vector<int, MoveOnlyType, TestObject> testVec;
+ testVec.reserve(3);
+
+ // test insert on empty vector that doesn't cause growth
+ testVec.insert(testVec.begin(), 3, MoveOnlyType(3), TestObject(3));
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test insert to end of vector that doesn't cause growth
+ testVec.insert(testVec.end(), 5, MoveOnlyType(5), TestObject(5));
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test insert to middle of vector that doesn't cause growth
+ testVec.insert(testVec.begin() + 1, 4, MoveOnlyType(4), TestObject(4));
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3);
+
+ // test insert to end of vector that causes growth
+ testVec.insert(testVec.end(), 6, MoveOnlyType(6), TestObject(6));
+ EATEST_VERIFY(testVec.size() == 4);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 4);
+
+ // test insert to beginning of vector that causes growth
+ testVec.insert(testVec.begin(), 1, MoveOnlyType(1), TestObject(1));
+ EATEST_VERIFY(testVec.size() == 5);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 5);
+
+ // test insert to middle of vector that causes growth
+ testVec.insert(testVec.begin() + 1, 2, MoveOnlyType(2), TestObject(2));
+ EATEST_VERIFY(testVec.size() == 6);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 6);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.get<2>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with rvalue tuple
+ {
+ tuple_vector<int, MoveOnlyType, TestObject> testVec;
+ testVec.reserve(3);
+
+ // test insert on empty vector that doesn't cause growth
+ testVec.insert(testVec.begin(), forward_as_tuple(3, MoveOnlyType(3), TestObject(3)));
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test insert to end of vector that doesn't cause growth
+ testVec.insert(testVec.end(), forward_as_tuple(5, MoveOnlyType(5), TestObject(5)));
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test insert to middle of vector that doesn't cause growth
+ testVec.insert(testVec.begin() + 1, forward_as_tuple(4, MoveOnlyType(4), TestObject(4)));
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3);
+
+ // test insert to end of vector that causes growth
+ testVec.insert(testVec.end(), forward_as_tuple(6, MoveOnlyType(6), TestObject(6)));
+ EATEST_VERIFY(testVec.size() == 4);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 4);
+
+ // test insert to beginning of vector that causes growth
+ testVec.insert(testVec.begin(), forward_as_tuple(1, MoveOnlyType(1), TestObject(1)));
+ EATEST_VERIFY(testVec.size() == 5);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 5);
+
+ // test insert to middle of vector that causes growth
+ testVec.insert(testVec.begin() + 1, forward_as_tuple(2, MoveOnlyType(2), TestObject(2)));
+ EATEST_VERIFY(testVec.size() == 6);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 6);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.get<2>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test insert with iterator range
+ {
+ tuple_vector<bool, TestObject, float> srcVec;
+ for (unsigned int i = 0; i < 20; ++i)
+ {
+ srcVec.push_back(true, TestObject(i), (float)i);
+ }
+
+ tuple_vector<bool, TestObject, float> testVec;
+ testVec.reserve(10);
+
+ // test insert on empty vector that doesn't cause growth
+ testVec.insert(testVec.begin(), srcVec.begin() + 6, srcVec.begin() + 9);
+ EATEST_VERIFY(testVec.size() == 3);
+
+ // test insert to end of vector that doesn't cause growth
+ testVec.insert(testVec.end(), srcVec.begin() + 12, srcVec.begin() + 15);
+ EATEST_VERIFY(testVec.size() == 6);
+
+ // test insert to middle of vector that doesn't cause growth
+ testVec.insert(testVec.begin() + 3, srcVec.begin() + 9, srcVec.begin() + 12);
+ EATEST_VERIFY(testVec.size() == 9);
+ EATEST_VERIFY(testVec.capacity() == 10);
+
+ // test insert to end of vector that causes growth
+ testVec.insert(testVec.end(), srcVec.begin() + 15, srcVec.begin() + 18);
+ EATEST_VERIFY(testVec.size() == 12);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 12);
+
+ // test insert to beginning of vector that causes growth
+ testVec.insert(testVec.begin(), srcVec.begin(), srcVec.begin() + 3);
+ EATEST_VERIFY(testVec.size() == 15);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 15);
+
+ // test insert to middle of vector that causes growth
+ testVec.insert(testVec.begin() + 3, srcVec.begin() + 3, srcVec.begin() + 6);
+ EATEST_VERIFY(testVec.size() == 18);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 18);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test assign
+ {
+ {
+ tuple_vector<bool, TestObject, float> testVec;
+
+ // test assign that grows the capacity
+ testVec.assign(20, true, TestObject(1), 1.0f);
+ EATEST_VERIFY(testVec.size() == 20);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(1), 1.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 20);
+
+ // test assign that shrinks the vector
+ testVec.assign(10, true, TestObject(2), 2.0f);
+ EATEST_VERIFY(testVec.size() == 10);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(2), 2.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 10);
+
+ // test assign for when there's enough capacity
+ testVec.assign(15, true, TestObject(3), 3.0f);
+ EATEST_VERIFY(testVec.size() == 15);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(3), 3.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 15);
+ }
+
+ {
+ tuple<bool, TestObject, float> srcTup;
+ tuple_vector<bool, TestObject, float> testVec;
+
+ // test assign from tuple that grows the capacity
+ srcTup = make_tuple(true, TestObject(1), 1.0f);
+ testVec.assign(20, srcTup);
+ EATEST_VERIFY(testVec.size() == 20);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcTup);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 20 + 1);
+
+ // test assign from tuple that shrinks the vector
+ srcTup = make_tuple(true, TestObject(2), 2.0f);
+ testVec.assign(10, srcTup);
+ EATEST_VERIFY(testVec.size() == 10);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcTup);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 10 + 1);
+
+ // test assign from tuple for when there's enough capacity
+ srcTup = make_tuple(true, TestObject(3), 3.0f);
+ testVec.assign(15, srcTup);
+ EATEST_VERIFY(testVec.size() == 15);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcTup);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 15 + 1);
+ }
+
+ {
+ tuple_vector<bool, TestObject, float> srcVec;
+ for (unsigned int i = 0; i < 20; ++i)
+ {
+ srcVec.push_back(true, TestObject(i), (float)i);
+ }
+ tuple_vector<bool, TestObject, float> testVec;
+
+ // test assign from iter range that grows the capacity
+ testVec.assign(srcVec.begin() + 5, srcVec.begin() + 15);
+ EATEST_VERIFY(testVec.size() == 10);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcVec[i+5]);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 10 + 20);
+
+ // test assign from iter range that shrinks the vector
+ testVec.assign(srcVec.begin() + 2, srcVec.begin() + 7);
+ EATEST_VERIFY(testVec.size() == 5);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcVec[i + 2]);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 5 + 20);
+
+ // test assign from iter range for when there's enough capacity
+ testVec.assign(srcVec.begin() + 5, srcVec.begin() + 15);
+ EATEST_VERIFY(testVec.size() == 10);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == srcVec[i + 5]);
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 10 + 20);
+ }
+
+ {
+ tuple_vector<bool, TestObject, float> testVec;
+
+ // test assign from initList that grows the capacity
+ testVec.assign({
+ { true, TestObject(1), 1.0f },
+ { true, TestObject(2), 2.0f },
+ { true, TestObject(3), 3.0f }
+ });
+ EATEST_VERIFY(testVec.size() == 3);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 1), (float)i + 1.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 3);
+
+ // test assign from initList that shrinks the vector
+ testVec.assign({
+ { true, TestObject(4), 4.0f }
+ });
+ EATEST_VERIFY(testVec.size() == 1);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 4), (float)i + 4.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 1);
+
+ // test assign from initList for when there's enough capacity
+ testVec.assign({
+ { true, TestObject(5), 5.0f },
+ { true, TestObject(6), 6.0f }
+ });
+ EATEST_VERIFY(testVec.size() == 2);
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 5), (float)i + 5.0f));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 2);
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test erase functions
+ {
+ {
+ tuple_vector<bool, TestObject, float> srcVec;
+ for (unsigned int i = 0; i < 20; ++i)
+ {
+ srcVec.push_back(true, TestObject(i), (float)i);
+ }
+ tuple_vector<bool, TestObject, float> testVec;
+
+ // test erase on an iter range
+ testVec.assign(srcVec.begin(), srcVec.end());
+ auto eraseIter = testVec.erase(testVec.begin() + 5, testVec.begin() + 10);
+ EATEST_VERIFY(eraseIter == testVec.begin() + 5);
+ EATEST_VERIFY(testVec.size() == 15);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i < 5)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 5), (float)(i + 5)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 15 + 20);
+
+ // test erase on one position
+ testVec.assign(srcVec.begin(), srcVec.end());
+ eraseIter = testVec.erase(testVec.begin() + 5);
+ EATEST_VERIFY(eraseIter == testVec.begin() + 5);
+ EATEST_VERIFY(testVec.size() == 19);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i < 5)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 1), (float)(i + 1)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 19 + 20);
+
+ // test erase_unsorted
+ testVec.assign(srcVec.begin(), srcVec.end());
+ eraseIter = testVec.erase_unsorted(testVec.begin() + 5);
+ EATEST_VERIFY(eraseIter == testVec.begin() + 5);
+ EATEST_VERIFY(testVec.size() == 19);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i != 5)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(19), (float)(19)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 19 + 20);
+ }
+
+ // test erase again but with reverse iterators everywhere
+ {
+ tuple_vector<bool, TestObject, float> srcVec;
+ for (unsigned int i = 0; i < 20; ++i)
+ {
+ srcVec.push_back(true, TestObject(i), (float)i);
+ }
+ tuple_vector<bool, TestObject, float> testVec;
+
+ // test erase on an iter range
+ testVec.assign(srcVec.begin(), srcVec.end());
+ auto eraseIter = testVec.erase(testVec.rbegin() + 5, testVec.rbegin() + 10);
+ EATEST_VERIFY(eraseIter == testVec.rbegin() + 5);
+ EATEST_VERIFY(testVec.size() == 15);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i < 10)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 5), (float)(i + 5)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 15 + 20);
+
+ // test erase on one position
+ testVec.assign(srcVec.begin(), srcVec.end());
+ eraseIter = testVec.erase(testVec.rbegin() + 5);
+ EATEST_VERIFY(eraseIter == testVec.rbegin() + 5);
+ EATEST_VERIFY(testVec.size() == 19);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i < 14)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i + 1), (float)(i + 1)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 19 + 20);
+
+ // test erase_unsorted
+ testVec.assign(srcVec.begin(), srcVec.end());
+ eraseIter = testVec.erase_unsorted(testVec.rbegin() + 5);
+ EATEST_VERIFY(eraseIter == testVec.rbegin() + 5);
+ EATEST_VERIFY(testVec.size() == 19);
+ EATEST_VERIFY(testVec.validate());
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ if (i != 14)
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(i), (float)i));
+ else
+ EATEST_VERIFY(testVec[i] == make_tuple(true, TestObject(19), (float)(19)));
+ }
+ EATEST_VERIFY(TestObject::sTOCount == 19 + 20);
+ }
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test multitude of constructors
+ {
+ MallocAllocator ma;
+ TestObject::Reset();
+
+ // test ctor via initlist to prime srcVec
+ tuple_vector<bool, TestObject, float> srcVec({
+ { true, TestObject(0), 0.0f},
+ { false, TestObject(1), 1.0f},
+ { false, TestObject(2), 2.0f},
+ { true, TestObject(3), 3.0f},
+ { false, TestObject(4), 4.0f},
+ { false, TestObject(5), 5.0f},
+ { true, TestObject(6), 6.0f},
+ { false, TestObject(7), 7.0f},
+ { false, TestObject(8), 8.0f},
+ { true, TestObject(9), 9.0f}
+ });
+
+ // copy entire tuple_vector in ctor
+ {
+ tuple_vector<bool, TestObject, float> ctorFromConstRef(srcVec);
+ EATEST_VERIFY(ctorFromConstRef.size() == 10);
+ EATEST_VERIFY(ctorFromConstRef.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromConstRef.get<0>()[i] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromConstRef.get<1>()[i] == TestObject(i));
+ EATEST_VERIFY(ctorFromConstRef.get<2>()[i] == (float)i);
+ }
+ }
+
+ // copy entire tuple_vector via assignment
+ {
+ tuple_vector<bool, TestObject, float> ctorFromAssignment;
+ ctorFromAssignment = srcVec;
+ EATEST_VERIFY(ctorFromAssignment.size() == 10);
+ EATEST_VERIFY(ctorFromAssignment.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromAssignment.get<0>()[i] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromAssignment.get<1>()[i] == TestObject(i));
+ EATEST_VERIFY(ctorFromAssignment.get<2>()[i] == (float)i);
+ }
+ }
+
+ // copy entire tuple_vector via assignment of init-list
+ {
+ tuple_vector<bool, TestObject, float> ctorFromAssignment;
+ ctorFromAssignment = {
+ { true, TestObject(0), 0.0f},
+ { false, TestObject(1), 1.0f},
+ { false, TestObject(2), 2.0f},
+ { true, TestObject(3), 3.0f},
+ { false, TestObject(4), 4.0f},
+ { false, TestObject(5), 5.0f},
+ { true, TestObject(6), 6.0f},
+ { false, TestObject(7), 7.0f},
+ { false, TestObject(8), 8.0f},
+ { true, TestObject(9), 9.0f}
+ };
+ EATEST_VERIFY(ctorFromAssignment.size() == 10);
+ EATEST_VERIFY(ctorFromAssignment.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromAssignment.get<0>()[i] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromAssignment.get<1>()[i] == TestObject(i));
+ EATEST_VERIFY(ctorFromAssignment.get<2>()[i] == (float)i);
+ }
+ }
+
+ // ctor tuple_vector with iterator range
+ {
+ tuple_vector<bool, TestObject, float> ctorFromIters(srcVec.begin() + 2, srcVec.begin() + 7);
+ EATEST_VERIFY(ctorFromIters.size() == 5);
+ EATEST_VERIFY(ctorFromIters.validate());
+ for (int i = 2; i < 7; ++i)
+ {
+ EATEST_VERIFY(ctorFromIters.get<0>()[i - 2] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromIters.get<1>()[i - 2] == TestObject(i));
+ EATEST_VERIFY(ctorFromIters.get<2>()[i - 2] == (float)i);
+ }
+ }
+
+ // ctor tuple_vector with initial size
+ {
+ tuple_vector<bool, TestObject, float> ctorFromFill(10);
+ EATEST_VERIFY(ctorFromFill.size() == 10);
+ EATEST_VERIFY(ctorFromFill.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromFill.get<0>()[i] == false);
+ EATEST_VERIFY(ctorFromFill.get<1>()[i] == TestObject());
+ EATEST_VERIFY(ctorFromFill.get<2>()[i] == 0.0f);
+ }
+ }
+
+ // ctor tuple_vector with initial size and args
+ {
+ tuple_vector<bool, TestObject, float> ctorFromFillArgs(10, true, TestObject(5), 5.0f);
+ EATEST_VERIFY(ctorFromFillArgs.size() == 10);
+ EATEST_VERIFY(ctorFromFillArgs.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromFillArgs.get<0>()[i] == true);
+ EATEST_VERIFY(ctorFromFillArgs.get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(ctorFromFillArgs.get<2>()[i] == 5.0f);
+ }
+ }
+
+ // ctor tuple_vector with initial size and tuple
+ {
+ tuple<bool, TestObject, float> tup(true, TestObject(5), 5.0f);
+ tuple_vector<bool, TestObject, float> ctorFromFillTup(10, tup);
+ EATEST_VERIFY(ctorFromFillTup.size() == 10);
+ EATEST_VERIFY(ctorFromFillTup.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromFillTup.get<0>()[i] == true);
+ EATEST_VERIFY(ctorFromFillTup.get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(ctorFromFillTup.get<2>()[i] == 5.0f);
+ }
+ }
+
+ // ctor tuple_Vector with custom mallocator
+ {
+ tuple_vector_alloc<MallocAllocator, bool, TestObject, float> ctorWithAlloc(ma);
+ tuple_vector<bool, TestObject, float> ctorDefault;
+
+ ctorWithAlloc.push_back();
+ ctorDefault.push_back();
+
+ EATEST_VERIFY(ctorWithAlloc == ctorDefault);
+ EATEST_VERIFY(ctorWithAlloc.validate());
+ }
+
+ // ctor tuple_vector_alloc with copy (from diff. allocator)
+ {
+ tuple_vector_alloc<MallocAllocator, bool, TestObject, float> ctorFromConstRef(srcVec, ma);
+ EATEST_VERIFY(ctorFromConstRef.size() == 10);
+ EATEST_VERIFY(ctorFromConstRef.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromConstRef.get<0>()[i] == (i % 3 == 0));
+ EATEST_VERIFY(ctorFromConstRef.get<1>()[i] == TestObject(i));
+ EATEST_VERIFY(ctorFromConstRef.get<2>()[i] == (float)i);
+ }
+ EATEST_VERIFY(ctorFromConstRef.validate());
+ }
+
+ // ctor tuple_vector with initial size and args
+ {
+ tuple_vector_alloc<MallocAllocator, bool, TestObject, float> ctorFromFillArgs(10, true, TestObject(5), 5.0f, ma);
+ EATEST_VERIFY(ctorFromFillArgs.size() == 10);
+ EATEST_VERIFY(ctorFromFillArgs.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromFillArgs.get<0>()[i] == true);
+ EATEST_VERIFY(ctorFromFillArgs.get<1>()[i] == TestObject(5));
+ EATEST_VERIFY(ctorFromFillArgs.get<2>()[i] == 5.0f);
+ }
+ }
+
+ // ctor tuple_vector via move
+ {
+ tuple_vector<int, MoveOnlyType, TestObject> srcMoveVec;
+ for (int i = 0; i < 10; ++i)
+ {
+ srcMoveVec.emplace_back(move(i), MoveOnlyType(i), TestObject(i));
+ }
+
+ tuple_vector<int, MoveOnlyType, TestObject> ctorFromMove(move(srcMoveVec));
+
+ EATEST_VERIFY(ctorFromMove.size() == 10);
+ EATEST_VERIFY(ctorFromMove.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromMove.get<0>()[i] == i);
+ EATEST_VERIFY(ctorFromMove.get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(ctorFromMove.get<2>()[i] == TestObject(i));
+ }
+ EATEST_VERIFY(srcMoveVec.size() == 0);
+ EATEST_VERIFY(srcMoveVec.validate());
+ }
+
+ // ctor tuple_vector via move (from diff. allocator)
+ {
+ tuple_vector_alloc<MallocAllocator, int, MoveOnlyType, TestObject> srcMoveVec;
+ for (int i = 0; i < 10; ++i)
+ {
+ srcMoveVec.emplace_back(move(i), MoveOnlyType(i), TestObject(i));
+ }
+
+ MallocAllocator otherMa;
+ tuple_vector_alloc<MallocAllocator, int, MoveOnlyType, TestObject> ctorFromMove(move(srcMoveVec), otherMa);
+
+ EATEST_VERIFY(ctorFromMove.size() == 10);
+ EATEST_VERIFY(ctorFromMove.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromMove.get<0>()[i] == i);
+ EATEST_VERIFY(ctorFromMove.get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(ctorFromMove.get<2>()[i] == TestObject(i));
+ }
+ EATEST_VERIFY(srcMoveVec.size() == 0);
+ EATEST_VERIFY(srcMoveVec.validate());
+
+ // bonus test for specifying a custom allocator, but using the same one as above
+ tuple_vector_alloc<MallocAllocator, int, MoveOnlyType, TestObject> ctorFromMoveSameAlloc(move(ctorFromMove), otherMa);
+ EATEST_VERIFY(ctorFromMoveSameAlloc.size() == 10);
+ EATEST_VERIFY(ctorFromMoveSameAlloc.validate());
+ for (int i = 0; i < 10; ++i)
+ {
+ EATEST_VERIFY(ctorFromMoveSameAlloc.get<0>()[i] == i);
+ EATEST_VERIFY(ctorFromMoveSameAlloc.get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(ctorFromMoveSameAlloc.get<2>()[i] == TestObject(i));
+ }
+ EATEST_VERIFY(ctorFromMove.size() == 0);
+ EATEST_VERIFY(ctorFromMove.validate());
+ }
+
+ // ctor tuple_vector via move-iters
+ {
+ tuple_vector<int, MoveOnlyType, TestObject> srcMoveVec;
+ for (int i = 0; i < 10; ++i)
+ {
+ srcMoveVec.emplace_back(move(i), MoveOnlyType(i), TestObject(i));
+ }
+
+ tuple_vector<int, MoveOnlyType, TestObject> ctorFromMove(make_move_iterator(srcMoveVec.begin() + 2), make_move_iterator(srcMoveVec.begin() + 7));
+
+ EATEST_VERIFY(ctorFromMove.size() == 5);
+ EATEST_VERIFY(ctorFromMove.validate());
+ for (int i = 2; i < 7; ++i)
+ {
+ EATEST_VERIFY(ctorFromMove.get<0>()[i-2] == i);
+ EATEST_VERIFY(ctorFromMove.get<1>()[i-2] == MoveOnlyType(i));
+ EATEST_VERIFY(ctorFromMove.get<2>()[i-2] == TestObject(i));
+ }
+ EATEST_VERIFY(srcMoveVec.size() == 10);
+ EATEST_VERIFY(srcMoveVec.validate());
+ for (int i = 0; i < 2; ++i)
+ {
+ EATEST_VERIFY(srcMoveVec.get<0>()[i] == i);
+ EATEST_VERIFY(srcMoveVec.get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(srcMoveVec.get<2>()[i] == TestObject(i));
+ }
+ for (int i = 2; i < 7; ++i)
+ {
+ EATEST_VERIFY(srcMoveVec.get<0>()[i] == i); // int's just get copied because they're POD
+ EATEST_VERIFY(srcMoveVec.get<1>()[i] == MoveOnlyType(0));
+ EATEST_VERIFY(srcMoveVec.get<2>()[i] == TestObject(0));
+ }
+ for (int i = 7; i < 10; ++i)
+ {
+ EATEST_VERIFY(srcMoveVec.get<0>()[i] == i);
+ EATEST_VERIFY(srcMoveVec.get<1>()[i] == MoveOnlyType(i));
+ EATEST_VERIFY(srcMoveVec.get<2>()[i] == TestObject(i));
+ }
+ }
+
+ srcVec.clear();
+ EATEST_VERIFY(TestObject::IsClear());
+
+ TestObject::Reset();
+ }
+
+ // Test swap
+ {
+ tuple_vector<int, float, bool> complexVec;
+ complexVec.push_back(3, 2.0f, true);
+ complexVec.push_back(1, 4.0f, false);
+ complexVec.push_back(2, 1.0f, true);
+ complexVec.push_back(4, 3.0f, false);
+
+ tuple_vector<int, float, bool> otherComplexVec;
+ complexVec.swap(otherComplexVec);
+
+ EATEST_VERIFY(complexVec.size() == 0);
+ EATEST_VERIFY(complexVec.validate());
+ EATEST_VERIFY(otherComplexVec.validate());
+ EATEST_VERIFY(otherComplexVec.get<0>()[0] == 3);
+ EATEST_VERIFY(otherComplexVec.get<float>()[1] == 4.0f);
+
+ complexVec.push_back(10, 10.0f, true);
+ swap(complexVec, otherComplexVec);
+
+ EATEST_VERIFY(complexVec.validate());
+ EATEST_VERIFY(*(complexVec.get<0>()) == 3);
+ EATEST_VERIFY(complexVec.get<float>()[1] == 4.0f);
+
+ EATEST_VERIFY(otherComplexVec.validate());
+ EATEST_VERIFY(otherComplexVec.get<float>()[0] == 10.0f);
+ EATEST_VERIFY(otherComplexVec.size() == 1);
+
+ }
+
+
+ // Test tuple_Vector in a ranged for, and other large-scale iterator testing
+ {
+ tuple_vector<int, float, int> tripleElementVec;
+ tripleElementVec.push_back(1, 2.0f, 6);
+ tripleElementVec.push_back(2, 3.0f, 7);
+ tripleElementVec.push_back(3, 4.0f, 8);
+ tripleElementVec.push_back(4, 5.0f, 9);
+ tripleElementVec.push_back(5, 6.0f, 10);
+
+
+ // test copyConstructible, copyAssignable, swappable, prefix inc, !=, reference convertible to value_type (InputIterator!)
+ {
+ tuple_vector<int, float, int>::iterator iter = tripleElementVec.begin();
+ ++iter;
+ auto copiedIter(iter);
+ EATEST_VERIFY(get<2>(*copiedIter) == 7);
+ EATEST_VERIFY(copiedIter == iter);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(copiedIter) != isf_none);
+
+ ++iter;
+ copiedIter = iter;
+ EATEST_VERIFY(get<2>(*copiedIter) == 8);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(copiedIter) != isf_none);
+
+ ++iter;
+ swap(iter, copiedIter);
+ EATEST_VERIFY(get<2>(*iter) == 8);
+ EATEST_VERIFY(get<2>(*copiedIter) == 9);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(copiedIter) != isf_none);
+
+ EATEST_VERIFY(copiedIter != iter);
+
+ tuple<const int&, const float&, const int&> ref(*iter);
+ tuple<int, float, int> value(*iter);
+ EATEST_VERIFY(get<2>(ref) == get<2>(value));
+ }
+
+ // test postfix increment, default constructible (ForwardIterator)
+ {
+ tuple_vector<int, float, int>::iterator iter = tripleElementVec.begin();
+ auto prefixIter = ++iter;
+
+ tuple_vector<int, float, int>::iterator postfixIter;
+ postfixIter = iter++;
+ EATEST_VERIFY(prefixIter == postfixIter);
+ EATEST_VERIFY(get<2>(*prefixIter) == 7);
+ EATEST_VERIFY(get<2>(*iter) == 8);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(prefixIter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(postfixIter) != isf_none);
+ }
+
+ // test prefix decrement and postfix decrement (BidirectionalIterator)
+ {
+ tuple_vector<int, float, int>::iterator iter = tripleElementVec.end();
+ auto prefixIter = --iter;
+
+ tuple_vector<int, float, int>::iterator postfixIter;
+ postfixIter = iter--;
+ EATEST_VERIFY(prefixIter == postfixIter);
+ EATEST_VERIFY(get<2>(*prefixIter) == 10);
+ EATEST_VERIFY(get<2>(*iter) == 9);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(prefixIter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(postfixIter) != isf_none);
+ }
+
+ // test many arithmetic operations (RandomAccessIterator)
+ {
+ tuple_vector<int, float, int>::iterator iter = tripleElementVec.begin();
+ auto symmetryOne = iter + 2;
+ auto symmetryTwo = 2 + iter;
+ iter += 2;
+ EATEST_VERIFY(symmetryOne == symmetryTwo);
+ EATEST_VERIFY(symmetryOne == iter);
+
+ symmetryOne = iter - 2;
+ symmetryTwo = 2 - iter;
+ iter -= 2;
+ EATEST_VERIFY(symmetryOne == symmetryTwo);
+ EATEST_VERIFY(symmetryOne == iter);
+
+ iter += 2;
+ EATEST_VERIFY(iter - symmetryOne == 2);
+
+ tuple<int&, float&, int&> symmetryRef = symmetryOne[2];
+ EATEST_VERIFY(get<2>(symmetryRef) == get<2>(*iter));
+
+ EATEST_VERIFY(symmetryOne < iter);
+ EATEST_VERIFY(iter > symmetryOne);
+ EATEST_VERIFY(symmetryOne >= symmetryTwo && iter >= symmetryOne);
+ EATEST_VERIFY(symmetryOne <= symmetryTwo && symmetryOne <= iter);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(iter) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(symmetryOne) != isf_none);
+ EATEST_VERIFY(tripleElementVec.validate_iterator(symmetryTwo) != isf_none);
+ }
+
+ // test simple iteration, and reverse iteration
+ {
+ float i = 0;
+ int j = 0;
+ EATEST_VERIFY(&get<0>(*tripleElementVec.begin()) == tripleElementVec.get<0>());
+ EATEST_VERIFY(&get<1>(*tripleElementVec.begin()) == tripleElementVec.get<1>());
+ for (auto iter : tripleElementVec)
+ {
+ i += get<1>(iter);
+ j += get<2>(iter);
+ }
+ EATEST_VERIFY(i == 20.0f);
+ EATEST_VERIFY(j == 40);
+
+ float reverse_i = 0;
+ int reverse_j = 0;
+
+ eastl::for_each(tripleElementVec.rbegin(), tripleElementVec.rend(),
+ [&](const tuple<int, float, int> tup)
+ {
+ reverse_i += get<1>(tup);
+ reverse_j += get<2>(tup);
+ });
+ EATEST_VERIFY(i == reverse_i);
+ EATEST_VERIFY(j == reverse_j);
+ EATEST_VERIFY(get<0>(*tripleElementVec.rbegin()) == 5);
+ }
+ }
+
+ // Test move operations
+ {
+ TestObject::Reset();
+
+ // test emplace
+ {
+ tuple_vector<int, MoveOnlyType, TestObject> testVec;
+ testVec.reserve(3);
+
+ // test emplace on empty vector that doesn't cause growth
+ testVec.emplace(testVec.begin(), 3, MoveOnlyType(3), TestObject(3));
+ EATEST_VERIFY(testVec.size() == 1);
+
+ // test emplace to end of vector that doesn't cause growth
+ testVec.emplace(testVec.end(), 5, MoveOnlyType(5), TestObject(5));
+ EATEST_VERIFY(testVec.size() == 2);
+
+ // test emplace to middle of vector that doesn't cause growth
+ testVec.emplace(testVec.begin() + 1, 4, MoveOnlyType(4), TestObject(4));
+ EATEST_VERIFY(testVec.size() == 3);
+ EATEST_VERIFY(testVec.capacity() == 3);
+
+ // test emplace to end of vector that causes growth
+ testVec.emplace(testVec.end(), 6, MoveOnlyType(6), TestObject(6));
+ EATEST_VERIFY(testVec.size() == 4);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 4);
+
+ // test emplace to beginning of vector that causes growth
+ testVec.emplace(testVec.begin(), 1, MoveOnlyType(1), TestObject(1));
+ EATEST_VERIFY(testVec.size() == 5);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 5);
+
+ // test emplace to middle of vector that causes growth
+ testVec.emplace(testVec.begin() + 1, 2, MoveOnlyType(2), TestObject(2));
+ EATEST_VERIFY(testVec.size() == 6);
+ testVec.shrink_to_fit();
+ EATEST_VERIFY(testVec.capacity() == 6);
+
+ for (unsigned int i = 0; i < testVec.size(); ++i)
+ {
+ EATEST_VERIFY(testVec.get<2>()[i] == TestObject(i + 1));
+ }
+ EATEST_VERIFY(testVec.validate());
+ }
+
+ // test some other miscellania around rvalues, including...
+ // push_back with rvalue args, push_back with rvalue tuple,
+ // emplace_back with args, and emplace_back with tup
+ {
+ tuple_vector<int, MoveOnlyType, TestObject> v1;
+ tuple_vector<int, MoveOnlyType, TestObject> v2;
+ // add some data in the vector so we can move it to the other vector.
+ v1.reserve(5);
+ auto emplacedTup = v1.emplace_back(1, MoveOnlyType(1), TestObject(1));
+ EATEST_VERIFY(emplacedTup == v1.back());
+ v1.push_back(3, MoveOnlyType(3), TestObject(3));
+ v1.emplace_back(forward_as_tuple(5, MoveOnlyType(5), TestObject(5)));
+ v1.push_back(forward_as_tuple(6, MoveOnlyType(6), TestObject(6)));
+ v1.emplace(v1.begin() + 1, 2, MoveOnlyType(2), TestObject(2));
+ v1.emplace(v1.begin() + 3, make_tuple(4, MoveOnlyType(4), TestObject(4)));
+
+ tuple<int&, MoveOnlyType&, TestObject&> movedTup = v1.at(0);
+ EATEST_VERIFY(v1.validate());
+ EATEST_VERIFY(get<0>(movedTup) == 1);
+ EATEST_VERIFY(get<0>(*v1.begin()) == 1);
+
+ for (int i = 0; i < static_cast<int>(v1.size()); ++i)
+ {
+ EATEST_VERIFY(v1.get<0>()[i] == i + 1);
+ }
+ EATEST_VERIFY(!v1.empty() && v2.empty());
+ v2 = eastl::move(v1);
+ EATEST_VERIFY(v2.validate());
+ EATEST_VERIFY(v1.empty() && !v2.empty());
+ v1.swap(v2);
+ EATEST_VERIFY(v1.validate());
+ EATEST_VERIFY(v2.validate());
+ EATEST_VERIFY(!v1.empty() && v2.empty());
+ }
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test comparisons
+ {
+ MallocAllocator ma;
+ tuple_vector<bool, TestObject, float> equalsVec1, equalsVec2;
+ for (int i = 0; i < 10; ++i)
+ {
+ equalsVec1.push_back(i % 3 == 0, TestObject(i), (float)i);
+ equalsVec2.push_back(i % 3 == 0, TestObject(i), (float)i);
+ }
+ EATEST_VERIFY(equalsVec1 == equalsVec2);
+
+ tuple_vector<bool, TestObject, float> smallSizeVec(5);
+ tuple_vector<bool, TestObject, float> lessThanVec(10);
+ tuple_vector_alloc<MallocAllocator, bool, TestObject, float> greaterThanVec(10, ma);
+ for (int i = 0; i < 10; ++i)
+ {
+ lessThanVec.push_back(i % 3 == 0, TestObject(i), (float)i);
+ greaterThanVec.push_back(i % 3 == 0, TestObject(i * 2), (float)i * 2);
+ }
+ EATEST_VERIFY(equalsVec1 != smallSizeVec);
+ EATEST_VERIFY(equalsVec1 != lessThanVec);
+ EATEST_VERIFY(equalsVec1 != greaterThanVec);
+ EATEST_VERIFY(lessThanVec < greaterThanVec);
+ EATEST_VERIFY(greaterThanVec > lessThanVec);
+ EATEST_VERIFY(lessThanVec <= greaterThanVec);
+ EATEST_VERIFY(equalsVec1 <= equalsVec2);
+ EATEST_VERIFY(equalsVec1 >= equalsVec2);
+ }
+
+ // Test partition
+ {
+ {
+ tuple_vector<bool, TestObject, float, MoveOnlyType> vec;
+ for (int i = 0; i < 10; ++i)
+ {
+ vec.push_back(i % 3 == 0, TestObject(i), (float)i, MoveOnlyType(i));
+ }
+
+ eastl::partition(vec.begin(), vec.end(), [](tuple<bool&, TestObject&, float&, MoveOnlyType&> a)
+ { return get<0>(a) == true; });
+
+ // partition will split the array into 4 elements where the bool property is true, and 6 where it's false
+ for (int i = 0; i < 4; ++i)
+ EATEST_VERIFY(vec.get<0>()[i] == true);
+ for (int i = 4; i < 10; ++i)
+ EATEST_VERIFY(vec.get<0>()[i] == false);
+
+ EATEST_VERIFY(vec.validate());
+ EATEST_VERIFY(TestObject::sTOCount == 10);
+ }
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ // Test allocator manipulation
+ {
+ InstanceAllocator ia0((uint8_t)0), ia1((uint8_t)1);
+ tuple_vector_alloc<InstanceAllocator, int> vec(ia0);
+
+ // private vector allocator was copied from ia0 and should have matching id
+ EATEST_VERIFY(vec.get_allocator() == ia0);
+
+ // Assigning allocator
+ vec.set_allocator(ia1);
+ EATEST_VERIFY(vec.get_allocator() == ia1);
+ }
+
+ return nErrorCount;
+}
+
+
diff --git a/EASTL/test/source/TestTypeTraits.cpp b/EASTL/test/source/TestTypeTraits.cpp
new file mode 100644
index 0000000..2670e24
--- /dev/null
+++ b/EASTL/test/source/TestTypeTraits.cpp
@@ -0,0 +1,2439 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/type_traits.h>
+#include <EASTL/vector.h>
+#include <EAStdC/EAAlignment.h>
+#include "ConceptImpls.h"
+
+
+
+using namespace eastl;
+
+
+bool GetType(const true_type&)
+{
+ return true;
+}
+
+bool GetType(const false_type&)
+{
+ return false;
+}
+
+int GetType(const integral_constant<size_t, (size_t)4>&)
+{
+ return 4;
+}
+
+int GetType(const integral_constant<size_t, (size_t)8>&)
+{
+ return 8;
+}
+
+int GetType(const integral_constant<size_t, (size_t)16>&)
+{
+ return 16;
+}
+
+int GetType(const integral_constant<size_t, (size_t)32>&)
+{
+ return 32;
+}
+
+#ifdef _MSC_VER
+ __declspec(align(32)) class ClassAlign32{ };
+#else
+ class ClassAlign32{ } __attribute__((aligned(32)));
+#endif
+
+
+struct Struct
+{
+ // Empty
+};
+
+class Class
+{
+ // Empty
+};
+
+class Subclass : public Class
+{
+ // Empty
+};
+
+class ClassEmpty
+{
+ // Empty
+};
+
+class ClassNonEmpty
+{
+public:
+ int x;
+};
+
+enum Enum
+{
+ kValue1
+};
+
+union Union
+{
+ int x;
+ short y;
+};
+
+struct FinalStruct final
+{
+};
+
+class FinalClass final
+{
+};
+
+#if !EASTL_TYPE_TRAIT_is_union_CONFORMANCE
+ EASTL_DECLARE_UNION(Union) // We have to do this because is_union simply cannot work without user help.
+#endif
+
+
+
+// Used for union_cast tests below.
+// C++11 allows for PodA/PodB to have a trivial default (i.e. compiler-generated) constructor,
+// but as of this writing (3/2012) most C++ compilers don't have support for this yet.
+struct PodA{
+ int mX;
+};
+
+struct PodB{
+ int mX;
+};
+
+bool operator ==(const PodA& a1, const PodA& a2) { return (a1.mX == a2.mX); }
+
+
+// std::tr1::is_volatile<T>::value == true if and only if, for a given type T:
+// * std::tr1::is_scalar<T>::value == true, or
+// * T is a class or struct that has no user-defined copy assignment operator or destructor,
+// and T has no non-static data members M for which is_pod<M>::value == false, and no members of reference type, or
+// * T is the type of an array of objects E for which is_pod<E>::value == true
+// is_pod may only be applied to complete types.
+
+struct Pod1
+{
+ // Empty
+};
+#if !EASTL_TYPE_TRAIT_is_pod_CONFORMANCE
+ EASTL_DECLARE_POD(Pod1) // We have to do this because is_pod simply cannot work without user help.
+#endif
+#if !EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE
+ EASTL_DECLARE_STANDARD_LAYOUT(Pod1) // We have to do this because is_standard_layout simply cannot work without user help.
+#endif
+
+
+struct Pod2
+{
+ int mX;
+ Pod1 mPod1;
+};
+#if !EASTL_TYPE_TRAIT_is_pod_CONFORMANCE
+ EASTL_DECLARE_POD(Pod2)
+#endif
+#if !EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE
+ EASTL_DECLARE_STANDARD_LAYOUT(Pod2)
+#endif
+
+struct Pod3
+{
+ Pod2 mPod2;
+ int mX;
+ Pod1 mPod1;
+};
+#if !EASTL_TYPE_TRAIT_is_pod_CONFORMANCE
+ EASTL_DECLARE_POD(Pod3)
+#endif
+#if !EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE
+ EASTL_DECLARE_STANDARD_LAYOUT(Pod3)
+#endif
+
+
+struct NonPod1
+{
+ NonPod1(){}
+ virtual ~NonPod1(){}
+};
+
+struct NonPod2
+{
+ virtual ~NonPod2(){}
+ virtual void Function(){}
+};
+
+struct HasIncrementOperator { HasIncrementOperator& operator++() { return *this; } };
+
+template <class T>
+using has_increment_operator_detection = decltype(++eastl::declval<T>());
+
+template<typename, typename = eastl::void_t<>>
+struct has_increment_operator_using_void_t : eastl::false_type {};
+
+template <typename T>
+struct has_increment_operator_using_void_t<T, eastl::void_t<has_increment_operator_detection<T>>> : eastl::true_type {};
+
+
+// We use this for the is_copy_constructible test in order to verify that
+// is_copy_constructible in fact returns false for this type and not true.
+// std::is_copy_constructible specification: std::is_constructible<T, const T&>::value is true.
+// Note that the specification refers to const T& and not T&. So we rig our class to
+// accept T& and not const T&. This situation is significant because as of this
+// writing the clang <type_traits> implementation appears to be broken and mis-implements
+// the is_copy_constructible type trait to return true for ConstructibleOnlyWithNonConstReference
+// when in fact it should return false.
+EA_DISABLE_VC_WARNING(4521) // disable warning : "multiple copy constructors specified"
+struct ConstructibleOnlyWithNonConstReference
+{
+ ConstructibleOnlyWithNonConstReference(ConstructibleOnlyWithNonConstReference&) {}
+
+ #if defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ private: ConstructibleOnlyWithNonConstReference() {}
+ private: ConstructibleOnlyWithNonConstReference(const ConstructibleOnlyWithNonConstReference&) {}
+ #else
+ ConstructibleOnlyWithNonConstReference() = delete;
+ ConstructibleOnlyWithNonConstReference(const ConstructibleOnlyWithNonConstReference&) = delete;
+ #endif
+};
+EA_RESTORE_VC_WARNING()
+
+#if defined(EA_COMPILER_NO_NOEXCEPT)
+ //This is needed because VS2013 supports is_nothrow__xxx type traits but doesn't support C++11 noexcept.
+ //So we use throw() to allow the is_nothrow_xxxx and similiar tests to work in VS2013
+ #define EASTL_TEST_NOEXCEPT throw()
+#else
+ #define EASTL_TEST_NOEXCEPT EA_NOEXCEPT
+#endif
+
+struct ThrowConstructibleTest
+{
+ ThrowConstructibleTest(const int = 0) EASTL_TEST_NOEXCEPT { }
+ ThrowConstructibleTest(const float) EA_NOEXCEPT_IF(false) { }
+};
+
+
+
+struct NoThrowAssignable { };
+
+struct ThrowAssignableTest
+{
+ void operator=(const NoThrowAssignable&) EASTL_TEST_NOEXCEPT { }
+ void operator=(const ThrowAssignableTest&) { }
+};
+
+
+struct NoThrowDestructible
+{
+ ~NoThrowDestructible() EASTL_TEST_NOEXCEPT {}
+};
+
+#if !defined(EA_COMPILER_NO_EXCEPTIONS)
+ struct ThrowDestructible
+ {
+ ~ThrowDestructible() noexcept(false) { throw(int()); }
+ };
+
+ struct ThrowDestructibleNoexceptFalse
+ {
+ virtual ~ThrowDestructibleNoexceptFalse() EA_NOEXCEPT_IF(false) { }
+ };
+#endif
+
+
+struct HasTrivialConstructor
+{
+ int x;
+};
+#if !EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE
+ EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(HasTrivialConstructor) // We have to do this because has_trivial_constructor simply cannot work without user help.
+#endif
+#if !EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE
+ EASTL_DECLARE_STANDARD_LAYOUT(HasTrivialConstructor)
+#endif
+
+
+struct NoTrivialConstructor
+{
+ NoTrivialConstructor() { px = &x; }
+ int x;
+ int* px;
+};
+#if !EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE
+ EASTL_DECLARE_STANDARD_LAYOUT(NoTrivialConstructor)
+#endif
+
+
+struct HasTrivialCopy
+{
+ void Function(){}
+ int x;
+};
+#if !EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE
+ EASTL_DECLARE_TRIVIAL_COPY(HasTrivialCopy) // We have to do this because has_trivial_copy simply cannot work without user help.
+#endif
+
+
+#if defined(EA_COMPILER_MSVC) && (_MSC_VER == 1900)
+ // http://blogs.msdn.com/b/vcblog/archive/2014/06/06/c-14-stl-features-fixes-and-breaking-changes-in-visual-studio-14-ctp1.aspx
+ // VS2015-preview has a bug regarding C++14 implicit noexcept rules for destructors. We explicitly define noexcept below for VS2015-preview only.
+ //
+ // Re-evaluate when VS2015 RTM has been released.
+ //
+ struct NoTrivialCopy1
+ {
+ virtual ~NoTrivialCopy1() EASTL_TEST_NOEXCEPT {}
+ virtual void Function(){}
+ };
+#else
+ struct NoTrivialCopy1
+ {
+ virtual ~NoTrivialCopy1() {}
+ virtual void Function(){}
+ };
+#endif
+
+struct NoTrivialCopy2
+{
+ NoTrivialCopy1 ntv;
+};
+
+struct NonCopyable
+{
+ NonCopyable() : mX(0) {}
+ NonCopyable(int x) : mX(x) {}
+
+ int mX;
+
+ EA_NON_COPYABLE(NonCopyable)
+};
+
+struct HasTrivialAssign
+{
+ void Function(){}
+ int x;
+};
+#if !EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE
+ EASTL_DECLARE_TRIVIAL_ASSIGN(HasTrivialAssign) // We have to do this because has_trivial_assign simply cannot work without user help.
+#endif
+
+struct NoTrivialAssign1
+{
+ virtual ~NoTrivialAssign1(){}
+ virtual void Function(){}
+};
+
+struct NoTrivialAssign2
+{
+ NoTrivialAssign1 nta;
+};
+
+struct Polymorphic1
+{
+ virtual ~Polymorphic1(){}
+ virtual void Function(){}
+};
+
+struct Polymorphic2 : public Polymorphic1
+{
+ // Empty
+};
+
+struct Polymorphic3
+{
+ virtual ~Polymorphic3(){}
+ virtual void Function() = 0;
+};
+
+struct NonPolymorphic1
+{
+ void Function(){}
+};
+
+// Disable the following warning:
+// warning: ‘struct Abstract’ has virtual functions and accessible non-virtual destructor [-Wnon-virtual-dtor]
+// We explicitly want this class not to have a virtual destructor to test our type traits.
+EA_DISABLE_VC_WARNING(4265)
+EA_DISABLE_CLANG_WARNING(-Wnon-virtual-dtor)
+EA_DISABLE_GCC_WARNING(-Wnon-virtual-dtor)
+struct Abstract
+{
+ virtual void Function() = 0;
+};
+EA_RESTORE_GCC_WARNING()
+EA_RESTORE_CLANG_WARNING()
+EA_RESTORE_VC_WARNING()
+
+struct AbstractWithDtor
+{
+ virtual ~AbstractWithDtor(){}
+ virtual void Function() = 0;
+};
+
+struct DeletedDtor
+{
+ #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ ~DeletedDtor() = delete;
+ #endif
+};
+
+#if (EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE == 0)
+ EASTL_DECLARE_IS_DESTRUCTIBLE(DeletedDtor, false)
+#endif
+
+struct Assignable
+{
+ void operator=(const Assignable&){}
+ void operator=(const Pod1&){}
+};
+
+class HiddenAssign
+{
+public:
+ HiddenAssign();
+
+private:
+ HiddenAssign(const HiddenAssign& x);
+ HiddenAssign& operator=(const HiddenAssign& x);
+};
+
+#if !EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE
+ EASTL_DECLARE_TRIVIAL_ASSIGN(HiddenAssign)
+#endif
+
+
+
+// This class exercises is_convertible for the case that the class has an explicit copy constructor.
+struct IsConvertibleTest1
+{
+ IsConvertibleTest1() {}
+ IsConvertibleTest1(int, int) {}
+ explicit IsConvertibleTest1(const IsConvertibleTest1&) {}
+ ~IsConvertibleTest1(){}
+};
+
+
+
+// Helpers for enable_if tests
+template<typename T>
+typename eastl::enable_if<eastl::is_floating_point<T>::value, T>::type EnableIfTestFunction(T)
+ { return 999; }
+
+template<typename T>
+typename eastl::enable_if<eastl::is_integral<T>::value, T>::type EnableIfTestFunction(T)
+ { return 888; }
+
+template<typename T>
+typename eastl::disable_if<eastl::is_signed<T>::value, T>::type EnableIfTestFunction(T)
+ { return 777; }
+
+
+
+// Test that EASTL_DECLARE_TRIVIAL_ASSIGN can be used to get around case whereby
+// the copy constructor and operator= are private. Normally vector requires this.
+// ** This is disabled because it turns out that vector in fact requires the
+// constructor for some uses. But we have code below which tests just part of vector.
+// template class eastl::vector<HiddenAssign>;
+
+
+typedef char Array[32];
+typedef const char ArrayConst[32];
+
+
+typedef Class& Reference;
+typedef const Class& ConstReference;
+
+
+typedef const int ConstInt;
+typedef int Int;
+typedef volatile int VolatileInt;
+typedef const volatile int ConstVolatileInt;
+typedef int& IntReference;
+typedef const int& ConstIntReference; // Note here that the int is const, not the reference to the int.
+typedef const volatile int& ConstVolatileIntReference; // Note here that the int is const, not the reference to the int.
+
+
+typedef void FunctionVoidVoid();
+typedef int FunctionIntVoid();
+typedef int FunctionIntFloat(float);
+typedef void (*FunctionVoidVoidPtr)();
+
+namespace
+{
+ const eastl::string gEmptyStringInstance("");
+
+ const eastl::integral_constant<int*, nullptr> gIntNullptrConstant;
+ static_assert(gIntNullptrConstant() == nullptr, "");
+}
+
+int TestTypeTraits()
+{
+ int nErrorCount = 0;
+
+
+ // static_min / static_max
+ #if EASTL_TYPE_TRAIT_static_min_CONFORMANCE
+ static_assert((static_min<3, 7, 1, 5>::value == 1), "static_min failure");
+ static_assert((static_max<3, 7, 1, 5>::value == 7), "static_max failure");
+ #else
+ static_assert((static_min<7, 1>::value == 1), "static_min failure");
+ static_assert((static_max<7, 1>::value == 7), "static_max failure");
+ #endif
+
+ // enable_if, disable_if.
+ EATEST_VERIFY((EnableIfTestFunction((double)1.1) == 999));
+ EATEST_VERIFY((EnableIfTestFunction((int)1) == 888));
+ EATEST_VERIFY((EnableIfTestFunction((int)-4) == 888));
+
+
+ // conditional
+ static_assert(sizeof(conditional<true, int8_t, int16_t>::type) == sizeof(int8_t), "conditional failure");
+ static_assert(sizeof(conditional<false, int8_t, int16_t>::type) == sizeof(int16_t), "conditional failure");
+
+ // bool_constant
+ static_assert(bool_constant<is_same<int, int>::value>::value == true, "bool_constant failure");
+ static_assert(bool_constant<is_same<int, short>::value>::value == false, "bool_constant failure");
+ static_assert(is_same<bool_constant<false>::type, integral_constant<bool, false>::type>::value, "bool_constant failure");
+
+
+
+ // identity
+ static_assert(sizeof(identity<int>::type) == sizeof(int), "identity failure");
+ static_assert((is_same<int, identity<int>::type >::value == true), "identity failure");
+
+ // type_identity
+ static_assert(sizeof(type_identity<int>::type) == sizeof(int), "type_identity failure");
+ static_assert((is_same<int, type_identity<int>::type >::value == true), "type_identity failure");
+ static_assert(sizeof(type_identity_t<int>) == sizeof(int), "type_identity failure");
+ static_assert((is_same_v<int, type_identity_t<int>> == true), "type_identity failure");
+
+
+
+ // is_void
+ static_assert(is_void<void>::value == true, "is_void failure");
+ static_assert(is_void<const void>::value == true, "is_void failure");
+ static_assert(is_void<int>::value == false, "is_void failure");
+
+
+ // is_null_pointer
+ #if defined(EA_COMPILER_CPP11_ENABLED)
+ #if !defined(EA_COMPILER_NO_DECLTYPE) && !defined(_MSC_VER) // VS2012 is broken for just the case of decltype(nullptr).
+ static_assert(is_null_pointer<decltype(nullptr)>::value == true, "is_null_pointer failure");
+ static_assert(is_null_pointer<decltype(NULL)>::value == false, "is_null_pointer failure");
+ #endif
+ #if defined(EA_HAVE_nullptr_t_IMPL)
+ static_assert(is_null_pointer<std::nullptr_t>::value == true, "is_null_pointer failure"); // Can't enable this until we are using an updated <EABase/nullptr.h> that is savvy to C++11 clang (defines nullptr) being used with C++98 GNU libstdc++ (defines std::nullptr_t).
+ #endif
+ static_assert(is_null_pointer<void*>::value == false, "is_null_pointer failure");
+ static_assert(is_null_pointer<intptr_t>::value == false, "is_null_pointer failure");
+ #endif
+
+ // is_integral
+ static_assert(is_integral<int>::value == true, "is_integral failure");
+ EATEST_VERIFY(GetType(is_integral<int>()) == true);
+
+ static_assert(is_integral<const int>::value == true, "is_integral failure");
+ EATEST_VERIFY(GetType(is_integral<const int>()) == true);
+
+ static_assert(is_integral<float>::value == false, "is_integral failure");
+ EATEST_VERIFY(GetType(is_integral<float>()) == false);
+
+ static_assert(is_integral<bool>::value, "is_integral failure");
+ static_assert(is_integral<char8_t>::value, "is_integral failure");
+ static_assert(is_integral<char16_t>::value, "is_integral failure");
+ static_assert(is_integral<char32_t>::value, "is_integral failure");
+ static_assert(is_integral<char>::value, "is_integral failure");
+ static_assert(is_integral<int>::value, "is_integral failure");
+ static_assert(is_integral<long long>::value, "is_integral failure");
+ static_assert(is_integral<long>::value, "is_integral failure");
+ static_assert(is_integral<short>::value, "is_integral failure");
+ static_assert(is_integral<signed char>::value, "is_integral failure");
+ static_assert(is_integral<unsigned char>::value, "is_integral failure");
+ static_assert(is_integral<unsigned int>::value, "is_integral failure");
+ static_assert(is_integral<unsigned long long>::value, "is_integral failure");
+ static_assert(is_integral<unsigned long>::value, "is_integral failure");
+ static_assert(is_integral<unsigned short>::value, "is_integral failure");
+#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type which is already handled...
+ static_assert(is_integral<wchar_t>::value, "is_integral failure");
+#endif
+
+
+ // is_floating_point
+ static_assert(is_floating_point<double>::value == true, "is_floating_point failure");
+ EATEST_VERIFY(GetType(is_floating_point<double>()) == true);
+
+ static_assert(is_floating_point<const double>::value == true, "is_floating_point failure");
+ EATEST_VERIFY(GetType(is_floating_point<const double>()) == true);
+
+ static_assert(is_floating_point<int>::value == false, "is_floating_point failure");
+ EATEST_VERIFY(GetType(is_floating_point<int>()) == false);
+
+
+ // is_arithmetic
+ static_assert(is_arithmetic<float>::value == true, "is_arithmetic failure");
+ static_assert(is_arithmetic_v<float> == true, "is_arithmetic failure");
+ EATEST_VERIFY(GetType(is_arithmetic<float>()) == true);
+
+ static_assert(is_arithmetic<Class>::value == false, "is_arithmetic failure");
+ static_assert(is_arithmetic_v<Class> == false, "is_arithmetic failure");
+ EATEST_VERIFY(GetType(is_arithmetic<Class>()) == false);
+
+
+ // is_fundamental
+ static_assert(is_fundamental<void>::value == true, "is_fundamental failure");
+ static_assert(is_fundamental_v<void> == true, "is_fundamental failure");
+ EATEST_VERIFY(GetType(is_fundamental<void>()) == true);
+
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type which is already handled...
+ static_assert(is_fundamental<wchar_t>::value == true, "is_fundamental failure");
+ static_assert(is_fundamental_v<wchar_t> == true, "is_fundamental failure");
+ EATEST_VERIFY(GetType(is_fundamental<wchar_t>()) == true);
+ #endif
+
+ static_assert(is_fundamental<Class>::value == false, "is_fundamental failure");
+ static_assert(is_fundamental_v<Class> == false, "is_fundamental failure");
+ EATEST_VERIFY(GetType(is_fundamental<Class>()) == false);
+
+ static_assert(is_fundamental<std::nullptr_t>::value == true, "is_fundamental failure");
+ static_assert(is_fundamental_v<std::nullptr_t> == true, "is_fundamental failure");
+
+
+ // is_array
+ static_assert(is_array<Array>::value == true, "is_array failure");
+ EATEST_VERIFY(GetType(is_array<Array>()) == true);
+
+ static_assert(is_array<ArrayConst>::value == true, "is_array failure");
+ EATEST_VERIFY(GetType(is_array<ArrayConst>()) == true);
+
+ static_assert(is_array<int[]>::value == true, "is_array failure");
+
+ static_assert(is_array<uint32_t>::value == false, "is_array failure");
+ EATEST_VERIFY(GetType(is_array<uint32_t>()) == false);
+
+ static_assert(is_array<uint32_t*>::value == false, "is_array failure");
+ EATEST_VERIFY(GetType(is_array<uint32_t*>()) == false);
+
+
+ //is_bounded_array
+ static_assert(is_bounded_array<Array>::value == true, "is_bounded_array failure");
+ EATEST_VERIFY(GetType(is_bounded_array<Array>()) == true);
+
+ static_assert(is_bounded_array<ArrayConst>::value == true, "is_bounded_array failure");
+ EATEST_VERIFY(GetType(is_bounded_array<ArrayConst>()) == true);
+
+ static_assert(is_bounded_array<int>::value == false, "is_bounded_array failure");
+ static_assert(is_bounded_array<int[32]>::value == true, "is_bounded_array failure");
+ static_assert(is_bounded_array<int[]>::value == false, "is_bounded_array failure");
+
+ static_assert(is_bounded_array<uint32_t>::value == false, "is_bounded_array failure");
+ EATEST_VERIFY(GetType(is_bounded_array<uint32_t>()) == false);
+
+ static_assert(is_bounded_array<uint32_t*>::value == false, "is_bounded_array failure");
+ EATEST_VERIFY(GetType(is_bounded_array<uint32_t*>()) == false);
+
+
+ //is_unbounded_array
+ static_assert(is_unbounded_array<Array>::value == false, "is_unbounded_array failure");
+ EATEST_VERIFY(GetType(is_unbounded_array<Array>()) == false);
+
+ static_assert(is_unbounded_array<ArrayConst>::value == false, "is_unbounded_array failure");
+ EATEST_VERIFY(GetType(is_unbounded_array<ArrayConst>()) == false);
+
+ static_assert(is_unbounded_array<int>::value == false, "is_unbounded_array failure");
+ static_assert(is_unbounded_array<int[32]>::value == false, "is_unbounded_array failure");
+ static_assert(is_unbounded_array<int[]>::value == true, "is_unbounded_array failure");
+
+ static_assert(is_unbounded_array<uint32_t>::value == false, "is_unbounded_array failure");
+ EATEST_VERIFY(GetType(is_unbounded_array<uint32_t>()) == false);
+
+ static_assert(is_unbounded_array<uint32_t*>::value == false, "is_unbounded_array failure");
+ EATEST_VERIFY(GetType(is_unbounded_array<uint32_t*>()) == false);
+
+
+ // is_reference
+ static_assert(is_reference<Class&>::value == true, "is_reference failure");
+ EATEST_VERIFY(GetType(is_reference<Class&>()) == true);
+
+ static_assert(is_reference<Class&&>::value == true, "is_reference failure");
+ EATEST_VERIFY(GetType(is_reference<Class&&>()) == true);
+
+ static_assert(is_reference<const Class&>::value == true, "is_reference failure");
+ EATEST_VERIFY(GetType(is_reference<const Class&>()) == true);
+
+ static_assert(is_reference<const Class&&>::value == true, "is_reference failure");
+ EATEST_VERIFY(GetType(is_reference<const Class&&>()) == true);
+
+ static_assert(is_reference<Class>::value == false, "is_reference failure");
+ EATEST_VERIFY(GetType(is_reference<Class>()) == false);
+
+ static_assert(is_reference<const Class*>::value == false, "is_reference failure");
+ EATEST_VERIFY(GetType(is_reference<const Class*>()) == false);
+
+
+ // is_member_function_pointer
+ static_assert(is_member_function_pointer<int>::value == false, "is_member_function_pointer failure");
+ static_assert(is_member_function_pointer<int(Class::*)>::value == false, "is_member_function_pointer failure");
+ static_assert(is_member_function_pointer<int(Class::*)()>::value == true, "is_member_function_pointer failure");
+ static_assert(is_member_function_pointer<int(Class::*)(...)>::value == true, "is_member_function_pointer failure");
+ static_assert(is_member_function_pointer<int(Class::*)() noexcept>::value == true, "is_member_function_pointer failure");
+ static_assert(is_member_function_pointer<int(Class::*)() &>::value == true, "is_member_function_pointer failure");
+ static_assert(is_member_function_pointer<int(Class::*)() &&>::value == true, "is_member_function_pointer failure");
+
+
+ // is_member_object_pointer
+ static_assert(is_member_object_pointer<int>::value == false, "is_member_object_pointer failure");
+ static_assert(is_member_object_pointer<int(Class::*)>::value == true, "is_member_object_pointer failure");
+ static_assert(is_member_object_pointer<int(Class::*)()>::value == false, "is_member_object_pointer failure");
+
+
+ // is_member_pointer
+ static_assert(is_member_pointer<int>::value == false, "is_member_pointer failure");
+ static_assert(is_member_pointer<int(Class::*)>::value == true, "is_member_pointer failure");
+ static_assert(is_member_pointer<int(Class::*)()>::value == true, "is_member_pointer failure");
+ static_assert(is_member_pointer<int(Class::* const)>::value == true, "is_member_pointer failure");
+ static_assert(is_member_pointer<int(Class::* volatile)>::value == true, "is_member_pointer failure");
+ static_assert(is_member_pointer<int(Class::* const volatile)>::value == true, "is_member_pointer failure");
+
+
+ // is_pointer
+ static_assert(is_pointer<Class*>::value == true, "is_pointer failure");
+ static_assert(is_pointer<const Class*>::value == true, "is_pointer failure");
+ static_assert(is_pointer<Class>::value == false, "is_pointer failure");
+ static_assert(is_pointer<const Class&>::value == false, "is_pointer failure");
+ #if defined(EA_HAVE_nullptr_t_IMPL)
+ static_assert(is_pointer<std::nullptr_t>::value == false, "is_pointer failure");
+ #endif
+
+ // is_enum
+ static_assert(is_enum<Enum>::value == true, "is_enum failure ");
+ static_assert(is_enum_v<Enum> == true, "is_enum failure ");
+ EATEST_VERIFY(GetType(is_enum<Enum>()) == true);
+
+ static_assert(is_enum<const Enum>::value == true, "is_enum failure ");
+ static_assert(is_enum_v<const Enum> == true, "is_enum failure ");
+ EATEST_VERIFY(GetType(is_enum<const Enum>()) == true);
+
+ static_assert(is_enum<Enum*>::value == false, "is_enum failure ");
+ static_assert(is_enum_v<Enum*> == false, "is_enum failure ");
+ EATEST_VERIFY(GetType(is_enum<Enum*>()) == false);
+
+ static_assert(is_enum<Class>::value == false, "is_enum failure ");
+ static_assert(is_enum_v<Class> == false, "is_enum failure ");
+ EATEST_VERIFY(GetType(is_enum<Class>()) == false);
+
+ static_assert(is_enum<Enum&>::value == false, "is_enum failure ");
+ static_assert(is_enum_v<Enum&> == false, "is_enum failure ");
+ EATEST_VERIFY(GetType(is_enum<Enum&>()) == false);
+
+ static_assert(is_enum<Enum&&>::value == false, "is_enum failure ");
+ static_assert(is_enum_v<Enum&&> == false, "is_enum failure ");
+ EATEST_VERIFY(GetType(is_enum<Enum&&>()) == false);
+
+
+ // is_union
+ static_assert(is_union<Union>::value == true, "is_union failure");
+ static_assert(is_union_v<Union> == true, "is_union failure");
+ EATEST_VERIFY(GetType(is_union<Union>()) == true);
+
+ static_assert(is_union<int>::value == false, "is_union failure");
+ static_assert(is_union_v<int> == false, "is_union failure");
+ EATEST_VERIFY(GetType(is_union<int>()) == false);
+
+
+ // is_class
+ static_assert(is_class<Class>::value == true, "is_class failure");
+ EATEST_VERIFY(GetType(is_class<Class>()) == true);
+
+ static_assert(is_class<Struct>::value == true, "is_class failure");
+ EATEST_VERIFY(GetType(is_class<Struct>()) == true);
+
+ static_assert(is_class<Union>::value == false, "is_class failure");
+ EATEST_VERIFY(GetType(is_class<Union>()) == false);
+
+ static_assert(is_class<Enum>::value == false, "is_class failure");
+ EATEST_VERIFY(GetType(is_class<Enum>()) == false);
+
+ static_assert(is_class<int*>::value == false, "is_class failure");
+ EATEST_VERIFY(GetType(is_class<int*>()) == false);
+
+
+ // is_function
+ static_assert(is_function<void>::value == false, "is_function failure");
+ static_assert(is_function<FunctionVoidVoid>::value == true, "is_function failure");
+ static_assert(is_function<FunctionVoidVoid&>::value == false, "is_function failure");
+ static_assert(is_function<FunctionIntVoid>::value == true, "is_function failure");
+ static_assert(is_function<FunctionIntFloat>::value == true, "is_function failure");
+ static_assert(is_function<FunctionVoidVoidPtr>::value == false, "is_function failure");
+ static_assert(is_function<int>::value == false, "is_function failure");
+ static_assert(is_function<int[3]>::value == false, "is_function failure");
+ static_assert(is_function<int[]>::value == false, "is_function failure");
+ static_assert(is_function<Class>::value == false, "is_function failure");
+ #if EASTL_TYPE_TRAIT_is_function_CONFORMANCE
+ // typedef int PrintfConst(const char*, ...) const;
+ static_assert(is_function<int (const char*, ...)>::value == true, "is_function failure"); // This is the signature of printf.
+ #endif
+
+ static_assert(is_function<int (float)>::value == true, "is_function failure");
+ static_assert(is_function<int (float) const>::value == true, "is_function failure");
+ static_assert(is_function<int(float) volatile>::value == true, "is_function failure");
+ static_assert(is_function<int(float) const volatile>::value == true, "is_function failure");
+ static_assert(is_function<int(float)&>::value == true, "is_function failure");
+ static_assert(is_function<int(float)&&>::value == true, "is_function failure");
+ static_assert(is_function<int(float) noexcept>::value == true, "is_function failure");
+ static_assert(is_function<FunctionIntFloat &>::value == false, "is_function failure"); // reference to function, not a l-value reference qualified function
+ static_assert(is_function<FunctionIntFloat &&>::value == false, "is_function failure");
+
+ static_assert(is_function_v<void> == false, "is_function failure");
+ static_assert(is_function_v<FunctionVoidVoid> == true, "is_function failure");
+ static_assert(is_function_v<FunctionVoidVoid&> == false, "is_function failure");
+ static_assert(is_function_v<FunctionIntVoid> == true, "is_function failure");
+ static_assert(is_function_v<FunctionIntFloat> == true, "is_function failure");
+ static_assert(is_function_v<FunctionVoidVoidPtr> == false, "is_function failure");
+ static_assert(is_function_v<int> == false, "is_function failure");
+ static_assert(is_function_v<int[3]> == false, "is_function failure");
+ static_assert(is_function_v<int[]> == false, "is_function failure");
+ static_assert(is_function_v<Class> == false, "is_function failure");
+ #if EASTL_TYPE_TRAIT_is_function_CONFORMANCE
+ // typedef int PrintfConst(const char*, ...) const;
+ static_assert(is_function_v<int (const char*, ...)> == true, "is_function failure"); // This is the signature of printf.
+ #endif
+
+
+ // is_object
+ static_assert(is_object<int>::value == true, "is_object failure");
+ EATEST_VERIFY(GetType(is_object<int>()) == true);
+
+ static_assert(is_object<Class>::value == true, "is_object failure");
+ EATEST_VERIFY(GetType(is_object<Class>()) == true);
+
+ static_assert(is_object<Class*>::value == true, "is_object failure");
+ EATEST_VERIFY(GetType(is_object<Class*>()) == true);
+
+ static_assert(is_object<Class&>::value == false, "is_object failure");
+ EATEST_VERIFY(GetType(is_object<Class&>()) == false);
+
+ static_assert(is_object<Class&&>::value == false, "is_object failure");
+ EATEST_VERIFY(GetType(is_object<Class&&>()) == false);
+
+
+ // is_scalar
+ static_assert(is_scalar<int>::value == true, "is_scalar failure");
+ EATEST_VERIFY(GetType(is_scalar<int>()) == true);
+
+ static_assert(is_scalar<double>::value == true, "is_scalar failure");
+ EATEST_VERIFY(GetType(is_scalar<double>()) == true);
+
+ static_assert(is_scalar<Enum>::value == true, "is_scalar failure");
+ EATEST_VERIFY(GetType(is_scalar<Enum>()) == true);
+
+ static_assert(is_scalar<const Class*>::value == true, "is_scalar failure");
+ EATEST_VERIFY(GetType(is_scalar<const Class*>()) == true);
+
+ static_assert(is_scalar<std::nullptr_t>::value == true, "is_scalar failure");
+
+
+ // is_compound
+ static_assert(is_compound<Class>::value == true, "is_compound failure");
+ EATEST_VERIFY(GetType(is_compound<Class>()) == true);
+
+ static_assert(is_compound<const Class&>::value == true, "is_compound failure");
+ EATEST_VERIFY(GetType(is_compound<const Class&>()) == true);
+
+ static_assert(is_compound<int*>::value == true, "is_compound failure");
+ EATEST_VERIFY(GetType(is_compound<int*>()) == true);
+
+ static_assert(is_compound<float>::value == false, "is_compound failure");
+ EATEST_VERIFY(GetType(is_compound<float>()) == false);
+
+ static_assert(is_compound<bool>::value == false, "is_compound failure");
+ EATEST_VERIFY(GetType(is_compound<bool>()) == false);
+
+
+ // is_const
+ static_assert(is_const<Int>::value == false, "is_const failure");
+ EATEST_VERIFY(GetType(is_const<Int>()) == false);
+
+ static_assert(is_const<ConstInt>::value == true, "is_const failure");
+ EATEST_VERIFY(GetType(is_const<ConstInt>()) == true);
+
+ static_assert(is_const<VolatileInt>::value == false, "is_const failure");
+ EATEST_VERIFY(GetType(is_const<VolatileInt>()) == false);
+
+ static_assert(is_const<ConstVolatileInt>::value == true, "is_const failure");
+ EATEST_VERIFY(GetType(is_const<ConstVolatileInt>()) == true);
+
+ static_assert(is_const<IntReference>::value == false, "is_const failure");
+ EATEST_VERIFY(GetType(is_const<IntReference>()) == false);
+
+ static_assert(is_const<ConstIntReference>::value == false, "is_const failure"); // Note here that the int is const, not the reference to the int.
+ EATEST_VERIFY(GetType(is_const<ConstIntReference>()) == false);
+
+ static_assert(is_const<ConstVolatileIntReference>::value == false, "is_const failure"); // Note here that the int is const, not the reference to the int.
+ EATEST_VERIFY(GetType(is_const<ConstVolatileIntReference>()) == false);
+
+ static_assert(is_const<void() const>::value == false, "is_const failure");
+ EATEST_VERIFY(GetType(is_const<void() const>()) == false);
+
+ // is_volatile
+ static_assert(is_volatile<Int>::value == false, "is_volatile failure");
+ EATEST_VERIFY(GetType(is_volatile<Int>()) == false);
+
+ static_assert(is_volatile<ConstInt>::value == false, "is_volatile failure");
+ EATEST_VERIFY(GetType(is_volatile<ConstInt>()) == false);
+
+ static_assert(is_volatile<VolatileInt>::value == true, "is_volatile failure");
+ EATEST_VERIFY(GetType(is_volatile<VolatileInt>()) == true);
+
+ static_assert(is_volatile<ConstVolatileInt>::value == true, "is_volatile failure");
+ EATEST_VERIFY(GetType(is_volatile<ConstVolatileInt>()) == true);
+
+ static_assert(is_volatile<IntReference>::value == false, "is_volatile failure");
+ EATEST_VERIFY(GetType(is_volatile<IntReference>()) == false);
+
+ static_assert(is_volatile<ConstIntReference>::value == false, "is_volatile failure");
+ EATEST_VERIFY(GetType(is_volatile<ConstIntReference>()) == false);
+
+ static_assert(is_volatile<ConstVolatileIntReference>::value == false, "is_volatile failure"); // Note here that the int is volatile, not the reference to the int.
+ EATEST_VERIFY(GetType(is_volatile<ConstVolatileIntReference>()) == false);
+
+ static_assert(is_volatile<void() const>::value == false, "is_volatile failure");
+ EATEST_VERIFY(GetType(is_volatile<void() const>()) == false);
+
+
+ // underlying_type and to_underlying
+ #if EASTL_TYPE_TRAIT_underlying_type_CONFORMANCE && !defined(EA_COMPILER_NO_STRONGLY_TYPED_ENUMS) // If we can execute this test...
+ enum UnderlyingTypeTest : uint16_t { firstVal = 0, secondVal = 1 };
+
+ constexpr bool isUnderlyingTypeCorrect = is_same_v<underlying_type_t<UnderlyingTypeTest>, uint16_t>;
+ static_assert(isUnderlyingTypeCorrect, "Wrong type for underlying_type_t.");
+ EATEST_VERIFY(isUnderlyingTypeCorrect);
+
+ auto v1 = to_underlying(UnderlyingTypeTest::firstVal);
+ auto v2 = to_underlying(UnderlyingTypeTest::secondVal);
+
+ constexpr bool isToUnderlyingReturnTypeCorrect = is_same_v<decltype(v1), uint16_t>;
+ static_assert(isToUnderlyingReturnTypeCorrect, "Wrong return type for to_underlying.");
+ EATEST_VERIFY(isToUnderlyingReturnTypeCorrect);
+
+ EATEST_VERIFY(v1 == 0 && v2 == 1);
+ #endif
+
+
+ // is_literal_type
+ static_assert((is_literal_type<int>::value == true), "is_literal_type failure");
+ static_assert((is_literal_type<Enum>::value == true), "is_literal_type failure");
+ #if EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE
+ static_assert((is_literal_type<PodA>::value == true), "is_literal_type failure");
+ static_assert((is_literal_type<NonPod1>::value == false), "is_literal_type failure");
+ #endif
+
+
+ // is_trivial
+ // is_trivially_copyable
+ // is_trivially_default_constructible
+ #if EASTL_TYPE_TRAIT_is_trivial_CONFORMANCE
+ static_assert(is_trivial<Pod1>::value == true, "is_trivial failure");
+ static_assert(is_trivial<NonPod1>::value == false, "is_trivial failure");
+ #endif
+
+
+ // is_pod
+ static_assert(is_pod<Pod1>::value == true, "is_pod failure");
+ EATEST_VERIFY(GetType(is_pod<Pod1>()) == true);
+
+ static_assert(is_pod<Pod2>::value == true, "is_pod failure");
+ EATEST_VERIFY(GetType(is_pod<Pod2>()) == true);
+
+ static_assert(is_pod<Pod3>::value == true, "is_pod failure");
+ EATEST_VERIFY(GetType(is_pod<Pod3>()) == true);
+
+ static_assert(is_pod<float>::value == true, "is_pod failure");
+ EATEST_VERIFY(GetType(is_pod<float>()) == true);
+
+ static_assert(is_pod<Pod1*>::value == true, "is_pod failure");
+ EATEST_VERIFY(GetType(is_pod<Pod1*>()) == true);
+
+ static_assert(is_pod<NonPod1>::value == false, "is_pod failure");
+ EATEST_VERIFY(GetType(is_pod<NonPod1>()) == false);
+
+ static_assert(is_pod<NonPod2>::value == false, "is_pod failure");
+ EATEST_VERIFY(GetType(is_pod<NonPod2>()) == false);
+
+
+ // is_standard_layout
+ static_assert(is_standard_layout<Pod1>::value == true, "is_standard_layout<Pod1> failure");
+ static_assert(is_standard_layout_v<Pod1> == true, "is_standard_layout<Pod1> failure");
+ EATEST_VERIFY(GetType(is_standard_layout<Pod1>()) == true);
+
+ static_assert(is_standard_layout<Pod2>::value == true, "is_standard_layout<Pod2> failure");
+ static_assert(is_standard_layout_v<Pod2> == true, "is_standard_layout<Pod2> failure");
+ EATEST_VERIFY(GetType(is_standard_layout<Pod2>()) == true);
+
+ static_assert(is_standard_layout<Pod3>::value == true, "is_standard_layout<Pod3> failure");
+ static_assert(is_standard_layout_v<Pod3> == true, "is_standard_layout<Pod3> failure");
+ EATEST_VERIFY(GetType(is_standard_layout<Pod3>()) == true);
+
+ static_assert(is_standard_layout<float>::value == true, "is_standard_layout<float> failure");
+ static_assert(is_standard_layout_v<float> == true, "is_standard_layout<float> failure");
+ EATEST_VERIFY(GetType(is_standard_layout<float>()) == true);
+
+ static_assert(is_standard_layout<Pod1*>::value == true, "is_standard_layout<Pod1*> failure");
+ static_assert(is_standard_layout_v<Pod1*> == true, "is_standard_layout<Pod1*> failure");
+ EATEST_VERIFY(GetType(is_standard_layout<Pod1*>()) == true);
+
+ static_assert(is_standard_layout<NonPod1>::value == false, "is_standard_layout<NonPod1> failure");
+ static_assert(is_standard_layout_v<NonPod1> == false, "is_standard_layout<NonPod1> failure");
+ EATEST_VERIFY(GetType(is_standard_layout<NonPod1>()) == false);
+
+ static_assert(is_standard_layout<NonPod2>::value == false, "is_standard_layout<NonPod2> failure");
+ static_assert(is_standard_layout_v<NonPod2> == false, "is_standard_layout<NonPod2> failure");
+ EATEST_VERIFY(GetType(is_standard_layout<NonPod2>()) == false);
+
+ static_assert(is_standard_layout<HasTrivialConstructor>::value == true, "is_standard_layout<HasTrivialConstructor> failure");
+ static_assert(is_standard_layout_v<HasTrivialConstructor> == true, "is_standard_layout<HasTrivialConstructor> failure");
+ EATEST_VERIFY(GetType(is_standard_layout<HasTrivialConstructor>()) == true);
+
+ static_assert(is_standard_layout<NoTrivialConstructor>::value == true, "is_standard_layout<NoTrivialConstructor> failure"); // A key difference between a POD and Standard Layout is that the latter is true if there is a constructor.
+ static_assert(is_standard_layout_v<NoTrivialConstructor> == true, "is_standard_layout<NoTrivialConstructor> failure"); // A key difference between a POD and Standard Layout is that the latter is true if there is a constructor.
+ EATEST_VERIFY(GetType(is_standard_layout<NoTrivialConstructor>()) == true);
+
+
+ // is_empty
+ static_assert(is_empty<ClassEmpty>::value == true, "is_empty failure");
+ EATEST_VERIFY(GetType(is_empty<ClassEmpty>()) == true);
+
+ static_assert(is_empty<ClassNonEmpty>::value == false, "is_empty failure");
+ EATEST_VERIFY(GetType(is_empty<ClassNonEmpty>()) == false);
+
+ static_assert(is_empty<int>::value == false, "is_empty failure");
+ EATEST_VERIFY(GetType(is_empty<int>()) == false);
+
+ static_assert(is_empty<Enum>::value == false, "is_empty failure");
+ EATEST_VERIFY(GetType(is_empty<Enum>()) == false);
+
+
+ // is_polymorphic
+ static_assert(is_polymorphic<Polymorphic1>::value == true, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(is_polymorphic<Polymorphic1>()) == true);
+
+ static_assert(is_polymorphic<Polymorphic2>::value == true, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(is_polymorphic<Polymorphic2>()) == true);
+
+ static_assert(is_polymorphic<Polymorphic3>::value == true, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(is_polymorphic<Polymorphic3>()) == true);
+
+ static_assert(is_polymorphic<NonPolymorphic1>::value == false, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(is_polymorphic<NonPolymorphic1>()) == false);
+
+ static_assert(is_polymorphic<int>::value == false, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(is_polymorphic<int>()) == false);
+
+ static_assert(is_polymorphic<Polymorphic1*>::value == false, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(is_polymorphic<Polymorphic1*>()) == false);
+
+
+ // has_trivial_constructor
+ static_assert(has_trivial_constructor<int>::value == true, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(has_trivial_constructor<int>()) == true);
+
+ static_assert(has_trivial_constructor<int*>::value == true, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(has_trivial_constructor<int*>()) == true);
+
+ static_assert(has_trivial_constructor<HasTrivialConstructor>::value == true, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(has_trivial_constructor<HasTrivialConstructor>()) == true);
+
+ static_assert(has_trivial_constructor<NoTrivialConstructor>::value == false, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(has_trivial_constructor<NoTrivialConstructor>()) == false);
+
+ static_assert(has_trivial_constructor<int&>::value == false, "has_trivial_constructor failure");
+ EATEST_VERIFY(GetType(has_trivial_constructor<int&>()) == false);
+
+
+ // has_trivial_copy
+ static_assert(has_trivial_copy<int>::value == true, "has_trivial_copy failure");
+ EATEST_VERIFY(GetType(has_trivial_copy<int>()) == true);
+
+ static_assert(has_trivial_copy<int*>::value == true, "has_trivial_copy failure");
+ EATEST_VERIFY(GetType(has_trivial_copy<int*>()) == true);
+
+ static_assert(has_trivial_copy<HasTrivialCopy>::value == true, "has_trivial_copy failure");
+ EATEST_VERIFY(GetType(has_trivial_copy<HasTrivialCopy>()) == true);
+
+ static_assert(has_trivial_copy<NoTrivialCopy1>::value == false, "has_trivial_copy failure");
+ EATEST_VERIFY(GetType(has_trivial_copy<NoTrivialCopy1>()) == false);
+
+ static_assert(has_trivial_copy<NoTrivialCopy2>::value == false, "has_trivial_copy failure");
+ EATEST_VERIFY(GetType(has_trivial_copy<NoTrivialCopy2>()) == false);
+
+
+ // has_trivial_assign
+ static_assert(has_trivial_assign<int>::value == true, "has_trivial_assign failure");
+ EATEST_VERIFY(GetType(has_trivial_assign<int>()) == true);
+
+ static_assert(has_trivial_assign<int*>::value == true, "has_trivial_assign failure");
+ EATEST_VERIFY(GetType(has_trivial_assign<int*>()) == true);
+
+ static_assert(has_trivial_assign<HasTrivialAssign>::value == true, "has_trivial_assign failure");
+ EATEST_VERIFY(GetType(has_trivial_assign<HasTrivialAssign>()) == true);
+
+ static_assert(has_trivial_assign<NoTrivialAssign1>::value == false, "has_trivial_assign failure");
+ EATEST_VERIFY(GetType(has_trivial_assign<NoTrivialAssign1>()) == false);
+
+ static_assert(has_trivial_assign<NoTrivialAssign2>::value == false, "has_trivial_assign failure");
+ EATEST_VERIFY(GetType(has_trivial_assign<NoTrivialAssign2>()) == false);
+
+
+ // has_trivial_destructor
+ static_assert(has_trivial_assign<int>::value == true, "has_trivial_relocate failure");
+ EATEST_VERIFY(GetType(has_trivial_assign<int>()) == true);
+
+ static_assert(has_trivial_assign<int*>::value == true, "has_trivial_relocate failure");
+ EATEST_VERIFY(GetType(has_trivial_assign<int*>()) == true);
+
+
+ // has_trivial_relocate
+ static_assert(has_trivial_relocate<int>::value == true, "has_trivial_relocate failure");
+ EATEST_VERIFY(GetType(has_trivial_relocate<int>()) == true);
+
+ static_assert(has_trivial_relocate<int*>::value == true, "has_trivial_relocate failure");
+ EATEST_VERIFY(GetType(has_trivial_relocate<int*>()) == true);
+
+
+ // is_signed
+ static_assert(is_signed<int>::value == true, "is_signed failure ");
+ static_assert(is_signed_v<int> == true, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<int>()) == true);
+
+ static_assert(is_signed<const int64_t>::value == true, "is_signed failure ");
+ static_assert(is_signed_v<const int64_t> == true, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<const int64_t>()) == true);
+
+ static_assert(is_signed<uint32_t>::value == false, "is_signed failure ");
+ static_assert(is_signed_v<uint32_t> == false, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<uint32_t>()) == false);
+
+ static_assert(is_signed<bool>::value == false, "is_signed failure ");
+ static_assert(is_signed_v<bool> == false, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<bool>()) == false);
+
+ static_assert(is_signed<float>::value == true, "is_signed failure ");
+ static_assert(is_signed_v<float> == true, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<float>()) == true);
+
+ static_assert(is_signed<double>::value == true, "is_signed failure ");
+ static_assert(is_signed_v<double> == true, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<double>()) == true);
+
+ static_assert(is_signed<char16_t>::value == false, "is_signed failure ");
+ static_assert(is_signed_v<char16_t> == false, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<char16_t>()) == false);
+
+ static_assert(is_signed<char32_t>::value == false, "is_signed failure ");
+ static_assert(is_signed_v<char32_t> == false, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<char32_t>()) == false);
+
+#if EASTL_GCC_STYLE_INT128_SUPPORTED
+ static_assert(is_signed<__int128_t>::value == true, "is_signed failure ");
+ static_assert(is_signed_v<__int128_t> == true, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<__int128_t>()) == true);
+
+ static_assert(is_signed<__uint128_t>::value == false, "is_signed failure ");
+ static_assert(is_signed_v<__uint128_t> == false, "is_signed failure ");
+ EATEST_VERIFY(GetType(is_signed<__uint128_t>()) == false);
+#endif
+
+ // is_unsigned
+ static_assert(is_unsigned<unsigned int>::value == true, "is_unsigned failure ");
+ static_assert(is_unsigned_v<unsigned int> == true, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<unsigned int>()) == true);
+
+ static_assert(is_unsigned<const uint64_t>::value == true, "is_unsigned failure ");
+ static_assert(is_unsigned_v<const uint64_t> == true, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<const uint64_t>()) == true);
+
+ static_assert(is_unsigned<int32_t>::value == false, "is_unsigned failure ");
+ static_assert(is_unsigned_v<int32_t> == false, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<int32_t>()) == false);
+
+ static_assert(is_unsigned<bool>::value == true, "is_unsigned failure ");
+ static_assert(is_unsigned_v<bool> == true, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<bool>()) == true);
+
+ static_assert(is_unsigned<float>::value == false, "is_unsigned failure ");
+ static_assert(is_unsigned_v<float> == false, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<float>()) == false);
+
+ static_assert(is_unsigned<double>::value == false, "is_unsigned failure ");
+ static_assert(is_unsigned_v<double> == false, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<double>()) == false);
+
+ static_assert(is_unsigned<char16_t>::value == true, "is_unsigned failure ");
+ static_assert(is_unsigned_v<char16_t> == true, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<char16_t>()) == true);
+
+ static_assert(is_unsigned<char32_t>::value == true, "is_unsigned failure ");
+ static_assert(is_unsigned_v<char32_t> == true, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<char32_t>()) == true);
+
+#if EASTL_GCC_STYLE_INT128_SUPPORTED
+ static_assert(is_unsigned<__int128_t>::value == false, "is_unsigned failure ");
+ static_assert(is_unsigned_v<__int128_t> == false, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<__int128_t>()) == false);
+
+ static_assert(is_unsigned<__uint128_t>::value == true, "is_unsigned failure ");
+ static_assert(is_unsigned_v<__uint128_t> == true, "is_unsigned failure ");
+ EATEST_VERIFY(GetType(is_unsigned<__uint128_t>()) == true);
+#endif
+
+
+ // is_lvalue_reference
+ static_assert((is_lvalue_reference<Class>::value == false), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference<Class&>::value == true), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference<Class&&>::value == false), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference<int>::value == false), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference<int&>::value == true), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference<int&&>::value == false), "is_lvalue_reference failure");
+
+ static_assert((is_lvalue_reference_v<Class> == false), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference_v<Class&> == true), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference_v<Class&&> == false), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference_v<int> == false), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference_v<int&> == true), "is_lvalue_reference failure");
+ static_assert((is_lvalue_reference_v<int&&> == false), "is_lvalue_reference failure");
+
+
+ // is_rvalue_reference
+ static_assert((is_rvalue_reference<Class>::value == false), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference<Class&>::value == false), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference<Class&&>::value == true), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference<int>::value == false), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference<int&>::value == false), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference<int&&>::value == true), "is_rvalue_reference failure");
+
+ static_assert((is_rvalue_reference_v<Class> == false), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference_v<Class&> == false), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference_v<Class&&> == true), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference_v<int> == false), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference_v<int&> == false), "is_rvalue_reference failure");
+ static_assert((is_rvalue_reference_v<int&&> == true), "is_rvalue_reference failure");
+
+
+ // is_assignable
+ // See the documentation for is_assignable to understand the results below are as they are.
+ static_assert((eastl::is_assignable<int&, int>::value == true), "is_assignable failure");
+ static_assert((eastl::is_assignable<const int&, int>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<char*, int*>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<char*, const char*>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<PodA, PodB*>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<Assignable, Pod2>::value == false), "is_assignable failure");
+
+ #if EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+ // These might not succeed unless the implementation is conforming.
+ static_assert((eastl::is_assignable<Assignable, Assignable>::value == true), "is_assignable failure");
+ static_assert((eastl::is_assignable<Assignable, Pod1>::value == true), "is_assignable failure");
+ static_assert((eastl::is_assignable<PodA&, PodA>::value == true), "is_assignable failure");
+
+ // These cannot succeed unless the implementation is conforming.
+ static_assert((eastl::is_assignable<void, void>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<int, int>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<int, const int>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<const int, int>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<int, int&>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<int64_t, int8_t>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<bool, bool>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<char*, char*>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<int, float>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<const char*, char*>::value == false), "is_assignable failure");
+ static_assert((eastl::is_assignable<int[], int[]>::value == false), "is_assignable failure");
+ #endif
+
+
+ // is_lvalue_assignable
+ static_assert((eastl::is_lvalue_assignable<int&, int>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<char*, int*>::value == false), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<char*, const char*>::value == false), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<PodA, PodB*>::value == false), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<Assignable, Pod2>::value == false), "is_lvalue_assignable failure");
+
+ #if EASTL_TYPE_TRAIT_is_lvalue_assignable_CONFORMANCE
+ // These might not succeed unless the implementation is conforming.
+ static_assert((eastl::is_lvalue_assignable<Assignable, Assignable>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<Assignable, Pod1>::value == true), "is_lvalue_assignable failure");
+
+ // These cannot succeed unless the implementation is conforming.
+ static_assert((eastl::is_lvalue_assignable<void, void>::value == false), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<int, int>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<int, const int>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<const int, int>::value == false), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<int, int&>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<int64_t, int8_t>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<bool, bool>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<char*, char*>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<const char*, char*>::value == true), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<int[], int[]>::value == false), "is_lvalue_assignable failure");
+ static_assert((eastl::is_lvalue_assignable<int[3], int[3]>::value == false), "is_lvalue_assignable failure"); // Despite that you can memcpy these, C++ syntax doesn't all =-based assignment.
+
+ #if !defined(EA_COMPILER_EDG) // EDG (and only EDG) is issuing int8_t->double conversion warnings from the decltype expression inside this trait. That's probably a compiler bug, though we need to verify.
+ static_assert((eastl::is_lvalue_assignable<double, int8_t>::value == true), "is_lvalue_assignable failure"); // Sure this might generate a warning, but it's valid syntax.
+ #endif
+ #endif
+
+
+ // is_copy_assignable
+ static_assert((eastl::is_copy_assignable<int&>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<char>::value == true), "is_copy_assignable failure");
+
+ #if EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+ // These might not succeed unless the implementation is conforming.
+ static_assert((eastl::is_copy_assignable<Assignable>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<Assignable>::value == true), "is_copy_assignable failure");
+
+ // These cannot succeed unless the implementation is conforming.
+ static_assert((eastl::is_copy_assignable<char*>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<PodA>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<Assignable>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<void>::value == false), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<int>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<const int>::value == false), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<int64_t>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<bool>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<char*>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<const char*>::value == true), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<int[3]>::value == false), "is_copy_assignable failure");
+ static_assert((eastl::is_copy_assignable<int[]>::value == false), "is_copy_assignable failure");
+ #endif
+
+
+ // is_trivially_assignable
+ static_assert((eastl::is_trivially_assignable<int&, int>::value == true), "is_trivially_assignable failure");
+ static_assert((eastl::is_trivially_assignable<char*, int*>::value == false), "is_trivially_assignable failure");
+ static_assert((eastl::is_trivially_assignable<char*, const char*>::value == false), "is_trivially_assignable failure");
+ static_assert((eastl::is_trivially_assignable<PodA, PodB*>::value == false), "is_trivially_assignable failure");
+ static_assert((eastl::is_trivially_assignable<Assignable, Assignable>::value == false), "is_trivially_assignable failure"); // False because not trivial.
+ static_assert((eastl::is_trivially_assignable<Assignable, Pod1>::value == false), "is_trivially_assignable failure"); // False because not trivial.
+ static_assert((eastl::is_trivially_assignable<Assignable, Pod2>::value == false), "is_trivially_assignable failure");
+
+ // is_nothrow_assignable
+ static_assert((is_nothrow_assignable<void, void>::value == false), "is_nothrow_assignable failure");
+ static_assert((is_nothrow_assignable<int32_t, int32_t>::value == false), "is_nothrow_assignable failure"); // See is_assignable for why this is so.
+ static_assert((is_nothrow_assignable<int32_t&, int32_t>::value == true), "is_nothrow_assignable failure");
+ static_assert((is_nothrow_assignable<int32_t, int8_t>::value == false), "is_nothrow_assignable failure");
+ #if EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE
+ static_assert((is_nothrow_assignable<int32_t&, int8_t>::value == true), "is_nothrow_assignable failure");
+ static_assert((is_nothrow_assignable<NoThrowAssignable, NoThrowAssignable>::value == true), "is_nothrow_assignable failure");
+ static_assert((is_nothrow_assignable<ThrowAssignableTest, NoThrowAssignable>::value == true), "is_nothrow_assignable failure");
+ static_assert((is_nothrow_assignable<ThrowAssignableTest, ThrowAssignableTest>::value == false), "is_nothrow_assignable failure");
+ #endif
+
+
+ // is_array_of_known_bounds
+ // is_array_of_unknown_bounds
+ static_assert(is_array_of_known_bounds<void>::value == false, "is_array_of_known_bounds failure");
+ static_assert(is_array_of_known_bounds<int>::value == false, "is_array_of_known_bounds failure");
+ static_assert(is_array_of_known_bounds<PodA>::value == false, "is_array_of_known_bounds failure");
+ static_assert(is_array_of_known_bounds<int[3]>::value == true, "is_array_of_known_bounds failure");
+ static_assert(is_array_of_known_bounds<int[]>::value == false, "is_array_of_known_bounds failure");
+
+ static_assert(is_array_of_unknown_bounds<void>::value == false, "is_array_of_known_bounds failure");
+ static_assert(is_array_of_unknown_bounds<int>::value == false, "is_array_of_known_bounds failure");
+ static_assert(is_array_of_unknown_bounds<PodA>::value == false, "is_array_of_known_bounds failure");
+ static_assert(is_array_of_unknown_bounds<int[3]>::value == false, "is_array_of_known_bounds failure");
+ static_assert(is_array_of_unknown_bounds<int[]>::value == true, "is_array_of_known_bounds failure");
+
+
+ // is_trivially_copyable
+ static_assert(is_trivially_copyable<void>::value == false, "is_trivially_copyable failure");
+ EATEST_VERIFY(GetType(is_trivially_copyable<void>()) == false);
+ static_assert(is_trivially_copyable<int>::value == true, "is_trivially_copyable failure");
+ static_assert(is_trivially_copyable<int*>::value == true, "is_trivially_copyable failure");
+ static_assert(is_trivially_copyable<int[]>::value == true, "is_trivially_copyable failure");
+ static_assert(is_trivially_copyable<int[4]>::value == true, "is_trivially_copyable failure");
+ #if EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE
+ static_assert(is_trivially_copyable<NonPod1>::value == false, "is_trivially_copyable failure");
+ static_assert(is_trivially_copyable<NoTrivialCopy1>::value == false, "is_trivially_copyable failure");
+ static_assert(is_trivially_copyable<PodA>::value == true, "is_trivially_copyable failure");
+ #endif
+
+ { // user reported regression
+ struct Foo
+ {
+ int a;
+ Foo(int i) : a(i) {}
+ Foo(Foo&& other) : a(other.a) { other.a = 0; }
+
+ Foo(const Foo&) = delete;
+ Foo& operator=(const Foo&) = delete;
+ };
+
+ static_assert(!eastl::is_trivially_copyable<Foo>::value, "is_trivially_copyable failure");
+ }
+
+
+ // is_trivially_copy_assignable
+ {
+ static_assert(is_trivially_copy_assignable<int>::value == true, "is_trivially_copy_assignable failure");
+ static_assert(is_trivially_copy_assignable<char*>::value == true, "is_trivially_copy_assignable failure");
+ static_assert(is_trivially_copy_assignable<const char*>::value == true, "is_trivially_copy_assignable failure");
+ static_assert(is_trivially_copy_assignable<NoTrivialCopy1>::value == false, "is_trivially_copy_assignable failure");
+
+ #ifdef INTENTIONALLY_DISABLED
+ // These tests currently fail on clang, but they would pass using the std::is_trivially_copy_assignable trait. We should
+ // determine if our implementation is correct, or if clang is actually incorrect.
+ static_assert(is_trivially_copy_assignable<const int>::value == true, "is_trivially_copy_assignable failure");
+ static_assert(is_trivially_copy_assignable<const PodA>::value == true, "is_trivially_copy_assignable failure");
+ static_assert(is_trivially_copy_assignable<PodA>::value == true, "is_trivially_copy_assignable failure");
+ #endif
+ }
+ // is_trivially_default_constructible
+ // To do.
+
+
+ // is_trivial
+ // To do.
+
+
+ // is_constructible
+ static_assert(is_constructible<void>::value == false, "is_constructible failure");
+ static_assert(is_constructible<const void>::value == false, "is_constructible failure");
+ static_assert(is_constructible<int>::value == true, "is_constructible failure");
+ static_assert(is_constructible<int&>::value == false, "is_constructible failure");
+ static_assert(is_constructible<int&&>::value == false, "is_constructible failure");
+ static_assert(is_constructible<int*>::value == true, "is_constructible failure");
+ static_assert(is_constructible<int[]>::value == false, "is_constructible failure");
+ static_assert(is_constructible<int[4]>::value == true, "is_constructible failure");
+ static_assert(is_constructible<NonPod1>::value == true, " is_constructible failure");
+ static_assert(is_constructible<NoTrivialCopy1>::value == true, "is_constructible failure");
+ static_assert(is_constructible<PodA>::value == true, "is_constructible failure");
+ static_assert(is_constructible<Abstract>::value == false, "is_constructible failure");
+ static_assert(is_constructible<NonCopyable>::value == true, "is_constructible failure");
+ #if EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE
+ static_assert((is_constructible<int, const int>::value == true), "is_constructible failure");
+ static_assert((is_constructible<char*, const char*>::value == false), "is_constructible failure");
+ static_assert((is_constructible<char*, char* const>::value == true), "is_constructible failure");
+ static_assert((is_constructible<ThrowConstructibleTest, int>::value == true), "is_constructible failure");
+ static_assert((is_constructible<ThrowConstructibleTest, float>::value == true), "is_constructible failure");
+ #endif
+
+
+ // is_trivially_constructible
+ // Need double parentheses because some older compilers need static_assert implemented as a macro.
+ static_assert((is_trivially_constructible<void>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<void, void>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<void, int>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<int>::value == true), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<int, int>::value == true), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<int, Abstract>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<int*>::value == true), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<int[]>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<int[], int[]>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<int[4]>::value == true), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<int[4], int[]>::value == false), "is_trivially_constructible failure");
+ #if EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE
+ static_assert((is_trivially_constructible<NoTrivialCopy1>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<PodA>::value == true), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<PodA, PodA>::value == true), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<Abstract>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<NonPod1>::value == false), "is_trivially_constructible failure");
+ static_assert((is_trivially_constructible<NoTrivialConstructor>::value == false), "is_trivially_constructible failure");
+ #endif
+
+
+ // is_nothrow_constructible
+ static_assert((is_nothrow_constructible<void>::value == false), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<int>::value == true), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<int*>::value == true), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<int[4]>::value == true), "is_nothrow_constructible failure");
+ #if EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE
+ static_assert((is_nothrow_constructible<int[]>::value == false), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<Abstract>::value == false), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<int, const int>::value == true), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<char*, const char*>::value == false), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<char*, char* const>::value == true), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<NonPod1>::value == false), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<PodA>::value == true), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<ThrowConstructibleTest, int>::value == true), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<ThrowConstructibleTest, float>::value == false), "is_nothrow_constructible failure");
+ static_assert((is_nothrow_constructible<NoTrivialCopy1>::value == true), "is_nothrow_constructible failure"); //True because it's a compiler-generated constructor.
+ #endif
+
+
+ // is_nothrow_move_constructible
+#if !defined(EA_PLATFORM_MICROSOFT)
+ static_assert((is_nothrow_move_constructible<void>::value == false), "is_nothrow_move_constructible failure");
+ static_assert((is_nothrow_move_constructible<int>::value == true), "is_nothrow_move_constructible failure");
+ static_assert((is_nothrow_move_constructible<int*>::value == true), "is_nothrow_move_constructible failure");
+ static_assert((is_nothrow_move_constructible<const int*>::value == true), "is_nothrow_move_constructible failure");
+ static_assert((is_nothrow_move_constructible<int&>::value == true), "is_nothrow_move_constructible failure");
+ static_assert((is_nothrow_move_constructible<double>::value == true), "is_nothrow_move_constructible failure");
+ static_assert((is_nothrow_move_constructible<ClassEmpty>::value == true), "is_nothrow_move_constructible failure");
+#endif
+
+
+ // is_copy_constructible
+ static_assert((is_copy_constructible<void>::value == false), "is_copy_constructible failure");
+ #if EASTL_TYPE_TRAIT_is_copy_constructible_CONFORMANCE
+ static_assert((is_copy_constructible<int>::value == true), "is_copy_constructible failure");
+ static_assert((is_copy_constructible<char*>::value == true), "is_copy_constructible failure");
+ static_assert((is_copy_constructible<int&>::value == true), "is_copy_constructible failure"); // As of this writing, GCC's libstdc++ reports true for this. I'm trying to find what's correct.
+ static_assert((is_copy_constructible<const int>::value == true), "is_copy_constructible failure");
+ static_assert((is_copy_constructible<HasTrivialCopy>::value == true), "is_copy_constructible failure");
+
+ #if !defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_MSVC) // EDG (and only EDG) is generating warnings about the decltype expression referencing a deleted constructor. This seems like a bug, though we need to verify.
+ // EA_COMPILER_MSVC is disabled because VS2013 fails this test and it may be that EASTL_TYPE_TRAIT_is_copy_constructible_CONFORMANCE should really be 0 for VS2013.
+ static_assert((is_copy_constructible<ConstructibleOnlyWithNonConstReference>::value == false), "is_copy_constructible failure");
+ #endif
+ #endif
+
+
+ // is_destructible
+ static_assert(is_destructible<int>::value == true, "is_destructible failure");
+ static_assert(is_destructible<int&>::value == true, "is_destructible failure");
+ static_assert(is_destructible<int&&>::value == true, "is_destructible failure");
+ static_assert(is_destructible<char>::value == true, "is_destructible failure");
+ static_assert(is_destructible<char*>::value == true, "is_destructible failure");
+ static_assert(is_destructible<PodA>::value == true, "is_destructible failure");
+ static_assert(is_destructible<void>::value == false, "is_destructible failure");
+ static_assert(is_destructible<int[3]>::value == true, "is_destructible failure");
+ static_assert(is_destructible<int[]>::value == false, "is_destructible failure"); // You can't call operator delete on this class.
+ static_assert(is_destructible<Abstract>::value == true, "is_destructible failure");
+ static_assert(is_destructible<AbstractWithDtor>::value == true, "is_destructible failure");
+ #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ static_assert(is_destructible<DeletedDtor>::value == false, "is_destructible failure"); // You can't call operator delete on this class.
+ #endif
+ static_assert(is_destructible<NonPod2>::value == true, "is_destructible failure");
+
+
+ // is_trivially_destructible
+ static_assert(is_trivially_destructible<int>::value == true, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<int&>::value == true, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<int&&>::value == true, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<char>::value == true, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<char*>::value == true, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<void>::value == false, "is_trivially_destructible failure");
+ #if EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE
+ static_assert(is_trivially_destructible<PodA>::value == true, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<int[3]>::value == true, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<int[]>::value == false, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<Abstract>::value == true, "is_trivially_destructible failure");
+ static_assert(is_trivially_destructible<AbstractWithDtor>::value == false, "is_trivially_destructible failure"); // Having a user-defined destructor make it non-trivial.
+ #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ static_assert(is_trivially_destructible<DeletedDtor>::value == false, "is_trivially_destructible failure");
+ #endif
+ static_assert(is_trivially_destructible<NonPod2>::value == false, "is_trivially_destructible failure"); // This case differs from is_destructible, because we have a declared destructor.
+ #endif
+
+
+ // is_nothrow_destructible
+ static_assert(is_nothrow_destructible<int>::value == true, "is_nothrow_destructible failure");
+ static_assert(is_nothrow_destructible<int&>::value == true, "is_nothrow_destructible failure");
+ static_assert(is_nothrow_destructible<int&&>::value == true, "is_nothrow_destructible failure");
+ static_assert(is_nothrow_destructible<void>::value == false, "is_nothrow_destructible failure");
+ static_assert(is_nothrow_destructible<Abstract>::value == true, "is_nothrow_destructible failure");
+ static_assert(is_nothrow_destructible<AbstractWithDtor>::value == true, "is_nothrow_destructible failure");
+ #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ static_assert(is_nothrow_destructible<DeletedDtor>::value == false, "is_nothrow_destructible failure"); // You can't call operator delete on this class.
+ #endif
+ #if EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE
+ static_assert(is_nothrow_destructible<NonPod2>::value == true, "is_nothrow_destructible failure"); // NonPod2 is nothrow destructible because it has an empty destructor (makes no calls) which has no exception specification. Thus its exception specification defaults to noexcept(true) [C++11 Standard, 15.4 paragraph 14]
+ static_assert(is_nothrow_destructible<NoThrowDestructible>::value == true, "is_nothrow_destructible failure");
+ #endif
+ #if EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE && !defined(EA_COMPILER_NO_EXCEPTIONS)
+ static_assert(is_nothrow_destructible<ThrowDestructible>::value == false, "is_nothrow_destructible failure");
+ static_assert(is_nothrow_destructible<ThrowDestructibleNoexceptFalse>::value == false, "is_nothrow_destructible failure");
+ #endif
+
+
+ // alignment_of
+ #if !defined(EA_ABI_ARM_APPLE) // Apple on ARM (i.e. iPhone/iPad) doesn't align 8 byte types on 8 byte boundaries, and the hardware allows it.
+ static_assert(alignment_of<uint64_t>::value == 8, "alignment_of failure");
+ EATEST_VERIFY(GetType(alignment_of<uint64_t>()) == 8);
+ #endif
+
+ static_assert(alignment_of<ClassAlign32>::value == 32, "alignment_of failure");
+ EATEST_VERIFY(GetType(alignment_of<ClassAlign32>()) == 32);
+
+
+ // common_type
+ static_assert((is_same<common_type<NonPod2*>::type, NonPod2*>::value), "common_type failure");
+ static_assert((is_same<common_type<int>::type, int>::value), "common_type failure");
+ static_assert((is_same<common_type<void, void>::type, void>::value), "common_type failure");
+ static_assert((is_same<common_type<int, int>::type, int>::value), "common_type failure");
+
+
+ // rank
+ static_assert(rank<int[1][2][3][4][5][6]>::value == 6, "rank failure");
+ static_assert(rank<int[][1][2]>::value == 3, "rank failure");
+ static_assert(rank<int>::value == 0, "rank failure");
+ static_assert(rank<void>::value == 0, "rank failure");
+
+ static_assert(rank_v<int[1][2][3][4][5][6]> == 6, "rank failure");
+ static_assert(rank_v<int[][1][2]> == 3, "rank failure");
+ static_assert(rank_v<int> == 0, "rank failure");
+ static_assert(rank_v<void> == 0, "rank failure");
+
+
+
+ // extent
+ static_assert((extent<int> ::value == 0), "extent failure");
+ static_assert((extent<int[2]> ::value == 2), "extent failure");
+ static_assert((extent<int[2][4]> ::value == 2), "extent failure");
+ static_assert((extent<int[]> ::value == 0), "extent failure");
+ static_assert((extent<int[][4]> ::value == 0), "extent failure");
+ static_assert((extent<int, 1> ::value == 0), "extent failure");
+ static_assert((extent<int[2], 1> ::value == 0), "extent failure");
+ static_assert((extent<int[2][4], 1>::value == 4), "extent failure");
+ static_assert((extent<int[][4], 1> ::value == 4), "extent failure");
+
+ static_assert((extent_v<int> == 0), "extent failure");
+ static_assert((extent_v<int[2]> == 2), "extent failure");
+ static_assert((extent_v<int[2][4]> == 2), "extent failure");
+ static_assert((extent_v<int[]> == 0), "extent failure");
+ static_assert((extent_v<int[][4]> == 0), "extent failure");
+ static_assert((extent_v<int, 1> == 0), "extent failure");
+ static_assert((extent_v<int[2], 1> == 0), "extent failure");
+ static_assert((extent_v<int[2][4], 1> == 4), "extent failure");
+ static_assert((extent_v<int[][4], 1> == 4), "extent failure");
+
+
+
+ // is_aligned
+ static_assert(is_aligned<uint8_t>::value == false, "is_aligned failure");
+ EATEST_VERIFY(GetType(is_aligned<uint8_t>()) == false);
+
+ static_assert(is_aligned<uint16_t>::value == false, "is_aligned failure");
+ EATEST_VERIFY(GetType(is_aligned<uint16_t>()) == false);
+
+ static_assert(is_aligned<uint32_t>::value == false, "is_aligned failure");
+ EATEST_VERIFY(GetType(is_aligned<uint32_t>()) == false);
+
+ static_assert(is_aligned<uint64_t>::value == false, "is_aligned failure");
+ EATEST_VERIFY(GetType(is_aligned<uint64_t>()) == false);
+
+ static_assert(is_aligned<uint64_t>::value == false, "is_aligned failure");
+ EATEST_VERIFY(GetType(is_aligned<uint64_t>()) == false);
+
+ {
+ #if (kEASTLTestAlign16 == 16) // To do: Rename kEASTLTestAlign16, as what it really means is "is 16 byte alignment+ supported".
+ static_assert(is_aligned<Align16>::value, "is_aligned failure");
+ EATEST_VERIFY(GetType(is_aligned<Align16>()));
+
+
+ static_assert(is_aligned<Align32>::value, "is_aligned failure");
+ EATEST_VERIFY(GetType(is_aligned<Align32>()));
+
+ static_assert(is_aligned<Align64>::value, "is_aligned failure");
+ EATEST_VERIFY(GetType(is_aligned<Align64>()));
+ #endif
+ }
+
+
+ // is_same
+ static_assert((is_same<uint32_t, uint32_t>::value == true), "is_same failure");
+ static_assert((is_same<void, void>::value == true), "is_same failure");
+ static_assert((is_same<void*, void*>::value == true), "is_same failure");
+ static_assert((is_same<uint64_t, uint64_t>::value == true), "is_same failure");
+ static_assert((is_same<Class, Class>::value == true), "is_same failure");
+ static_assert((is_same<uint64_t, uint32_t>::value == false), "is_same failure");
+ static_assert((is_same<Class, ClassAlign32>::value == false), "is_same failure");
+
+ static_assert((is_same_v<uint32_t, uint32_t> == true), "is_same_v failure");
+ static_assert((is_same_v<void, void> == true), "is_same_v failure");
+ static_assert((is_same_v<void*, void*> == true), "is_same_v failure");
+ static_assert((is_same_v<uint64_t, uint64_t> == true), "is_same_v failure");
+ static_assert((is_same_v<Class, Class> == true), "is_same_v failure");
+ static_assert((is_same_v<uint64_t, uint32_t> == false), "is_same_v failure");
+ static_assert((is_same_v<Class, ClassAlign32> == false), "is_same_v failure");
+
+
+
+ // is_convertible
+ static_assert((is_convertible<uint16_t, uint32_t>::value == true), "is_convertible failure");
+ static_assert((is_convertible<int32_t, int16_t>::value == true), "is_convertible failure"); // This is a conversion from 32 bits down to 16 bits. All compilers natively report that this is true. However, VC++ generates warnings for actual such conversions.
+ static_assert((is_convertible<Subclass, Class>::value == true), "is_convertible failure");
+ static_assert((is_convertible<Subclass*, Class*>::value == true), "is_convertible failure");
+ static_assert((is_convertible<Subclass&, const Class&>::value == true), "is_convertible failure");
+ static_assert((is_convertible<int, Class>::value == false), "is_convertible failure");
+ static_assert((is_convertible<NonPod1, NonPod1>::value == true), "is_convertible failure");
+ static_assert((is_convertible<NonPod1, NonPod2>::value == false), "is_convertible failure");
+ #if EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE // This causes compile failures.
+ static_assert((is_convertible<IsConvertibleTest1, IsConvertibleTest1>::value == false), "is_convertible failure");
+ #endif
+
+ // Test EASTL_DECLARE_TRIVIAL_ASSIGN(HiddenAssign);
+ eastl::vector<HiddenAssign> v;
+ EATEST_VERIFY(v.empty());
+
+
+ // make_signed
+ // make_unsigned
+ {
+ // Test declarations
+ eastl::make_signed<int8_t>::type i8 = -1;
+ EATEST_VERIFY(i8 == -1);
+ eastl::make_unsigned<uint8_t>::type u8 = 0xff;
+ EATEST_VERIFY(u8 == 0xff);
+
+ eastl::make_signed<int16_t>::type i16 = -1;
+ EATEST_VERIFY(i16 == -1);
+ eastl::make_unsigned<uint16_t>::type u16 = 0xffff;
+ EATEST_VERIFY(u16 == 0xffff);
+
+ eastl::make_signed<int32_t>::type i32 = -1;
+ EATEST_VERIFY(i32 == -1);
+ eastl::make_unsigned<uint32_t>::type u32 = 0xffffffff;
+ EATEST_VERIFY(u32 == 0xffffffff);
+
+ eastl::make_signed<int64_t>::type i64 = -1;
+ EATEST_VERIFY(i64 == -1);
+ eastl::make_unsigned<uint64_t>::type u64 = UINT64_C(0xffffffffffffffff);
+ EATEST_VERIFY(u64 == UINT64_C(0xffffffffffffffff));
+
+ // Test conversions via static_cast:
+ u8 = static_cast<eastl::make_unsigned<uint8_t>::type>(i8);
+ EATEST_VERIFY(u8 == 0xff);
+ i8 = static_cast<eastl::make_signed<int8_t>::type>(u8);
+ EATEST_VERIFY(i8 == -1);
+
+ u16 = static_cast<eastl::make_unsigned<uint16_t>::type>(i16);
+ EATEST_VERIFY(u16 == 0xffff);
+ i16 = static_cast<eastl::make_signed<int16_t>::type>(u16);
+ EATEST_VERIFY(i16 == -1);
+
+ u32 = static_cast<eastl::make_unsigned<uint32_t>::type>(i32);
+ EATEST_VERIFY(u32 == 0xffffffff);
+ i32 = static_cast<eastl::make_signed<int32_t>::type>(u32);
+ EATEST_VERIFY(i32 == -1);
+
+ u64 = static_cast<eastl::make_unsigned<uint64_t>::type>(i64);
+ EATEST_VERIFY(u64 == UINT64_C(0xffffffffffffffff));
+ i64 = static_cast<eastl::make_signed<int64_t>::type>(u64);
+ EATEST_VERIFY(i64 == -1);
+
+
+ static_assert(eastl::is_same_v<signed char, eastl::make_signed<unsigned char>::type>);
+ static_assert(eastl::is_same_v<short, eastl::make_signed<unsigned short>::type>);
+ static_assert(eastl::is_same_v<int, eastl::make_signed<unsigned int>::type>);
+ static_assert(eastl::is_same_v<long, eastl::make_signed<unsigned long>::type>);
+ static_assert(eastl::is_same_v<long long, eastl::make_signed<unsigned long long>::type>);
+
+ static_assert(eastl::is_same_v<const signed char, eastl::make_signed<const unsigned char>::type>);
+ static_assert(eastl::is_same_v<const short, eastl::make_signed<const unsigned short>::type>);
+ static_assert(eastl::is_same_v<const int, eastl::make_signed<const unsigned int>::type>);
+ static_assert(eastl::is_same_v<const long, eastl::make_signed<const unsigned long>::type>);
+ static_assert(eastl::is_same_v<const long long, eastl::make_signed<const unsigned long long>::type>);
+
+ static_assert(eastl::is_same_v<volatile signed char, eastl::make_signed<volatile unsigned char>::type>);
+ static_assert(eastl::is_same_v<volatile short, eastl::make_signed<volatile unsigned short>::type>);
+ static_assert(eastl::is_same_v<volatile int, eastl::make_signed<volatile unsigned int>::type>);
+ static_assert(eastl::is_same_v<volatile long, eastl::make_signed<volatile unsigned long>::type>);
+ static_assert(eastl::is_same_v<volatile long long, eastl::make_signed<volatile unsigned long long>::type>);
+
+ static_assert(eastl::is_same_v<const volatile signed char, eastl::make_signed<const volatile unsigned char>::type>);
+ static_assert(eastl::is_same_v<const volatile short, eastl::make_signed<const volatile unsigned short>::type>);
+ static_assert(eastl::is_same_v<const volatile int, eastl::make_signed<const volatile unsigned int>::type>);
+ static_assert(eastl::is_same_v<const volatile long, eastl::make_signed<const volatile unsigned long>::type>);
+ static_assert(eastl::is_same_v<const volatile long long, eastl::make_signed<const volatile unsigned long long>::type>);
+
+ static_assert(eastl::is_same_v<unsigned char, eastl::make_unsigned<signed char>::type>);
+ static_assert(eastl::is_same_v<unsigned short, eastl::make_unsigned<short>::type>);
+ static_assert(eastl::is_same_v<unsigned int, eastl::make_unsigned<int>::type>);
+ static_assert(eastl::is_same_v<unsigned long, eastl::make_unsigned<long>::type>);
+ static_assert(eastl::is_same_v<unsigned long long, eastl::make_unsigned<long long>::type>);
+
+ static_assert(eastl::is_same_v<const unsigned char, eastl::make_unsigned<const signed char>::type>);
+ static_assert(eastl::is_same_v<const unsigned short, eastl::make_unsigned<const short>::type>);
+ static_assert(eastl::is_same_v<const unsigned int, eastl::make_unsigned<const int>::type>);
+ static_assert(eastl::is_same_v<const unsigned long, eastl::make_unsigned<const long>::type>);
+ static_assert(eastl::is_same_v<const unsigned long long, eastl::make_unsigned<const long long>::type>);
+
+ static_assert(eastl::is_same_v<volatile unsigned char, eastl::make_unsigned<volatile signed char>::type>);
+ static_assert(eastl::is_same_v<volatile unsigned short, eastl::make_unsigned<volatile short>::type>);
+ static_assert(eastl::is_same_v<volatile unsigned int, eastl::make_unsigned<volatile int>::type>);
+ static_assert(eastl::is_same_v<volatile unsigned long, eastl::make_unsigned<volatile long>::type>);
+ static_assert(eastl::is_same_v<volatile unsigned long long, eastl::make_unsigned<volatile long long>::type>);
+
+ static_assert(eastl::is_same_v<const volatile unsigned char, eastl::make_unsigned<const volatile signed char>::type>);
+ static_assert(eastl::is_same_v<const volatile unsigned short, eastl::make_unsigned<const volatile short>::type>);
+ static_assert(eastl::is_same_v<const volatile unsigned int, eastl::make_unsigned<const volatile int>::type>);
+ static_assert(eastl::is_same_v<const volatile unsigned long, eastl::make_unsigned<const volatile long>::type>);
+ static_assert(eastl::is_same_v<const volatile unsigned long long, eastl::make_unsigned<const volatile long long>::type>);
+
+ static_assert(eastl::is_same_v<signed char, eastl::make_signed<signed char>::type>);
+ static_assert(eastl::is_same_v<short, eastl::make_signed<signed short>::type>);
+ static_assert(eastl::is_same_v<int, eastl::make_signed<signed int>::type>);
+ static_assert(eastl::is_same_v<long, eastl::make_signed<signed long>::type>);
+ static_assert(eastl::is_same_v<long long, eastl::make_signed<signed long long>::type>);
+
+ static_assert(eastl::is_same_v<unsigned char, eastl::make_unsigned<unsigned char>::type>);
+ static_assert(eastl::is_same_v<unsigned short, eastl::make_unsigned<unsigned short>::type>);
+ static_assert(eastl::is_same_v<unsigned int, eastl::make_unsigned<unsigned int>::type>);
+ static_assert(eastl::is_same_v<unsigned long, eastl::make_unsigned<unsigned long>::type>);
+ static_assert(eastl::is_same_v<unsigned long long, eastl::make_unsigned<unsigned long long>::type>);
+
+ #if EASTL_GCC_STYLE_INT128_SUPPORTED
+ static_assert(eastl::is_same_v<__uint128_t, eastl::make_unsigned<__int128_t>::type>);
+ static_assert(eastl::is_same_v<__uint128_t, eastl::make_unsigned<__uint128_t>::type>);
+
+ static_assert(eastl::is_same_v<__int128_t, eastl::make_signed<__int128_t>::type>);
+ static_assert(eastl::is_same_v<__int128_t, eastl::make_signed<__uint128_t>::type>);
+ #endif
+
+ // Char tests
+ static_assert(sizeof(char) == sizeof(eastl::make_signed<char>::type));
+ static_assert(sizeof(wchar_t) == sizeof(eastl::make_signed<wchar_t>::type));
+ static_assert(sizeof(char8_t) == sizeof(eastl::make_signed<char8_t>::type));
+ static_assert(sizeof(char16_t) == sizeof(eastl::make_signed<char16_t>::type));
+ static_assert(sizeof(char32_t) == sizeof(eastl::make_signed<char32_t>::type));
+ static_assert(sizeof(char) == sizeof(eastl::make_unsigned<char>::type));
+ static_assert(sizeof(wchar_t) == sizeof(eastl::make_unsigned<wchar_t>::type));
+ static_assert(sizeof(char8_t) == sizeof(eastl::make_unsigned<char8_t>::type));
+ static_assert(sizeof(char16_t) == sizeof(eastl::make_unsigned<char16_t>::type));
+ static_assert(sizeof(char32_t) == sizeof(eastl::make_unsigned<char32_t>::type));
+
+ static_assert(eastl::is_same_v<signed char, eastl::make_signed<char8_t>::type>);
+ static_assert(eastl::is_same_v<unsigned char, eastl::make_unsigned<char8_t>::type>);
+
+ // Enum tests
+ enum EnumUCharSize : unsigned char {};
+ enum EnumUShortSize : unsigned short {};
+ enum EnumUIntSize : unsigned int {};
+ enum EnumULongSize : unsigned long {};
+ enum EnumULongLongSize : unsigned long long {};
+
+ static_assert(eastl::is_signed_v<eastl::make_signed<EnumUCharSize>::type>);
+ static_assert(eastl::is_signed_v<eastl::make_signed<EnumUShortSize>::type>);
+ static_assert(eastl::is_signed_v<eastl::make_signed<EnumUIntSize>::type>);
+ static_assert(eastl::is_signed_v<eastl::make_signed<EnumULongSize>::type>);
+ static_assert(eastl::is_signed_v<eastl::make_signed<EnumULongLongSize>::type>);
+ static_assert(sizeof(EnumUCharSize) == sizeof(eastl::make_signed<EnumUCharSize>::type));
+ static_assert(sizeof(EnumUShortSize) == sizeof(eastl::make_signed<EnumUShortSize>::type));
+ static_assert(sizeof(EnumUIntSize) == sizeof(eastl::make_signed<EnumUIntSize>::type));
+ static_assert(sizeof(EnumULongSize) == sizeof(eastl::make_signed<EnumULongSize>::type));
+ static_assert(sizeof(EnumULongLongSize) == sizeof(eastl::make_signed<EnumULongLongSize>::type));
+
+ enum EnumCharSize : signed char {};
+ enum EnumShortSize : short {};
+ enum EnumIntSize : int {};
+ enum EnumLongSize : long {};
+ enum EnumLongLongSize : long long {};
+
+ static_assert(eastl::is_unsigned_v<eastl::make_unsigned<EnumCharSize>::type>);
+ static_assert(eastl::is_unsigned_v<eastl::make_unsigned<EnumShortSize>::type>);
+ static_assert(eastl::is_unsigned_v<eastl::make_unsigned<EnumIntSize>::type>);
+ static_assert(eastl::is_unsigned_v<eastl::make_unsigned<EnumLongSize>::type>);
+ static_assert(eastl::is_unsigned_v<eastl::make_unsigned<EnumLongLongSize>::type>);
+ static_assert(sizeof(EnumCharSize) == sizeof(eastl::make_unsigned<EnumCharSize>::type));
+ static_assert(sizeof(EnumShortSize) == sizeof(eastl::make_unsigned<EnumShortSize>::type));
+ static_assert(sizeof(EnumIntSize) == sizeof(eastl::make_unsigned<EnumIntSize>::type));
+ static_assert(sizeof(EnumLongSize) == sizeof(eastl::make_unsigned<EnumLongSize>::type));
+ static_assert(sizeof(EnumLongLongSize) == sizeof(eastl::make_unsigned<EnumLongLongSize>::type));
+ }
+
+ // remove_const
+ // remove_volatile
+ // remove_cv
+ {
+ // To do: Make more thorough tests verifying this. Such tests will probably involve template metaprogramming.
+ remove_const<const int32_t>::type i32 = 47;
+ EATEST_VERIFY(++i32 == 48);
+
+ remove_volatile<volatile int16_t>::type i16 = 47;
+ EATEST_VERIFY(++i16 == 48);
+
+ remove_cv<const volatile int32_t>::type i64 = 47;
+ EATEST_VERIFY(++i64 == 48);
+
+ //static_assert(is_same<std::remove_cv<int (int, ...)>::type , std::remove_cv<int (int, ...) const>::type>::value, "remove_cv failure");
+ }
+
+ // remove_cvref
+ {
+ static_assert(is_same_v<remove_cvref_t<int>, int>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<int&>, int>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<int&&>, int>, "remove_cvref failure");
+
+ static_assert(is_same_v<remove_cvref_t<const int>, int>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<const int&>, int>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<const int&&>, int>, "remove_cvref failure");
+
+ static_assert(is_same_v<remove_cvref_t<volatile int>, int>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<volatile int&>, int>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<volatile int&&>, int>, "remove_cvref failure");
+
+ static_assert(is_same_v<remove_cvref_t<const volatile int>, int>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<const volatile int&>, int>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<const volatile int&&>, int>, "remove_cvref failure");
+
+ // test pointer types
+ static_assert(is_same_v<remove_cvref_t<int*>, int*>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<int*&>, int*>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<int*&&>, int*>, "remove_cvref failure");
+
+ static_assert(is_same_v<remove_cvref_t<const int*>, const int*>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<const int*&>, const int*>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<const int*&&>, const int*>, "remove_cvref failure");
+
+ static_assert(is_same_v<remove_cvref_t<int* const>, int*>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<int* const&>, int*>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<int* const&&>, int*>, "remove_cvref failure");
+
+ static_assert(is_same_v<remove_cvref_t<int* const volatile>, int*>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<int* const volatile&>, int*>, "remove_cvref failure");
+ static_assert(is_same_v<remove_cvref_t<int* const volatile&&>, int*>, "remove_cvref failure");
+ }
+
+
+ // add_const
+ // add_volatile
+ // add_cv
+ {
+ // To do: Make more thorough tests verifying this. Such tests will probably involve template metaprogramming.
+ eastl::add_const<int32_t>::type i32 = 47;
+ EATEST_VERIFY(i32 == 47);
+
+ eastl::add_volatile<volatile int16_t>::type i16 = 47;
+ EATEST_VERIFY(++i16 == 48);
+
+ eastl::add_cv<const volatile int32_t>::type i64 = 47;
+ EATEST_VERIFY(i64 == 47);
+ }
+
+
+ // as_const
+ {
+ {
+ int i = 42;
+ static_assert(eastl::is_same<decltype(eastl::as_const(i)), const int&>::value, "expecting a 'const T&' return type");
+ EATEST_VERIFY(eastl::as_const(i) == 42);
+ }
+
+ {
+ eastl::string str = "Electronic Arts";
+ static_assert(eastl::is_same<decltype(eastl::as_const(str)), const eastl::string&>::value, "expecting a 'const T&' return type");
+ EATEST_VERIFY(eastl::as_const(str) == "Electronic Arts");
+ }
+ }
+
+
+ // remove_reference
+ // add_reference
+ // remove_pointer
+ // add_pointer
+ // remove_extent
+ // remove_all_extents
+ {
+ int x = 17;
+ eastl::add_reference<int>::type xRef = x;
+ x++;
+ EATEST_VERIFY(xRef == 18);
+
+ eastl::remove_reference<int&>::type xValue;
+ xValue = 3;
+ EATEST_VERIFY(xValue == 3);
+
+ eastl::add_pointer<int>::type xPtr = &x;
+ *xPtr = 19;
+ EATEST_VERIFY(x == 19);
+
+ eastl::remove_pointer<int*>::type yValue;
+ yValue = 3;
+ EATEST_VERIFY(yValue == 3);
+
+ // ref to T
+ // -> T*
+ static_assert(is_same_v<add_pointer_t<int&>, int*>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<int(&)()>, int(*)()>, "add_pointer failure");
+
+ // object type (a (possibly cv-qualified) type other than function type, reference type or void), or
+ // a function type that is not cv- or ref-qualified, or a (possibly cv-qualified) void type
+ // -> T*
+ static_assert(is_same_v<add_pointer_t<int>, int*>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<int*>, int**>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<int()>, int(*)()>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<void>, void*>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<const void>, const void*>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<volatile void>, volatile void*>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<const volatile void>, const volatile void*>, "add_pointer failure");
+
+ // otherwise (cv- or ref-qualified function type)
+ // -> T
+ static_assert(is_same_v<add_pointer_t<int() const>, int() const>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<int() volatile>, int() volatile>, "add_pointer failure");
+ static_assert(is_same_v<add_pointer_t<int() const volatile>, int() const volatile>, "add_pointer failure");
+
+ // remove_extent
+ // If T is an array of some type X, provides the member typedef type equal to X, otherwise
+ // type is T. Note that if T is a multidimensional array, only the first dimension is removed.
+ typedef int IntArray1[37];
+ typedef eastl::remove_extent<IntArray1>::type Int;
+ static_assert((eastl::is_same<Int, int>::value == true), "remove_extent/is_same failure");
+
+ // remove_all_extents
+ typedef int IntArray2[37][54];
+ typedef eastl::remove_all_extents<IntArray2>::type Int2;
+ static_assert((eastl::is_same<Int2, int>::value == true), "remove_all_extents/is_same failure");
+ }
+
+ // add_lvalue_reference
+ {
+ // function type with no cv- or ref-qualifier
+ // -> T&
+ static_assert(is_same_v<add_lvalue_reference_t<void()>, void(&)()>, "add_lvalue_reference failure");
+
+ // object type (a (possibly cv-qualified) type other than function type, reference type or void)
+ // -> T&
+ static_assert(is_same_v<add_lvalue_reference_t<int>, int&>, "add_lvalue_reference failure");
+ static_assert(is_same_v<add_lvalue_reference_t<const int>, const int&>, "add_lvalue_reference failure");
+
+ // if T is an rvalue reference (to some type U)
+ // -> U&
+ static_assert(is_same_v<add_lvalue_reference_t<int&&>, int&>, "add_lvalue_reference failure");
+
+ // otherwise (cv- or ref-qualified function type, or reference type, or (possibly cv-qualified) void)
+ // -> T
+ static_assert(is_same_v<add_lvalue_reference_t<void() const>, void() const>, "add_lvalue_reference failure");
+ static_assert(is_same_v<add_lvalue_reference_t<void()&>, void()&>, "add_lvalue_reference failure");
+ static_assert(is_same_v<add_lvalue_reference_t<void()&&>, void()&&>, "add_lvalue_reference failure");
+ static_assert(is_same_v<add_lvalue_reference_t<int&>, int&>, "add_lvalue_reference failure");
+ static_assert(is_same_v<add_lvalue_reference_t<const int&>, const int&>, "add_lvalue_reference failure");
+ static_assert(is_same_v<add_lvalue_reference_t<void>, void>, "add_lvalue_reference failure");
+ static_assert(is_same_v<add_lvalue_reference_t<const void>, const void>, "add_lvalue_reference failure");
+ }
+
+ // add_rvalue_reference
+ {
+ // function type with no cv- or ref-qualifier
+ // -> T&&
+ static_assert(is_same_v<add_rvalue_reference_t<void()>, void(&&)()>, "add_rvalue_reference failure");
+
+ // object type (a (possibly cv-qualified) type other than function type, reference type or void)
+ // -> T&&
+ static_assert(is_same_v<add_rvalue_reference_t<int>, int&&>, "add_rvalue_reference failure");
+ static_assert(is_same_v<add_rvalue_reference_t<const int>, const int&&>, "add_rvalue_reference failure");
+
+ // otherwise (cv- or ref-qualified function type, or reference type, or (possibly cv-qualified) void)
+ // -> T
+ static_assert(is_same_v<add_rvalue_reference_t<void() const>, void() const>, "add_rvalue_reference failure");
+ static_assert(is_same_v<add_rvalue_reference_t<void()&>, void()&>, "add_rvalue_reference failure");
+ static_assert(is_same_v<add_rvalue_reference_t<void()&&>, void()&&>, "add_rvalue_reference failure");
+ static_assert(is_same_v<add_rvalue_reference_t<int&>, int&>, "add_rvalue_reference failure");
+ static_assert(is_same_v<add_rvalue_reference_t<int&&>, int&&>, "add_rvalue_reference failure");
+ static_assert(is_same_v<add_rvalue_reference_t<const int&>, const int&>, "add_rvalue_reference failure");
+ static_assert(is_same_v<add_rvalue_reference_t<void>, void>, "add_rvalue_reference failure");
+ static_assert(is_same_v<add_rvalue_reference_t<const void>, const void>, "add_rvalue_reference failure");
+ }
+
+
+ // decay
+ {
+ static_assert((eastl::is_same<uint32_t, eastl::decay<uint32_t>::type>::value == true), "is_same failure");
+ static_assert((eastl::is_same<uint32_t, eastl::decay<const uint32_t>::type>::value == true), "is_same failure");
+ static_assert((eastl::is_same<uint32_t, eastl::decay<volatile uint32_t>::type>::value == true), "is_same failure");
+ static_assert((eastl::is_same<uint32_t, eastl::decay<uint32_t&>::type>::value == true), "is_same failure");
+ static_assert((eastl::is_same<uint32_t, eastl::decay<const uint32_t&>::type>::value == true), "is_same failure");
+ static_assert((eastl::is_same<uint32_t, eastl::decay<const volatile uint32_t&>::type>::value == true), "is_same failure");
+ #if !EASTL_NO_RVALUE_REFERENCES
+ static_assert((eastl::is_same<uint32_t, eastl::decay<uint32_t&&>::type>::value == true), "is_same failure");
+ #endif
+ static_assert((eastl::is_same<uint32_t*, eastl::decay<uint32_t[3]>::type>::value == true), "is_same failure");
+ static_assert((eastl::is_same<uint32_t(*)(char), eastl::decay<uint32_t(char)>::type>::value == true), "is_same failure");
+ }
+
+
+ // aligned_storage
+ // Some compilers don't support or ignore alignment specifications for stack variables,
+ // so we limit our testing to compilers that are known to support it.
+ #if (EA_ALIGN_MAX_AUTOMATIC >= 64) && defined(EA_PLATFORM_DESKTOP) // Actually there are additional compilers that support alignment of stack-based variables, most significantly clang, GCC 4.4+, and probably others.
+ {
+ // Test the creation of a single aligned value.
+ const size_t kArraySize = 100;
+ const size_t kExpectedAlignment = 64;
+ typedef uint16_t Type;
+
+ eastl::aligned_storage<sizeof(Type), kExpectedAlignment>::type data;
+ Type* value = new(&data) Type;
+ *value = 37;
+ EATEST_VERIFY_F((EA::StdC::GetAlignment(value) >= kExpectedAlignment) && (*value == 37),
+ "eastl::aligned_storage failure: Expected: %u, Actual: %u", (unsigned)kExpectedAlignment, (unsigned)EA::StdC::GetAlignment(value));
+
+ // Create an array of 100 values aligned.
+ eastl::aligned_storage<sizeof(Type), kExpectedAlignment>::type dataArray[kArraySize];
+ Type* valueArray = new(dataArray) Type[kArraySize];
+ valueArray[0] = 37;
+ EATEST_VERIFY_F((EA::StdC::GetAlignment(valueArray) >= kExpectedAlignment) && (valueArray[0] == 37),
+ "eastl::aligned_storage failure: Expected: %u, Actual: %u", (unsigned)kExpectedAlignment, (unsigned)EA::StdC::GetAlignment(valueArray));
+ }
+ {
+ // Test the creation of a single aligned value.
+ const size_t kArraySize = 17;
+ const size_t kExpectedAlignment = 128;
+ typedef uint8_t Type;
+
+ eastl::aligned_storage<sizeof(Type), kExpectedAlignment>::type data;
+ Type* value = new(&data) Type;
+ *value = 37;
+ EATEST_VERIFY_F((EA::StdC::GetAlignment(value) >= kExpectedAlignment) && (*value == 37),
+ "eastl::aligned_storage failure: Expected: %u, Actual: %u", (unsigned)kExpectedAlignment, (unsigned)EA::StdC::GetAlignment(value));
+
+ // Create an array of 100 values aligned.
+ eastl::aligned_storage<sizeof(Type), kExpectedAlignment>::type dataArray[kArraySize];
+ Type* valueArray = new(dataArray) Type[kArraySize];
+ valueArray[0] = 37;
+ EATEST_VERIFY_F((EA::StdC::GetAlignment(valueArray) >= kExpectedAlignment) && (valueArray[0] == 37),
+ "eastl::aligned_storage failure: Expected: %u, Actual: %u", (unsigned)kExpectedAlignment, (unsigned)EA::StdC::GetAlignment(valueArray));
+ }
+ {
+ // Test the creation of a single aligned value.
+ const size_t kArraySize = 27;
+ const size_t kExpectedAlignment = 256;
+ typedef uint32_t Type;
+
+ eastl::aligned_storage<sizeof(Type), kExpectedAlignment>::type data;
+ Type* value = new(&data) Type;
+ *value = 37;
+ EATEST_VERIFY_F((EA::StdC::GetAlignment(value) >= kExpectedAlignment) && (*value == 37),
+ "eastl::aligned_storage failure: Expected: %u, Actual: %u", (unsigned)kExpectedAlignment, (unsigned)EA::StdC::GetAlignment(value));
+
+ // Create an array of 100 values aligned.
+ eastl::aligned_storage<sizeof(Type), kExpectedAlignment>::type dataArray[kArraySize];
+ Type* valueArray = new(dataArray) Type[kArraySize];
+ valueArray[0] = 37;
+ EATEST_VERIFY_F((EA::StdC::GetAlignment(valueArray) >= kExpectedAlignment) && (valueArray[0] == 37),
+ "eastl::aligned_storage failure: Expected: %u, Actual: %u", (unsigned)kExpectedAlignment, (unsigned)EA::StdC::GetAlignment(valueArray));
+ }
+ #endif
+
+
+ // aligned_union
+ // Some compilers don't support or ignore alignment specifications for stack variables,
+ // so we limit our testing to compilers that are known to support it.
+ {
+ union AlignedUnion
+ {
+ char c;
+ int i;
+ float f;
+ char a[32];
+
+ AlignedUnion(float fValue) : f(fValue) {}
+ };
+
+ typedef aligned_union<sizeof(AlignedUnion), char, int, float>::type AlignedUnionStorage;
+
+ static_assert((EA_ALIGN_OF(AlignedUnionStorage) >= EA_ALIGN_OF(float)) && (EA_ALIGN_OF(AlignedUnionStorage) <= EA_ALIGN_OF(double)), "aligned_union failure");
+ static_assert(sizeof(AlignedUnionStorage) >= sizeof(AlignedUnion), "aligned_union failure");
+
+ AlignedUnionStorage alignedUnionStorage; // Since we know that our alignment is a simple value <= default alignment, we can just declare an object here and it will work with all compilers, including those that are limited in the stack alignments they support.
+ AlignedUnion* pAlignedUnion = new (&alignedUnionStorage) AlignedUnion(21.4f);
+ EATEST_VERIFY(pAlignedUnion->f == 21.4f);
+ pAlignedUnion->i = 37;
+ EATEST_VERIFY(pAlignedUnion->i == 37);
+ }
+
+
+ // union_cast
+ {
+ float f32 = -1234.f;
+ uint32_t n32 = union_cast<uint32_t>(f32);
+ float f32New = union_cast<float>(n32);
+ EATEST_VERIFY(f32 == f32New);
+
+ double f64 = -1234.0;
+ uint64_t n64 = union_cast<uint64_t>(f64);
+ double f64New = union_cast<double>(n64);
+ EATEST_VERIFY(f64 == f64New);
+
+ PodA a = { -1234 };
+ PodB b = union_cast<PodB>(a);
+ PodA aNew = union_cast<PodA>(b);
+ EATEST_VERIFY(a == aNew);
+
+ PodA* pA = new PodA;
+ PodB* pB = union_cast<PodB*>(pA);
+ PodA* pANew = union_cast<PodA*>(pB);
+ EATEST_VERIFY(pA == pANew);
+ delete pA;
+ }
+
+ // void_t
+ {
+ {
+ static_assert(is_same<void_t<void>, void>::value, "void_t failure");
+ static_assert(is_same<void_t<int>, void>::value, "void_t failure");
+ static_assert(is_same<void_t<short>, void>::value, "void_t failure");
+ static_assert(is_same<void_t<long>, void>::value, "void_t failure");
+ static_assert(is_same<void_t<long long>, void>::value, "void_t failure");
+ static_assert(is_same<void_t<ClassEmpty>, void>::value, "void_t failure");
+ static_assert(is_same<void_t<ClassNonEmpty>, void>::value, "void_t failure");
+ static_assert(is_same<void_t<vector<int>>, void>::value, "void_t failure");
+ }
+
+ // new sfinae mechansim test
+ {
+ static_assert(has_increment_operator_using_void_t<HasIncrementOperator>::value, "void_t sfinae failure");
+ static_assert(!has_increment_operator_using_void_t<ClassEmpty>::value, "void_t sfinae failure");
+ }
+ }
+
+ // detected idiom
+ {
+ static_assert(is_detected<has_increment_operator_detection, HasIncrementOperator>::value, "is_detected failure.");
+ static_assert(!is_detected<has_increment_operator_detection, ClassEmpty>::value, "is_detected failure.");
+
+ static_assert(is_same<detected_t<has_increment_operator_detection, HasIncrementOperator>, HasIncrementOperator&>::value, "is_detected_t failure.");
+ static_assert(is_same<detected_t<has_increment_operator_detection, ClassEmpty>, nonesuch>::value, "is_detected_t failure.");
+
+ using detected_or_positive_result = detected_or<float, has_increment_operator_detection, HasIncrementOperator>;
+ using detected_or_negative_result = detected_or<float, has_increment_operator_detection, ClassEmpty>;
+ static_assert(detected_or_positive_result::value_t::value, "detected_or failure.");
+ static_assert(!detected_or_negative_result::value_t::value, "detected_or failure.");
+ static_assert(is_same<detected_or_positive_result::type, HasIncrementOperator&>::value, "detected_or failure.");
+ static_assert(is_same<detected_or_negative_result::type, float>::value, "detected_or failure.");
+
+ static_assert(is_same<detected_or_t<float, has_increment_operator_detection, HasIncrementOperator>, HasIncrementOperator&>::value, "detected_or_t failure.");
+ static_assert(is_same<detected_or_t<float, has_increment_operator_detection, ClassEmpty>, float>::value, "detected_or_t failure.");
+
+ static_assert(is_detected_exact<HasIncrementOperator&, has_increment_operator_detection, HasIncrementOperator>::value, "is_detected_exact failure.");
+ static_assert(!is_detected_exact<float, has_increment_operator_detection, HasIncrementOperator>::value, "is_detected_exact failure.");
+ static_assert(is_detected_exact<nonesuch, has_increment_operator_detection, ClassEmpty>::value, "is_detected_exact failure.");
+ static_assert(!is_detected_exact<float, has_increment_operator_detection, ClassEmpty>::value, "is_detected_exact failure.");
+
+ static_assert(is_detected_convertible<HasIncrementOperator&, has_increment_operator_detection, HasIncrementOperator>::value, "is_detected_convertible failure.");
+ static_assert(is_detected_convertible<HasIncrementOperator, has_increment_operator_detection, HasIncrementOperator>::value, "is_detected_convertible failure.");
+ static_assert(!is_detected_convertible<float, has_increment_operator_detection, HasIncrementOperator>::value, "is_detected_convertible failure.");
+ static_assert(!is_detected_convertible<nonesuch, has_increment_operator_detection, ClassEmpty>::value, "is_detected_convertible failure.");
+ static_assert(!is_detected_convertible<float, has_increment_operator_detection, ClassEmpty>::value, "is_detected_convertible failure.");
+
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ static_assert(is_detected_v<has_increment_operator_detection, HasIncrementOperator>, "is_detected_v failure.");
+ static_assert(!is_detected_v<has_increment_operator_detection, ClassEmpty>, "is_detected_v failure.");
+
+ static_assert(is_detected_exact_v<HasIncrementOperator&, has_increment_operator_detection, HasIncrementOperator>, "is_detected_exact_v failure.");
+ static_assert(!is_detected_exact_v<float, has_increment_operator_detection, HasIncrementOperator>, "is_detected_exact_v failure.");
+ static_assert(is_detected_exact_v<nonesuch, has_increment_operator_detection, ClassEmpty>, "is_detected_exact_v failure.");
+ static_assert(!is_detected_exact_v<float, has_increment_operator_detection, ClassEmpty>, "is_detected_exact_v failure.");
+
+ static_assert(is_detected_convertible_v<HasIncrementOperator&, has_increment_operator_detection, HasIncrementOperator>, "is_detected_convertible_v failure.");
+ static_assert(is_detected_convertible_v<HasIncrementOperator, has_increment_operator_detection, HasIncrementOperator>, "is_detected_convertible_v failure.");
+ static_assert(!is_detected_convertible_v<float, has_increment_operator_detection, HasIncrementOperator>, "is_detected_convertible_v failure.");
+ static_assert(!is_detected_convertible_v<nonesuch, has_increment_operator_detection, ClassEmpty>, "is_detected_convertible_v failure.");
+ static_assert(!is_detected_convertible_v<float, has_increment_operator_detection, ClassEmpty>, "is_detected_convertible_v failure.");
+ #endif
+ }
+
+ // conjunction
+ {
+ static_assert( conjunction<>::value, "conjunction failure");
+ static_assert(!conjunction<false_type>::value, "conjunction failure");
+ static_assert(!conjunction<false_type, false_type>::value, "conjunction failure");
+ static_assert(!conjunction<false_type, false_type, false_type>::value, "conjunction failure");
+ static_assert(!conjunction<false_type, false_type, false_type, true_type>::value, "conjunction failure");
+ static_assert(!conjunction<false_type, false_type, true_type, true_type>::value, "conjunction failure");
+ static_assert(!conjunction<false_type, true_type, true_type, true_type>::value, "conjunction failure");
+ static_assert(!conjunction<true_type, true_type, true_type, true_type, false_type>::value, "conjunction failure");
+ static_assert(!conjunction<true_type, false_type, true_type, true_type, true_type>::value, "conjunction failure");
+ static_assert( conjunction<true_type, true_type, true_type, true_type, true_type>::value, "conjunction failure");
+ static_assert( conjunction<true_type, true_type, true_type, true_type>::value, "conjunction failure");
+ static_assert( conjunction<true_type, true_type, true_type>::value, "conjunction failure");
+ static_assert( conjunction<true_type>::value, "conjunction failure");
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ static_assert( conjunction_v<>, "conjunction failure");
+ static_assert(!conjunction_v<false_type>, "conjunction failure");
+ static_assert(!conjunction_v<false_type, false_type>, "conjunction failure");
+ static_assert(!conjunction_v<false_type, false_type, false_type>, "conjunction failure");
+ static_assert(!conjunction_v<false_type, false_type, false_type, true_type>, "conjunction failure");
+ static_assert(!conjunction_v<false_type, false_type, true_type, true_type>, "conjunction failure");
+ static_assert(!conjunction_v<false_type, true_type, true_type, true_type>, "conjunction failure");
+ static_assert(!conjunction_v<true_type, true_type, true_type, true_type, false_type>, "conjunction failure");
+ static_assert(!conjunction_v<true_type, false_type, true_type, true_type, true_type>, "conjunction failure");
+ static_assert( conjunction_v<true_type, true_type, true_type, true_type, true_type>, "conjunction failure");
+ static_assert( conjunction_v<true_type, true_type, true_type, true_type>, "conjunction failure");
+ static_assert( conjunction_v<true_type, true_type, true_type>, "conjunction failure");
+ static_assert( conjunction_v<true_type>, "conjunction failure");
+ #endif
+ }
+
+ // disjunction
+ {
+ static_assert(!disjunction<>::value, "disjunction failure");
+ static_assert(!disjunction<false_type>::value, "disjunction failure");
+ static_assert(!disjunction<false_type, false_type>::value, "disjunction failure");
+ static_assert(!disjunction<false_type, false_type, false_type>::value, "disjunction failure");
+ static_assert( disjunction<false_type, false_type, false_type, true_type>::value, "disjunction failure");
+ static_assert( disjunction<false_type, false_type, true_type, true_type>::value, "disjunction failure");
+ static_assert( disjunction<false_type, true_type, true_type, true_type>::value, "disjunction failure");
+ static_assert( disjunction<true_type, true_type, true_type, true_type, false_type>::value, "disjunction failure");
+ static_assert( disjunction<true_type, false_type, true_type, true_type, true_type>::value, "disjunction failure");
+ static_assert( disjunction<true_type, true_type, true_type, true_type, true_type>::value, "disjunction failure");
+ static_assert( disjunction<true_type, true_type, true_type, true_type>::value, "disjunction failure");
+ static_assert( disjunction<true_type, true_type, true_type>::value, "disjunction failure");
+ static_assert( disjunction<true_type>::value, "disjunction failure");
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ static_assert(!disjunction_v<>, "disjunction failure");
+ static_assert(!disjunction_v<false_type>, "disjunction failure");
+ static_assert(!disjunction_v<false_type, false_type>, "disjunction failure");
+ static_assert(!disjunction_v<false_type, false_type, false_type>, "disjunction failure");
+ static_assert( disjunction_v<false_type, false_type, false_type, true_type>, "disjunction failure");
+ static_assert( disjunction_v<false_type, false_type, true_type, true_type>, "disjunction failure");
+ static_assert( disjunction_v<false_type, true_type, true_type, true_type>, "disjunction failure");
+ static_assert( disjunction_v<true_type, true_type, true_type, true_type, false_type>, "disjunction failure");
+ static_assert( disjunction_v<true_type, false_type, true_type, true_type, true_type>, "disjunction failure");
+ static_assert( disjunction_v<true_type, true_type, true_type, true_type, true_type>, "disjunction failure");
+ static_assert( disjunction_v<true_type, true_type, true_type, true_type>, "disjunction failure");
+ static_assert( disjunction_v<true_type, true_type, true_type>, "disjunction failure");
+ static_assert( disjunction_v<true_type>, "disjunction failure");
+ #endif
+ }
+
+ // negation
+ {
+ static_assert( negation<false_type>::value, "negation failure");
+ static_assert(!negation<true_type>::value, "negation failure");
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ static_assert( negation_v<false_type>, "negation failure");
+ static_assert(!negation_v<true_type>, "negation failure");
+ #endif
+ }
+
+ // has_unique_object_representations
+ {
+ static_assert( has_unique_object_representations<bool>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<char16_t>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<char32_t>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<char>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<int>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<long long>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<long>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<short>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<signed char>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<unsigned char>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<unsigned int>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<unsigned long long>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<unsigned long>::value, "has_unique_object_representations failure");
+ static_assert( has_unique_object_representations<unsigned short>::value, "has_unique_object_representations failure");
+ static_assert(!has_unique_object_representations<void>::value, "has_unique_object_representations failure");
+#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type which is already handled...
+ static_assert( has_unique_object_representations<wchar_t>::value, "has_unique_object_representations failure");
+#endif
+
+ #if EASTL_TYPE_TRAIT_has_unique_object_representations_CONFORMANCE
+ {
+ struct packed_type { int a; };
+ static_assert( has_unique_object_representations<packed_type>::value, "has_unique_object_representations failure");
+
+ struct padded_type { int a; char b; int c; };
+ static_assert(!has_unique_object_representations<padded_type>::value, "has_unique_object_representations failure");
+ }
+ #endif
+ }
+
+ // is_final
+ {
+ #if (EA_COMPILER_HAS_FEATURE(is_final))
+ static_assert(std::is_final<FinalStruct>::value == eastl::is_final<FinalStruct>::value, "final struct not correctly detected");
+ static_assert(std::is_final<FinalClass>::value == eastl::is_final<FinalClass>::value, "final class not correctly detected");
+ static_assert(std::is_final<Enum>::value == eastl::is_final<Enum>::value, "enum not correctly detected");
+ static_assert(std::is_final<int>::value == eastl::is_final<int>::value, "int not correctly detected");
+ static_assert(std::is_final<Struct>::value == eastl::is_final<Struct>::value, "non-final struct not correctly detected");
+ static_assert(std::is_final<Class>::value == eastl::is_final<Class>::value, "non-final class not correctly detected");
+ #endif
+
+ // endian (big-endian and little; no mixed-endian/middle-endian)
+ static_assert(eastl::endian::big != eastl::endian::little, "little-endian and big-endian are not the same");
+ static_assert(eastl::endian::native == eastl::endian::big || eastl::endian::native == eastl::endian::little, "native may be little endian or big endian");
+ static_assert(!(eastl::endian::native == eastl::endian::big && eastl::endian::native == eastl::endian::little), "native cannot be both big and little endian");
+
+ #ifdef EA_SYSTEM_LITTLE_ENDIAN
+ static_assert(eastl::endian::native == eastl::endian::little, "must be little endian");
+ static_assert(eastl::endian::native != eastl::endian::big, "must not be big endian");
+ #else
+ static_assert(eastl::endian::native != eastl::endian::little, "must not be little endian");
+ static_assert(eastl::endian::native == eastl::endian::big, "must be big endian");
+ #endif
+ }
+
+ // has_equality
+ {
+ static_assert( has_equality_v<int>, "has_equality failure");
+ static_assert( has_equality_v<short>, "has_equality failure");
+ static_assert( has_equality_v<long>, "has_equality failure");
+ static_assert( has_equality_v<long long>, "has_equality failure");
+ static_assert( has_equality_v<TestObject>, "has_equality failure");
+ static_assert(!has_equality_v<MissingEquality>, "has_equality failure");
+ }
+
+ // is_aggregate
+ #if EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE
+ {
+ static_assert(!is_aggregate_v<int>, "is_aggregate failure");
+ static_assert( is_aggregate_v<int[]>, "is_aggregate failure");
+
+ {
+ struct Aggregrate {};
+ static_assert(is_aggregate_v<Aggregrate>, "is_aggregate failure");
+ }
+
+ {
+ struct NotAggregrate { NotAggregrate() {} }; // user provided ctor
+ static_assert(!is_aggregate_v<NotAggregrate>, "is_aggregate failure");
+ }
+
+ #if defined(EA_COMPILER_CPP11_ENABLED) && !defined(EA_COMPILER_CPP14_ENABLED)
+ // See https://en.cppreference.com/w/cpp/language/aggregate_initialization
+ // In C++11 the requirement was added to aggregate types that no default member initializers exist,
+ // however this requirement was removed in C++14.
+ {
+ struct NotAggregrate { int data = 42; }; // default member initializer
+ static_assert(!is_aggregate_v<NotAggregrate>, "is_aggregate failure");
+ }
+ #endif
+
+ {
+ struct NotAggregrate { virtual void foo() {} }; // virtual member function
+ static_assert(!is_aggregate_v<NotAggregrate>, "is_aggregate failure");
+ }
+ }
+ #endif
+
+ // is_complete_type
+ {
+ struct Foo
+ {
+ int x;
+ };
+
+ struct FooEmpty
+ {
+ };
+
+ struct Bar;
+
+ void FooFunc();
+
+ static_assert(eastl::internal::is_complete_type_v<Foo>, "is_complete_type failure");
+ static_assert(eastl::internal::is_complete_type_v<FooEmpty>, "is_complete_type failure");
+ static_assert(!eastl::internal::is_complete_type_v<Bar>, "is_complete_type failure");
+ static_assert(!eastl::internal::is_complete_type_v<void>, "is_complete_type failure");
+ static_assert(!eastl::internal::is_complete_type_v<volatile void>, "is_complete_type failure");
+ static_assert(!eastl::internal::is_complete_type_v<const void>, "is_complete_type failure");
+ static_assert(!eastl::internal::is_complete_type_v<const volatile void>, "is_complete_type failure");
+ static_assert(eastl::internal::is_complete_type_v<decltype(FooFunc)>, "is_complete_type failure");
+ }
+
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestUtility.cpp b/EASTL/test/source/TestUtility.cpp
new file mode 100644
index 0000000..e9027e5
--- /dev/null
+++ b/EASTL/test/source/TestUtility.cpp
@@ -0,0 +1,915 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EASTL/utility.h>
+#include <EAStdC/EAString.h>
+
+struct BasicObject
+{
+ int mX;
+ BasicObject(int x) : mX(x) {}
+};
+
+inline bool operator==(const BasicObject& t1, const BasicObject& t2) { return t1.mX == t2.mX; }
+
+inline bool operator<(const BasicObject& t1, const BasicObject& t2) { return t1.mX < t2.mX; }
+
+///////////////////////////////////////////////////////////////////////////////
+// TestUtilityPair
+//
+static int TestUtilityPair()
+{
+ using namespace eastl;
+
+ int nErrorCount = 0;
+
+ {
+ int _0 = 0, _2 = 2, _3 = 3;
+ float _1f = 1.f;
+
+ // pair();
+ pair<int, float> ifPair1;
+ EATEST_VERIFY((ifPair1.first == 0) && (ifPair1.second == 0.f));
+
+ // pair(const T1& x, const T2& y);
+ pair<int, float> ifPair2(_0, _1f);
+ EATEST_VERIFY((ifPair2.first == 0) && (ifPair2.second == 1.f));
+
+ // template <typename U, typename V>
+ // pair(U&& u, V&& v);
+ pair<int, float> ifPair3(int(0), float(1.f));
+ EATEST_VERIFY((ifPair3.first == 0) && (ifPair3.second == 1.f));
+
+ // template <typename U>
+ // pair(U&& x, const T2& y);
+ const float fConst1 = 1.f;
+ pair<int, float> ifPair4(int(0), fConst1);
+ EATEST_VERIFY((ifPair4.first == 0) && (ifPair4.second == 1.f));
+
+ // template <typename V>
+ // pair(const T1& x, V&& y);
+ const int intConst0 = 0;
+ pair<int, float> ifPair5(intConst0, float(1.f));
+ EATEST_VERIFY((ifPair5.first == 0) && (ifPair5.second == 1.f));
+
+ pair<const int, const int> constIntPair(_2, _3);
+ EATEST_VERIFY((constIntPair.first == 2) && (constIntPair.second == 3));
+
+ // pair(const pair&) = default;
+ pair<int, float> ifPair2Copy(ifPair2);
+ EATEST_VERIFY((ifPair2Copy.first == 0) && (ifPair2Copy.second == 1.f));
+
+ pair<const int, const int> constIntPairCopy(constIntPair);
+ EATEST_VERIFY((constIntPairCopy.first == 2) && (constIntPairCopy.second == 3));
+
+ // template<typename U, typename V>
+ // pair(const pair<U, V>& p);
+ pair<long, double> idPair2(ifPair2);
+ EATEST_VERIFY((idPair2.first == 0) && (idPair2.second == 1.0));
+
+ // pair(pair&& p);
+
+ // template<typename U, typename V>
+ // pair(pair<U, V>&& p);
+
+ // pair& operator=(const pair& p);
+
+ // template<typename U, typename V>
+ // pair& operator=(const pair<U, V>& p);
+
+ // pair& operator=(pair&& p);
+
+ // template<typename U, typename V>
+ // pair& operator=(pair<U, V>&& p);
+
+ // void swap(pair& p);
+
+ // use_self, use_first, use_second
+ use_self<pair<int, float> > usIFPair;
+ use_first<pair<int, float> > u1IFPair;
+ use_second<pair<int, float> > u2IFPair;
+
+ ifPair2 = usIFPair(ifPair2);
+ EATEST_VERIFY((ifPair2.first == 0) && (ifPair2.second == 1));
+
+ int first = u1IFPair(ifPair2);
+ EATEST_VERIFY(first == 0);
+
+ float second = u2IFPair(ifPair2);
+ EATEST_VERIFY(second == 1);
+
+ // make_pair
+ pair<int, float> p1 = make_pair(int(0), float(1));
+ EATEST_VERIFY((p1.first == 0) && (p1.second == 1.f));
+
+ pair<int, float> p2 = make_pair_ref(int(0), float(1));
+ EATEST_VERIFY((p2.first == 0) && (p2.second == 1.f));
+
+ pair<const char*, int> p3 = eastl::make_pair("a", 1);
+ EATEST_VERIFY((EA::StdC::Strcmp(p3.first, "a") == 0) && (p3.second == 1));
+
+ pair<const char*, int> p4 = eastl::make_pair<const char*, int>("a", 1);
+ EATEST_VERIFY((EA::StdC::Strcmp(p4.first, "a") == 0) && (p4.second == 1));
+
+ pair<int, const char*> p5 = eastl::make_pair<int, const char*>(1, "b");
+ EATEST_VERIFY((p5.first == 1) && (EA::StdC::Strcmp(p5.second, "b") == 0));
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ pair<int, int> p6 = eastl::make_pair<int, int>(1, 2);
+ pair<int, int> p7 = eastl::make_pair<int, int>(2, 1);
+ pair<int, int> p8 = eastl::make_pair<int, int>(7, 8);
+ pair<int, int> p9 = eastl::make_pair<int, int>(10, 1);
+
+ EATEST_VERIFY( (p6 <=> p7) != 0);
+ EATEST_VERIFY( (p6 <=> p6) == 0);
+ EATEST_VERIFY( (p7 <=> p8) < 0);
+ EATEST_VERIFY( (p7 <=> p8) <= 0);
+ EATEST_VERIFY( (p9 <=> p8) > 0);
+ EATEST_VERIFY( (p9 <=> p8) >= 0);
+#endif
+
+#if !defined(EA_COMPILER_NO_AUTO)
+ auto p60 = eastl::make_pair("a", "b"); // Different strings of same length of 1.
+ EATEST_VERIFY((EA::StdC::Strcmp(p60.first, "a") == 0) && (EA::StdC::Strcmp(p60.second, "b") == 0));
+
+ auto p61 = eastl::make_pair("ab", "cd"); // Different strings of same length > 1.
+ EATEST_VERIFY((EA::StdC::Strcmp(p61.first, "ab") == 0) && (EA::StdC::Strcmp(p61.second, "cd") == 0));
+
+ auto p62 = eastl::make_pair("abc", "bcdef"); // Different strings of different length.
+ EATEST_VERIFY((EA::StdC::Strcmp(p62.first, "abc") == 0) && (EA::StdC::Strcmp(p62.second, "bcdef") == 0));
+
+ char strA[] = "a";
+ auto p70 = eastl::make_pair(strA, strA);
+ EATEST_VERIFY((EA::StdC::Strcmp(p70.first, "a") == 0) && (EA::StdC::Strcmp(p70.second, "a") == 0));
+
+ char strBC[] = "bc";
+ auto p71 = eastl::make_pair(strA, strBC);
+ EATEST_VERIFY((EA::StdC::Strcmp(p71.first, "a") == 0) && (EA::StdC::Strcmp(p71.second, "bc") == 0));
+
+ const char cstrA[] = "a";
+ auto p80 = eastl::make_pair(cstrA, cstrA);
+ EATEST_VERIFY((EA::StdC::Strcmp(p80.first, "a") == 0) && (EA::StdC::Strcmp(p80.second, "a") == 0));
+
+ const char cstrBC[] = "bc";
+ auto p81 = eastl::make_pair(cstrA, cstrBC);
+ EATEST_VERIFY((EA::StdC::Strcmp(p81.first, "a") == 0) && (EA::StdC::Strcmp(p81.second, "bc") == 0));
+#endif
+ }
+
+ {
+// One-off tests and regressions
+
+#if EASTL_PAIR_CONFORMANCE // See http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#811
+ pair<char*, char*> zeroLiteralPair(0, 0);
+ EATEST_VERIFY((zeroLiteralPair.first == NULL) && (zeroLiteralPair.second == NULL));
+#endif
+
+ // template<typename U>
+ // pair(U&& x, const T2& y)
+ typedef eastl::pair<uint16_t, const char8_t*> LCIDMapping;
+ LCIDMapping lcidMappingArray[1] = {LCIDMapping(0x0036, EA_CHAR8("af"))}; // Note that 0x0036 is of type int.
+ EATEST_VERIFY((lcidMappingArray[0].first == 0x0036));
+
+ // template<typename V>
+ // pair(const T1& x, V&& y)
+ typedef eastl::pair<const char8_t*, uint16_t> LCIDMapping2;
+ LCIDMapping2 lcidMapping2Array[1] = {LCIDMapping2(EA_CHAR8("af"), 0x0036)};
+ EATEST_VERIFY((lcidMapping2Array[0].second == 0x0036));
+
+// The following code was giving an EDG compiler:
+// error 145: a value of type "int" cannot be used to initialize
+// an entity of type "void *" second(eastl::forward<V>(v)) {}
+// template <typename U, typename V>
+// pair(U&& u, V&& v);
+#if EASTL_PAIR_CONFORMANCE
+ typedef eastl::pair<float*, void*> TestPair1;
+ float fOne = 1.f;
+ TestPair1 testPair1(&fOne, NULL);
+ EATEST_VERIFY(*testPair1.first == 1.f);
+#endif
+ }
+
+#ifndef EA_COMPILER_NO_STRUCTURED_BINDING
+ // pair structured bindings test
+ {
+ eastl::pair<int, int> t = {1,2};
+ auto [x,y] = t;
+ EATEST_VERIFY(x == 1);
+ EATEST_VERIFY(y == 2);
+ }
+
+ {
+ auto t = eastl::make_pair(1, 2);
+ auto [x,y] = t;
+ EATEST_VERIFY(x == 1);
+ EATEST_VERIFY(y == 2);
+ }
+
+ { // reported user-regression structured binding unpacking for iterators
+ eastl::vector<int> v = {1,2,3,4,5,6};
+ auto t = eastl::make_pair(v.begin(), v.end() - 1);
+ auto [x,y] = t;
+ EATEST_VERIFY(*x == 1);
+ EATEST_VERIFY(*y == 6);
+ }
+
+ { // reported user-regression structured binding unpacking for iterators
+ eastl::vector<int> v = {1,2,3,4,5,6};
+ auto t = eastl::make_pair(v.begin(), v.end());
+ auto [x,y] = t;
+ EATEST_VERIFY(*x == 1);
+ EA_UNUSED(y);
+ }
+
+ { // reported user-regression for const structured binding unpacking for iterators
+ eastl::vector<int> v = {1,2,3,4,5,6};
+ const auto [x,y] = eastl::make_pair(v.begin(), v.end());;
+ EATEST_VERIFY(*x == 1);
+ EA_UNUSED(y);
+ }
+#endif
+
+ return nErrorCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TestUtilityRelops
+//
+static int TestUtilityRelops()
+{
+ int nErrorCount = 0;
+
+ {
+ using namespace eastl::rel_ops; // Defines default versions of operators !=, <, >, <=, >= based on == and <.
+
+ BasicObject bo1(1), bo2(2);
+
+ EATEST_VERIFY(!(bo1 == bo2));
+ EATEST_VERIFY((bo1 != bo2));
+ EATEST_VERIFY((bo1 < bo2));
+ EATEST_VERIFY(!(bo1 > bo2));
+ EATEST_VERIFY((bo1 <= bo2));
+ EATEST_VERIFY(!(bo1 >= bo2));
+ }
+
+ return nErrorCount;
+}
+
+// ThrowSwappable
+struct ThrowSwappable
+{
+};
+
+void swap(ThrowSwappable& x, ThrowSwappable& y) EA_NOEXCEPT_IF(false)
+{
+ ThrowSwappable temp(x);
+ x = y;
+ y = temp;
+
+#if EASTL_EXCEPTIONS_ENABLED
+ throw int();
+#endif
+}
+
+#if EASTL_TYPE_TRAIT_is_nothrow_swappable_CONFORMANCE
+// NoThrowSwappable
+struct NoThrowSwappable
+{
+};
+
+void swap(NoThrowSwappable& x, NoThrowSwappable& y) EA_NOEXCEPT_IF(true)
+{
+ NoThrowSwappable temp(x);
+ x = y;
+ y = temp;
+}
+#endif
+
+struct Swappable1 {};
+struct Swappable2 {};
+struct Swappable3 {};
+void swap(Swappable1&, Swappable2&) {}
+void swap(Swappable2&, Swappable1&) {}
+void swap(Swappable1&, Swappable3&) {} // intentionally missing 'swap(Swappable3, Swappable1)'
+
+
+static int TestUtilitySwap()
+{
+ int nErrorCount = 0;
+
+// is_swappable
+// is_nothrow_swappable
+#if EASTL_TYPE_TRAIT_is_swappable_CONFORMANCE
+ static_assert((eastl::is_swappable<int>::value == true), "is_swappable failure");
+ static_assert((eastl::is_swappable<eastl::vector<int> >::value == true), "is_swappable failure");
+ static_assert((eastl::is_swappable<ThrowSwappable>::value == true), "is_swappable failure");
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ static_assert((eastl::is_swappable_v<int> == true), "is_swappable failure");
+ static_assert((eastl::is_swappable_v<eastl::vector<int> > == true), "is_swappable failure");
+ static_assert((eastl::is_swappable_v<ThrowSwappable> == true), "is_swappable failure");
+ #endif
+// Need to come up with a class that's not swappable. How do we do that, given the universal swap template?
+// static_assert((eastl::is_swappable<?>::value == false), "is_swappable failure");
+#endif
+
+#if EASTL_TYPE_TRAIT_is_nothrow_swappable_CONFORMANCE
+ static_assert((eastl::is_nothrow_swappable<int>::value == true), "is_nothrow_swappable failure"); // There currently isn't any specialization for swap of scalar types that's nothrow.
+ static_assert((eastl::is_nothrow_swappable<eastl::vector<int> >::value == false), "is_nothrow_swappable failure");
+ static_assert((eastl::is_nothrow_swappable<ThrowSwappable>::value == false), "is_nothrow_swappable failure");
+ static_assert((eastl::is_nothrow_swappable<NoThrowSwappable>::value == true), "is_nothrow_swappable failure");
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ static_assert((eastl::is_nothrow_swappable_v<int> == true), "is_nothrow_swappable failure"); // There currently isn't any specialization for swap of scalar types that's nothrow.
+ static_assert((eastl::is_nothrow_swappable_v<eastl::vector<int>> == false), "is_nothrow_swappable failure");
+ static_assert((eastl::is_nothrow_swappable_v<ThrowSwappable> == false), "is_nothrow_swappable failure");
+ static_assert((eastl::is_nothrow_swappable_v<NoThrowSwappable> == true), "is_nothrow_swappable failure");
+ #endif
+#endif
+
+#if EASTL_VARIADIC_TEMPLATES_ENABLED
+// is_swappable_with
+// is_nothrow_swappable_with
+ static_assert(eastl::is_swappable_with<int&, int&>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<int, int>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<int&, int>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<int, int&>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<int, short>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<int, long>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<int, eastl::vector<int>>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<void, void>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<int, void>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<void, int>::value, "is_swappable_with failure");
+ static_assert(!eastl::is_swappable_with<ThrowSwappable, ThrowSwappable>::value, "is_swappable_with failure");
+ static_assert(eastl::is_swappable_with<ThrowSwappable&, ThrowSwappable&>::value, "is_swappable_with failure");
+ static_assert(eastl::is_swappable_with<Swappable1&, Swappable1&>::value, "is_swappable_with failure");
+ static_assert(eastl::is_swappable_with<Swappable1&, Swappable2&>::value, "is_swappable_with failure");
+ static_assert(eastl::is_swappable_with<Swappable2&, Swappable1&>::value, "is_swappable_with failure");
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ static_assert(eastl::is_swappable_with_v<int&, int&>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<int, int>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<int&, int>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<int, int&>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<int, short>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<int, long>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<int, eastl::vector<int>>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<void, void>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<int, void>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<void, int>, "is_swappable_with_v failure");
+ static_assert(!eastl::is_swappable_with_v<ThrowSwappable, ThrowSwappable>, "is_swappable_with_v failure");
+ static_assert(eastl::is_swappable_with_v<ThrowSwappable&, ThrowSwappable&>, "is_swappable_with_v failure");
+ static_assert(eastl::is_swappable_with_v<Swappable1&, Swappable1&>, "is_swappable_with_v failure");
+ static_assert(eastl::is_swappable_with_v<Swappable1&, Swappable2&>, "is_swappable_with_v failure");
+ static_assert(eastl::is_swappable_with_v<Swappable2&, Swappable1&>, "is_swappable_with_v failure");
+ #endif // EASTL_VARIABLE_TEMPLATES_ENABLED
+
+#if EASTL_TYPE_TRAIT_is_nothrow_swappable_with_CONFORMANCE
+ static_assert(eastl::is_nothrow_swappable_with<int&, int&>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<int, int>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<int&, int>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<int, int&>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<int, short>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<int, long>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<int, eastl::vector<int>>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<void, void>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<int, void>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<void, int>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<ThrowSwappable, ThrowSwappable>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<ThrowSwappable&, ThrowSwappable&>::value, "is_nothrow_swappable_with failure");
+ static_assert(!eastl::is_nothrow_swappable_with<NoThrowSwappable, NoThrowSwappable>::value, "is_nothrow_swappable_with failure");
+ static_assert(eastl::is_nothrow_swappable_with<NoThrowSwappable&, NoThrowSwappable&>::value, "is_nothrow_swappable_with failure");
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ static_assert(eastl::is_nothrow_swappable_with_v<int&, int&>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<int, int>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<int&, int>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<int, int&>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<int, short>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<int, long>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<int, eastl::vector<int>>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<void, void>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<int, void>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<void, int>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<ThrowSwappable, ThrowSwappable>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<ThrowSwappable&, ThrowSwappable&>, "is_nothrow_swappable_with_v failure");
+ static_assert(!eastl::is_nothrow_swappable_with_v<NoThrowSwappable, NoThrowSwappable>, "is_nothrow_swappable_with_v failure");
+ static_assert(eastl::is_nothrow_swappable_with_v<NoThrowSwappable&, NoThrowSwappable&>, "is_nothrow_swappable_with_v failure");
+ #endif // EASTL_VARIABLE_TEMPLATES_ENABLED
+#endif
+#endif // EASTL_VARIADIC_TEMPLATES_ENABLED
+
+ return nErrorCount;
+}
+
+#if !defined(EA_COMPILER_NO_NOEXCEPT)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Warning C4626 warns against an implicitly deleted move assignment operator.
+// This warning was disabled by default in VS2013. It was enabled by default in
+// VS2015. Since the the tests below are explicitly testing move construction
+// of the various classes explicitly deleting the move assignment to remove the
+// warning is safe.
+//
+// https://msdn.microsoft.com/en-us/library/23k5d385.aspx
+
+struct noexcept_move_copy
+{
+ bool mStatus;
+
+ noexcept_move_copy() : mStatus(true) {}
+
+ noexcept_move_copy(const noexcept_move_copy&) = default;
+
+ noexcept_move_copy(noexcept_move_copy&& r) noexcept { r.mStatus = false; }
+
+ noexcept_move_copy& operator=(const noexcept_move_copy&) = delete; // required as VS2015 enabled C4626 by default.
+};
+
+struct noexcept_move_no_copy
+{
+ bool mStatus;
+
+ noexcept_move_no_copy() : mStatus(true) {}
+
+ noexcept_move_no_copy(const noexcept_move_no_copy&) = delete;
+
+ noexcept_move_no_copy(noexcept_move_no_copy&& r) noexcept { r.mStatus = false; };
+
+ noexcept_move_no_copy& operator=(const noexcept_move_no_copy&) = delete; // required as VS2015 enabled C4626 by default.
+};
+
+struct except_move_copy
+{
+ bool mStatus;
+
+ except_move_copy() : mStatus(true) {}
+
+ except_move_copy(const except_move_copy&) = default;
+
+ except_move_copy(except_move_copy&& r) noexcept(false) { r.mStatus = false; };
+
+ except_move_copy& operator=(const except_move_copy&) = delete; // required as VS2015 enabled C4626 by default.
+};
+
+struct except_move_no_copy
+{
+ bool mStatus;
+
+ except_move_no_copy() : mStatus(true) {}
+
+ except_move_no_copy(const except_move_no_copy&) = delete;
+
+ except_move_no_copy(except_move_no_copy&& r) noexcept(false) { r.mStatus = false; };
+
+ except_move_no_copy& operator=(const except_move_no_copy&) = delete; // required as VS2015 enabled C4626 by default.
+};
+#endif
+
+static int TestUtilityMove()
+{
+ int nErrorCount = 0;
+
+// move_if_noexcept
+#if !defined(EA_COMPILER_NO_NOEXCEPT)
+ noexcept_move_copy nemcA;
+ noexcept_move_copy nemcB =
+ eastl::move_if_noexcept(nemcA); // nemcB should be constructed via noexcept_move_copy(noexcept_move_copy&&)
+ EATEST_VERIFY(nemcA.mStatus == false);
+ EA_UNUSED(nemcB);
+
+ noexcept_move_no_copy nemncA;
+ noexcept_move_no_copy nemncB = eastl::move_if_noexcept(
+ nemncA); // nemncB should be constructed via noexcept_move_no_copy(noexcept_move_no_copy&&)
+ EATEST_VERIFY(nemncA.mStatus == false);
+ EA_UNUSED(nemncB);
+
+ except_move_copy emcA;
+ except_move_copy emcB = eastl::move_if_noexcept(
+ emcA); // emcB should be constructed via except_move_copy(const except_move_copy&) if exceptions are enabled.
+#if EASTL_EXCEPTIONS_ENABLED
+ EATEST_VERIFY(emcA.mStatus == true);
+#else
+ EATEST_VERIFY(emcA.mStatus == false);
+#endif
+ EA_UNUSED(emcB);
+
+ except_move_no_copy emncA;
+ except_move_no_copy emncB =
+ eastl::move_if_noexcept(emncA); // emncB should be constructed via except_move_no_copy(except_move_no_copy&&)
+ EATEST_VERIFY(emncA.mStatus == false);
+ EA_UNUSED(emncB);
+#endif
+
+ return nErrorCount;
+}
+
+static int TestUtilityIntegerSequence()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+#if EASTL_VARIADIC_TEMPLATES_ENABLED
+
+ EATEST_VERIFY((integer_sequence<int, 0, 1, 2, 3, 4>::size() == 5));
+ EATEST_VERIFY((make_integer_sequence<int, 5>::size() == 5));
+ static_assert(is_same<make_integer_sequence<int, 5>, integer_sequence<int, 0, 1, 2, 3, 4>>::value);
+
+ EATEST_VERIFY((index_sequence<0, 1, 2, 3, 4>::size() == 5));
+ EATEST_VERIFY((make_index_sequence<5>::size() == 5));
+ static_assert(is_same<make_index_sequence<5>, index_sequence<0, 1, 2, 3, 4>>::value);
+ static_assert(is_same<make_index_sequence<5>, integer_sequence<size_t, 0, 1, 2, 3, 4>>::value);
+#endif // EASTL_VARIADIC_TEMPLATES_ENABLED
+
+ return nErrorCount;
+}
+
+static int TestUtilityExchange()
+{
+ int nErrorCount = 0;
+
+ {
+ int a = 0;
+ auto r = eastl::exchange(a, 1);
+
+ EATEST_VERIFY(r == 0);
+ EATEST_VERIFY(a == 1);
+ }
+
+ {
+ int a = 0;
+ auto r = eastl::exchange(a, 1.78);
+
+ EATEST_VERIFY(r == 0);
+ EATEST_VERIFY(a == 1);
+ }
+
+ {
+ int a = 0;
+ auto r = eastl::exchange(a, 1.78f);
+
+ EATEST_VERIFY(r == 0);
+ EATEST_VERIFY(a == 1);
+ }
+
+ {
+ int a = 0, b = 1;
+ auto r = eastl::exchange(a, b);
+
+ EATEST_VERIFY(r == 0);
+ EATEST_VERIFY(a == 1);
+ EATEST_VERIFY(b == 1);
+ }
+
+ {
+ bool b = true;
+
+ auto r = eastl::exchange(b, true);
+ EATEST_VERIFY(r);
+
+ r = eastl::exchange(b, false);
+ EATEST_VERIFY(r);
+ EATEST_VERIFY(!b);
+
+ r = eastl::exchange(b, true);
+ EATEST_VERIFY(!r);
+ EATEST_VERIFY(b);
+ }
+
+ {
+ TestObject::Reset();
+
+ TestObject a(42);
+ auto r = eastl::exchange(a, TestObject(24));
+
+ EATEST_VERIFY(r.mX == 42);
+ EATEST_VERIFY(a.mX == 24);
+ }
+
+ {
+ const char* const pElectronicArts = "Electronic Arts";
+ const char* const pEAVancouver = "EA Vancouver";
+
+ eastl::string a(pElectronicArts);
+ auto r = eastl::exchange(a, pEAVancouver);
+
+ EATEST_VERIFY(r == pElectronicArts);
+ EATEST_VERIFY(a == pEAVancouver);
+
+ r = eastl::exchange(a, "EA Standard Template Library");
+ EATEST_VERIFY(a == "EA Standard Template Library");
+ }
+
+ // Construct pair using single move constructor
+ {
+ struct TestPairSingleMoveConstructor
+ {
+ void test(int&& val)
+ {
+ eastl::pair<int,int> p(eastl::pair_first_construct, eastl::move(val));
+ }
+ };
+
+ int i1 = 1;
+ TestPairSingleMoveConstructor test;
+ test.test(eastl::move(i1));
+ }
+
+ // User reported regression where via reference collapsing, we see the same single element ctor defined twice.
+ //
+ // T = const U&
+ // pair(const T&) -> pair(const const U& &) -> pair(const U&)
+ // pair(T&&) -> pair(const U& &&) -> pair(const U&)
+ {
+ struct FooType {};
+
+ using VectorOfPairWithReference = eastl::vector<eastl::pair<const FooType&, float>>;
+
+ VectorOfPairWithReference v;
+ }
+
+ return nErrorCount;
+}
+
+#if defined(EA_COMPILER_CPP20_ENABLED)
+template <typename T>
+static int TestCmpCommon()
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::cmp_equal(T(0), T(0)));
+ EATEST_VERIFY(eastl::cmp_equal(T(1), T(1)));
+ EATEST_VERIFY(eastl::cmp_equal(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::min()));
+ EATEST_VERIFY(eastl::cmp_equal(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::max()));
+ EATEST_VERIFY(!eastl::cmp_equal(T(0), T(1)));
+ EATEST_VERIFY(!eastl::cmp_equal(T(1), T(0)));
+ if (eastl::is_signed_v<T>)
+ {
+ EATEST_VERIFY(eastl::cmp_equal(T(-1), T(-1)));
+ EATEST_VERIFY(!eastl::cmp_equal(T(-1), T(-2)));
+ EATEST_VERIFY(!eastl::cmp_equal(T(-2), T(-1)));
+ }
+
+ EATEST_VERIFY(eastl::cmp_not_equal(T(1), T(0)));
+ EATEST_VERIFY(eastl::cmp_not_equal(T(0), T(1)));
+ EATEST_VERIFY(eastl::cmp_not_equal(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::max()));
+ EATEST_VERIFY(eastl::cmp_not_equal(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::min()));
+ if (eastl::is_signed_v<T>)
+ {
+ EATEST_VERIFY(!eastl::cmp_not_equal(T(-1), T(-1)));
+ EATEST_VERIFY(eastl::cmp_not_equal(T(-1), T(-2)));
+ EATEST_VERIFY(eastl::cmp_not_equal(T(-2), T(-1)));
+ }
+
+ EATEST_VERIFY(eastl::cmp_less(T(0), T(1)));
+ EATEST_VERIFY(eastl::cmp_less(T(5), T(10)));
+ EATEST_VERIFY(!eastl::cmp_less(T(0), T(0)));
+ EATEST_VERIFY(!eastl::cmp_less(T(1), T(0)));
+ EATEST_VERIFY(eastl::cmp_less(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::max()));
+ EATEST_VERIFY(!eastl::cmp_less(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::min()));
+ EATEST_VERIFY(!eastl::cmp_less(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::max()));
+ EATEST_VERIFY(!eastl::cmp_less(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::min()));
+ if (eastl::is_signed_v<T>)
+ {
+ EATEST_VERIFY(!eastl::cmp_less(T(-1), T(-1)));
+ EATEST_VERIFY(!eastl::cmp_less(T(-1), T(-2)));
+ EATEST_VERIFY(eastl::cmp_less(T(-2), T(-1)));
+ }
+
+ EATEST_VERIFY(eastl::cmp_less_equal(T(0), T(1)));
+ EATEST_VERIFY(eastl::cmp_less_equal(T(5), T(10)));
+ EATEST_VERIFY(eastl::cmp_less_equal(T(0), T(0)));
+ EATEST_VERIFY(eastl::cmp_less_equal(T(1), T(1)));
+ EATEST_VERIFY(!eastl::cmp_less_equal(T(1), T(0)));
+ EATEST_VERIFY(eastl::cmp_less_equal(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::max()));
+ EATEST_VERIFY(eastl::cmp_less_equal(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::min()));
+ EATEST_VERIFY(eastl::cmp_less_equal(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::max()));
+ EATEST_VERIFY(!eastl::cmp_less_equal(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::min()));
+ if (eastl::is_signed_v<T>)
+ {
+ EATEST_VERIFY(eastl::cmp_less_equal(T(-1), T(-1)));
+ EATEST_VERIFY(!eastl::cmp_less_equal(T(-1), T(-2)));
+ EATEST_VERIFY(eastl::cmp_less_equal(T(-2), T(-1)));
+ }
+
+ EATEST_VERIFY(eastl::cmp_greater(T(1), T(0)));
+ EATEST_VERIFY(eastl::cmp_greater(T(10), T(5)));
+ EATEST_VERIFY(!eastl::cmp_greater(T(0), T(0)));
+ EATEST_VERIFY(!eastl::cmp_greater(T(0), T(1)));
+ EATEST_VERIFY(eastl::cmp_greater(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::min()));
+ EATEST_VERIFY(!eastl::cmp_greater(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::min()));
+ EATEST_VERIFY(!eastl::cmp_greater(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::max()));
+ EATEST_VERIFY(!eastl::cmp_greater(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::max()));
+ if (eastl::is_signed_v<T>)
+ {
+ EATEST_VERIFY(!eastl::cmp_greater(T(-1), T(-1)));
+ EATEST_VERIFY(eastl::cmp_greater(T(-1), T(-2)));
+ EATEST_VERIFY(!eastl::cmp_greater(T(-2), T(-1)));
+ }
+
+ EATEST_VERIFY(eastl::cmp_greater_equal(T(1), T(0)));
+ EATEST_VERIFY(eastl::cmp_greater_equal(T(10), T(5)));
+ EATEST_VERIFY(eastl::cmp_greater_equal(T(0), T(0)));
+ EATEST_VERIFY(!eastl::cmp_greater_equal(T(0), T(1)));
+ EATEST_VERIFY(eastl::cmp_greater_equal(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::min()));
+ EATEST_VERIFY(eastl::cmp_greater_equal(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::min()));
+ EATEST_VERIFY(eastl::cmp_greater_equal(eastl::numeric_limits<T>::max(), eastl::numeric_limits<T>::max()));
+ EATEST_VERIFY(!eastl::cmp_greater_equal(eastl::numeric_limits<T>::min(), eastl::numeric_limits<T>::max()));
+ if (eastl::is_signed_v<T>)
+ {
+ EATEST_VERIFY(eastl::cmp_greater_equal(T(-1), T(-1)));
+ EATEST_VERIFY(eastl::cmp_greater_equal(T(-1), T(-2)));
+ EATEST_VERIFY(!eastl::cmp_greater_equal(T(-2), T(-1)));
+ }
+
+ return nErrorCount;
+}
+
+template <typename T, typename U>
+static int TestUtilityCmpEql(const T x, const U y)
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::cmp_equal(T(x), U(y)));
+ EATEST_VERIFY(eastl::cmp_equal(U(y), T(x)));
+ EATEST_VERIFY(!eastl::cmp_not_equal(T(x), U(y)));
+ EATEST_VERIFY(!eastl::cmp_not_equal(U(y), T(x)));
+
+ return nErrorCount;
+}
+
+template <typename T, typename U>
+static int TestUtilityCmpLess(const T x, const U y)
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::cmp_less(T(x), U(y)));
+ EATEST_VERIFY(!eastl::cmp_less(U(y), T(x)));
+
+ EATEST_VERIFY(!eastl::cmp_greater_equal(T(x), U(y)));
+ EATEST_VERIFY(eastl::cmp_greater_equal(U(y), T(x)));
+
+ return nErrorCount;
+}
+
+template <typename T, typename U>
+static int TestUtilityCmpGreater(const T x, const U y)
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::cmp_greater(T(x), U(y)));
+ EATEST_VERIFY(!eastl::cmp_greater(U(y), T(x)));
+
+ EATEST_VERIFY(!eastl::cmp_less_equal(T(x), U(y)));
+ EATEST_VERIFY(eastl::cmp_less_equal(U(y), T(x)));
+
+ return nErrorCount;
+}
+
+template <typename T, typename U>
+static int TestUtilityCmpLessEq(const T x, const U y)
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::cmp_less_equal(T(x), U(y)));
+ EATEST_VERIFY(eastl::cmp_less(T(x), U(y)) || eastl::cmp_equal(T(x), U(y)));
+
+ EATEST_VERIFY(eastl::cmp_greater_equal(U(y), T(x)));
+
+ return nErrorCount;
+}
+
+template <typename T, typename U>
+static int TestUtilityCmpGreaterEq(const T x, const U y)
+{
+ int nErrorCount = 0;
+
+ EATEST_VERIFY(eastl::cmp_greater_equal(T(x), U(y)));
+ EATEST_VERIFY(eastl::cmp_greater(T(x), U(y)) || eastl::cmp_equal(T(x), U(y)));
+
+ EATEST_VERIFY(eastl::cmp_less_equal(U(y), T(x)));
+
+ return nErrorCount;
+}
+
+static int TestUtilityIntegralComp()
+{
+ int nErrorCount = 0;
+
+ // Test integral comparisons among same types
+ nErrorCount += TestCmpCommon<int>();
+ nErrorCount += TestCmpCommon<short>();
+ nErrorCount += TestCmpCommon<long>();
+ nErrorCount += TestCmpCommon<long long>();
+
+ nErrorCount += TestCmpCommon<unsigned int>();
+ nErrorCount += TestCmpCommon<unsigned short>();
+ nErrorCount += TestCmpCommon<unsigned long>();
+ nErrorCount += TestCmpCommon<unsigned long long>();
+
+ // Test integral comparison among different types
+ nErrorCount += TestUtilityCmpEql(int(0), short(0));
+ nErrorCount += TestUtilityCmpEql(short(2), long(2));
+ nErrorCount += TestUtilityCmpEql(short(3), unsigned long(3));
+ nErrorCount += TestUtilityCmpEql(int(-5), long long(-5));
+ nErrorCount += TestUtilityCmpEql(short(-100), long long(-100));
+ nErrorCount += TestUtilityCmpEql(unsigned int(100), long(100));
+ nErrorCount += TestUtilityCmpEql(unsigned long long(100), int(100));
+
+ nErrorCount += TestUtilityCmpLess(int(0), long long(1));
+ nErrorCount += TestUtilityCmpLess(int(-1), unsigned long(1));
+ nErrorCount += TestUtilityCmpLess(short(-100), long long(100));
+ nErrorCount += TestUtilityCmpLess(eastl::numeric_limits<long>::min(), short(0));
+ nErrorCount += TestUtilityCmpLess(short(0), eastl::numeric_limits<int>::max());
+ nErrorCount += TestUtilityCmpLess(eastl::numeric_limits<unsigned short>::min(), eastl::numeric_limits<int>::max());
+ nErrorCount += TestUtilityCmpLess(eastl::numeric_limits<short>::max(), eastl::numeric_limits<long>::max());
+ nErrorCount += TestUtilityCmpLess(eastl::numeric_limits<int>::max(), eastl::numeric_limits<long long>::max());
+ nErrorCount += TestUtilityCmpLess(int(-100), unsigned int(0));
+ nErrorCount += TestUtilityCmpLess(eastl::numeric_limits<int>::min(), eastl::numeric_limits<unsigned int>::min());
+
+ nErrorCount += TestUtilityCmpGreater(int(1), short(0));
+ nErrorCount += TestUtilityCmpGreater(unsigned long(1), int(-1));
+ nErrorCount += TestUtilityCmpGreater(unsigned long long(100), short(-100));
+ nErrorCount += TestUtilityCmpGreater(short(0), eastl::numeric_limits<short>::min());
+ nErrorCount += TestUtilityCmpGreater(eastl::numeric_limits<long>::max(), unsigned short(5));
+ nErrorCount += TestUtilityCmpGreater(eastl::numeric_limits<long>::max(), eastl::numeric_limits<int>::min());
+ nErrorCount += TestUtilityCmpGreater(eastl::numeric_limits<int>::max(), eastl::numeric_limits<short>::max());
+ nErrorCount += TestUtilityCmpGreater(eastl::numeric_limits<long long>::max(), eastl::numeric_limits<int>::max());
+ nErrorCount += TestUtilityCmpGreater(unsigned int(0), int(-100));
+ nErrorCount += TestUtilityCmpGreater(eastl::numeric_limits<unsigned int>::min(), eastl::numeric_limits<int>::min());
+
+ nErrorCount += TestUtilityCmpLessEq(int(0), short(1));
+ nErrorCount += TestUtilityCmpLessEq(int(-1), long long(-1));
+ nErrorCount += TestUtilityCmpLessEq(short(-100), unsigned long long(100));
+ nErrorCount += TestUtilityCmpLessEq(short(-100), long long(-100));
+ nErrorCount += TestUtilityCmpLessEq(eastl::numeric_limits<int>::min(), short(0));
+ nErrorCount += TestUtilityCmpLessEq(short(0), eastl::numeric_limits<int>::max());
+ nErrorCount += TestUtilityCmpLessEq(eastl::numeric_limits<short>::min(), eastl::numeric_limits<short>::min());
+ nErrorCount += TestUtilityCmpLessEq(eastl::numeric_limits<int>::max(), eastl::numeric_limits<int>::max());
+ nErrorCount += TestUtilityCmpLessEq(eastl::numeric_limits<int>::max(), eastl::numeric_limits<long long>::max());
+ nErrorCount += TestUtilityCmpLessEq(int(50), unsigned int(50));
+ nErrorCount += TestUtilityCmpLessEq(eastl::numeric_limits<int>::min(), eastl::numeric_limits<unsigned int>::min());
+
+ nErrorCount += TestUtilityCmpGreaterEq(int(1), short(1));
+ nErrorCount += TestUtilityCmpGreaterEq(long long(-1), int(-1));
+ nErrorCount += TestUtilityCmpGreaterEq(long long(-100), short(-100));
+ nErrorCount += TestUtilityCmpGreaterEq(short(0), long(0));
+ nErrorCount += TestUtilityCmpGreaterEq(eastl::numeric_limits<long>::max(), eastl::numeric_limits<long>::max());
+ nErrorCount += TestUtilityCmpGreaterEq(eastl::numeric_limits<int>::max(), eastl::numeric_limits<short>::min());
+ nErrorCount += TestUtilityCmpGreaterEq(eastl::numeric_limits<int>::max(), eastl::numeric_limits<short>::max());
+ nErrorCount += TestUtilityCmpGreaterEq(eastl::numeric_limits<long long>::max(), eastl::numeric_limits<int>::max());
+ nErrorCount += TestUtilityCmpGreaterEq(unsigned int(0), int(0));
+ nErrorCount += TestUtilityCmpGreaterEq(eastl::numeric_limits<unsigned int>::min(), eastl::numeric_limits<int>::min());
+
+ // Test in_range
+ EATEST_VERIFY(eastl::in_range<int>(0));
+ EATEST_VERIFY(eastl::in_range<int>(eastl::numeric_limits<int>::min()));
+ EATEST_VERIFY(eastl::in_range<int>(eastl::numeric_limits<int>::max()));
+ EATEST_VERIFY(eastl::in_range<unsigned int>(0));
+ EATEST_VERIFY(eastl::in_range<unsigned int>(eastl::numeric_limits<unsigned int>::min()));
+ EATEST_VERIFY(eastl::in_range<unsigned int>(eastl::numeric_limits<unsigned int>::max()));
+ EATEST_VERIFY(!eastl::in_range<unsigned int>(-1));
+ EATEST_VERIFY(!eastl::in_range<int>(eastl::numeric_limits<unsigned int>::max()));
+ EATEST_VERIFY(!eastl::in_range<unsigned int>(eastl::numeric_limits<int>::min()));
+
+ EATEST_VERIFY(eastl::in_range<short>(100));
+ EATEST_VERIFY(eastl::in_range<short>(eastl::numeric_limits<short>::min()));
+ EATEST_VERIFY(eastl::in_range<short>(eastl::numeric_limits<short>::max()));
+ EATEST_VERIFY(eastl::in_range<unsigned short>(100));
+ EATEST_VERIFY(eastl::in_range<unsigned short>(eastl::numeric_limits<unsigned short>::min()));
+ EATEST_VERIFY(eastl::in_range<unsigned short>(eastl::numeric_limits<unsigned short>::max()));
+ EATEST_VERIFY(!eastl::in_range<unsigned short>(-1));
+ EATEST_VERIFY(!eastl::in_range<short>(eastl::numeric_limits<unsigned int>::max()));
+ EATEST_VERIFY(!eastl::in_range<unsigned short>(eastl::numeric_limits<int>::min()));
+
+ EATEST_VERIFY(eastl::in_range<long>(50));
+ EATEST_VERIFY(eastl::in_range<long>(eastl::numeric_limits<long>::min()));
+ EATEST_VERIFY(eastl::in_range<long>(eastl::numeric_limits<long>::max()));
+ EATEST_VERIFY(eastl::in_range<unsigned long>(50));
+ EATEST_VERIFY(eastl::in_range<unsigned long>(eastl::numeric_limits<unsigned long>::min()));
+ EATEST_VERIFY(eastl::in_range<unsigned long>(eastl::numeric_limits<unsigned long>::max()));
+ EATEST_VERIFY(!eastl::in_range<unsigned long>(-1));
+ EATEST_VERIFY(!eastl::in_range<long>(eastl::numeric_limits<unsigned int>::max()));
+ EATEST_VERIFY(!eastl::in_range<unsigned long>(eastl::numeric_limits<int>::min()));
+
+ return nErrorCount;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// TestUtility
+//
+int TestUtility()
+{
+ int nErrorCount = 0;
+
+ nErrorCount += TestUtilityPair();
+ nErrorCount += TestUtilityRelops();
+ nErrorCount += TestUtilitySwap();
+ nErrorCount += TestUtilityMove();
+ nErrorCount += TestUtilityIntegerSequence();
+ nErrorCount += TestUtilityExchange();
+#if defined(EA_COMPILER_CPP20_ENABLED)
+ nErrorCount += TestUtilityIntegralComp();
+#endif
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestVariant.cpp b/EASTL/test/source/TestVariant.cpp
new file mode 100644
index 0000000..2a78a89
--- /dev/null
+++ b/EASTL/test/source/TestVariant.cpp
@@ -0,0 +1,1823 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EASTL/string.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/sort.h>
+#include <EASTL/bonus/overloaded.h>
+
+#ifdef EA_COMPILER_CPP14_ENABLED
+#include "ConceptImpls.h"
+#include <EASTL/variant.h>
+
+
+#if EASTL_EXCEPTIONS_ENABLED
+
+// Intentionally Non-Trivial.
+// There are optimizations we can make in variant if the types are trivial that we don't currently do but can do.
+template <typename T>
+struct valueless_struct
+{
+ valueless_struct() {}
+
+ valueless_struct(const valueless_struct&) {}
+
+ ~valueless_struct() {}
+
+ struct exception_tag {};
+
+ operator T() const { throw exception_tag{}; }
+};
+
+#endif
+
+
+int TestVariantAlternative()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+ {
+ using v_t = variant<int>;
+ static_assert(is_same_v<variant_alternative_t<0, v_t>, int>, "error variant_alternative");
+ }
+ {
+ using v_t = variant<int, long, short, char>;
+
+ static_assert(is_same_v<variant_alternative_t<0, v_t>, int>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<1, v_t>, long>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<2, v_t>, short>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<3, v_t>, char>, "error variant_alternative");
+ }
+ {
+ struct custom_type1 {};
+ struct custom_type2 {};
+ struct custom_type3 {};
+
+ using v_t = variant<int, long, short, char, size_t, unsigned, signed, custom_type1, custom_type2, custom_type3>;
+
+ static_assert(is_same_v<variant_alternative_t<5, v_t>, unsigned>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<6, v_t>, signed>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<7, v_t>, custom_type1>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<8, v_t>, custom_type2>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<9, v_t>, custom_type3>, "error variant_alternative");
+ }
+ // cv-qualifier tests
+ {
+ using v_t = variant<int, const int, volatile int, const volatile int>;
+
+ static_assert(is_same_v<variant_alternative_t<0, v_t>, int>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<1, v_t>, const int>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<2, v_t>, volatile int>, "error variant_alternative");
+ static_assert(is_same_v<variant_alternative_t<3, v_t>, const volatile int>, "error variant_alternative");
+ }
+ return nErrorCount;
+}
+
+int TestVariantSize()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ static_assert(variant_size<variant<int>>() == 1, "error variant_size");
+ static_assert(variant_size<variant<int, int>>() == 2, "error variant_size");
+ static_assert(variant_size<variant<int, int, int, int>>() == 4, "error variant_size");
+ static_assert(variant_size<variant<const int>>() == 1, "error variant_size");
+ static_assert(variant_size<variant<volatile int>>() == 1, "error variant_size");
+ static_assert(variant_size<variant<const volatile int>>() == 1, "error variant_size");
+
+ static_assert(variant_size_v<variant<int>> == 1, "error variant_size");
+ static_assert(variant_size_v<variant<int, int>> == 2, "error variant_size");
+ static_assert(variant_size_v<variant<int, int, int, int>> == 4, "error variant_size");
+ static_assert(variant_size_v<variant<const int>> == 1, "error variant_size");
+ static_assert(variant_size_v<variant<volatile int>> == 1, "error variant_size");
+ static_assert(variant_size_v<variant<const volatile int>> == 1, "error variant_size");
+
+ static_assert(variant_size_v<variant<int, int>> == 2, "error variant_size_v");
+ static_assert(variant_size_v<variant<volatile int, const int>> == 2, "error variant_size_v");
+ static_assert(variant_size_v<variant<volatile int, const int, const volatile int>> == 3, "error variant_size_v");
+
+ return nErrorCount;
+}
+
+int TestVariantHash()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ { hash<monostate> h; EA_UNUSED(h); }
+
+ return nErrorCount;
+}
+
+int TestVariantBasic()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ { VERIFY(variant_npos == size_t(-1)); }
+
+ { variant<int> v; EA_UNUSED(v); }
+ { variant<int, short> v; EA_UNUSED(v); }
+ { variant<int, short, float> v; EA_UNUSED(v); }
+ { variant<int, short, float, char> v; EA_UNUSED(v); }
+ { variant<int, short, float, char, long> v; EA_UNUSED(v); }
+ { variant<int, short, float, char, long, long long> v; EA_UNUSED(v); }
+ { variant<int, short, float, char, long, long long, double> v; EA_UNUSED(v); }
+
+ { variant<monostate> v; EA_UNUSED(v); }
+ { variant<monostate, NotDefaultConstructible> v; EA_UNUSED(v); }
+ { variant<int, NotDefaultConstructible> v; EA_UNUSED(v); }
+
+ {
+ struct MyObj
+ {
+ MyObj() : i(1337) {}
+ ~MyObj() {}
+
+ int i;
+ };
+
+ struct MyObj2
+ {
+ MyObj2(int& ii) : i(ii) {}
+ ~MyObj2() {}
+
+ MyObj2& operator=(const MyObj2&) = delete;
+
+ int& i;
+ };
+
+ static_assert(!eastl::is_trivially_destructible_v<MyObj>, "MyObj can't be trivially destructible");
+ static_assert(!eastl::is_trivially_destructible_v<MyObj2>, "MyObj2 can't be trivially destructible");
+
+ {
+ eastl::variant<MyObj, MyObj2> myVar;
+ VERIFY(get<MyObj>(myVar).i == 1337);
+ }
+
+ {
+ eastl::variant<MyObj, MyObj2> myVar = MyObj();
+ VERIFY(get<MyObj>(myVar).i == 1337);
+ }
+
+ {
+ int i = 42;
+ eastl::variant<MyObj, MyObj2> myVar = MyObj2(i);
+ VERIFY(get<MyObj2>(myVar).i == 42);
+ }
+
+ {
+ auto m = MyObj();
+ m.i = 2000;
+
+ eastl::variant<MyObj, MyObj2> myVar = m;
+ VERIFY(get<MyObj>(myVar).i == 2000);
+ }
+ }
+
+ { variant<int, int> v; EA_UNUSED(v); }
+ { variant<const short, volatile short, const volatile short> v; EA_UNUSED(v); }
+ { variant<int, int, const short, volatile short, const volatile short> v; EA_UNUSED(v); }
+
+ {
+ // verify constructors and destructors are called
+ {
+ variant<TestObject> v = TestObject(1337);
+ VERIFY((get<TestObject>(v)).mX == 1337);
+
+ variant<TestObject> vCopy = v;
+ VERIFY((get<TestObject>(vCopy)).mX == 1337);
+ }
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ {
+ variant<string> v;
+ VERIFY(*(get_if<string>(&v)) == "");
+ VERIFY(get_if<string>(&v)->empty());
+ VERIFY(get_if<string>(&v)->length() == 0);
+ VERIFY(get_if<string>(&v)->size() == 0);
+
+ *(get_if<string>(&v)) += 'a';
+ VERIFY(*(get_if<string>(&v)) == "a");
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantGet()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ const char* strValue = "canada";
+ using v_t = variant<int, string>;
+ {
+ v_t v;
+ v = 42;
+ VERIFY(v.index() == 0);
+ VERIFY(*get_if<int>(&v) == 42);
+ VERIFY(get<int>(v) == 42);
+ VERIFY( holds_alternative<int>(v));
+ VERIFY(!holds_alternative<string>(v));
+ }
+ {
+ v_t v;
+ v = strValue;
+ VERIFY(v.index() == 1);
+ VERIFY(*get_if<string>(&v) == strValue);
+ VERIFY(get<string>(v) == strValue);
+ VERIFY(!holds_alternative<int>(v));
+ VERIFY(holds_alternative<string>(v));
+ VERIFY(get<string>(move(v)) == strValue);
+ }
+ {
+ v_t v;
+ v = 42;
+ VERIFY(v.index() == 0);
+ VERIFY(*get_if<0>(&v) == 42);
+ VERIFY(get<0>(v) == 42);
+ VERIFY( holds_alternative<int>(v));
+ VERIFY(!holds_alternative<string>(v));
+ }
+ {
+ v_t v;
+ v = strValue;
+ VERIFY(v.index() == 1);
+ VERIFY(*get_if<1>(&v) == strValue);
+ VERIFY(get<1>(v) == strValue);
+ VERIFY(!holds_alternative<int>(v));
+ VERIFY( holds_alternative<string>(v));
+ }
+ {
+ v_t v;
+ v = strValue;
+ VERIFY(v.index() == 1);
+ VERIFY(*get_if<1>(&v) == strValue);
+ VERIFY(get_if<0>(&v) == nullptr);
+ }
+ {
+ VERIFY(get_if<0>((v_t*)nullptr) == nullptr);
+ VERIFY(get_if<1>((v_t*)nullptr) == nullptr);
+ }
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantHoldsAlternative()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ {
+ using v_t = variant<int, short>; // default construct first type
+ v_t v;
+
+ VERIFY(!holds_alternative<long>(v)); // Verify that a query for a T not in the variant typelist returns false.
+ VERIFY(!holds_alternative<string>(v)); // Verify that a query for a T not in the variant typelist returns false.
+ VERIFY( holds_alternative<int>(v)); // variant does hold an int, because its a default constructible first parameter
+ VERIFY(!holds_alternative<short>(v)); // variant does not hold a short
+ }
+
+ {
+ using v_t = variant<monostate, int, short>; // default construct monostate
+ v_t v;
+
+ VERIFY(!holds_alternative<long>(v)); // Verify that a query for a T not in the variant typelist returns false.
+ VERIFY(!holds_alternative<string>(v)); // Verify that a query for a T not in the variant typelist returns false.
+ VERIFY(!holds_alternative<int>(v)); // variant does not hold an int
+ VERIFY(!holds_alternative<short>(v)); // variant does not hold a short
+ }
+
+ {
+ using v_t = variant<monostate, int>;
+
+ {
+ v_t v;
+ VERIFY(!holds_alternative<int>(v)); // variant does not hold an int
+
+ v = 42;
+ VERIFY(holds_alternative<int>(v)); // variant does hold an int
+ }
+
+ {
+ v_t v1, v2;
+ VERIFY(!holds_alternative<int>(v1));
+ VERIFY(!holds_alternative<int>(v2));
+
+ v1 = 42;
+ VERIFY(holds_alternative<int>(v1));
+ VERIFY(!holds_alternative<int>(v2));
+
+ eastl::swap(v1, v2);
+ VERIFY(!holds_alternative<int>(v1));
+ VERIFY(holds_alternative<int>(v2));
+ }
+ }
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantValuelessByException()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ {
+ using v_t = variant<int, short>;
+ static_assert(eastl::is_default_constructible_v<v_t>, "valueless_by_exception error");
+
+ v_t v;
+ VERIFY(!v.valueless_by_exception());
+
+ v = 42;
+ VERIFY(!v.valueless_by_exception());
+ }
+
+ {
+ using v_t = variant<monostate, int>;
+ static_assert(eastl::is_default_constructible_v<v_t>, "valueless_by_exception error");
+
+ v_t v1, v2;
+ VERIFY(!v1.valueless_by_exception());
+ VERIFY(!v2.valueless_by_exception());
+
+ v1 = 42;
+ VERIFY(!v1.valueless_by_exception());
+ VERIFY(!v2.valueless_by_exception());
+
+ eastl::swap(v1, v2);
+ VERIFY(!v1.valueless_by_exception());
+ VERIFY(!v2.valueless_by_exception());
+
+ v1 = v2;
+ VERIFY(!v1.valueless_by_exception());
+ VERIFY(!v2.valueless_by_exception());
+ }
+
+ {
+ struct NotDefaultConstructibleButHasConversionCtor
+ {
+ NotDefaultConstructibleButHasConversionCtor() = delete;
+ NotDefaultConstructibleButHasConversionCtor(int) {}
+ };
+ static_assert(!eastl::is_default_constructible<NotDefaultConstructibleButHasConversionCtor>::value, "valueless_by_exception error");
+
+ using v_t = variant<NotDefaultConstructibleButHasConversionCtor>;
+ v_t v(42);
+ static_assert(!eastl::is_default_constructible_v<v_t>, "valueless_by_exception error");
+ VERIFY(!v.valueless_by_exception());
+ }
+
+ // TODO(rparolin): review exception safety for variant types
+ //
+ // {
+ // #if EASTL_EXCEPTIONS_ENABLED
+ // struct DefaultConstructibleButThrows
+ // {
+ // DefaultConstructibleButThrows() {}
+ // ~DefaultConstructibleButThrows() {}
+ //
+ // DefaultConstructibleButThrows(DefaultConstructibleButThrows&&) { throw 42; }
+ // DefaultConstructibleButThrows(const DefaultConstructibleButThrows&) { throw 42; }
+ // DefaultConstructibleButThrows& operator=(const DefaultConstructibleButThrows&) { throw 42; }
+ // DefaultConstructibleButThrows& operator=(DefaultConstructibleButThrows&&) { throw 42; }
+ // };
+ //
+ // using v_t = variant<DefaultConstructibleButThrows>;
+ //
+ // v_t v1;
+ // VERIFY(!v1.valueless_by_exception());
+ //
+ // try
+ // {
+ // v1 = DefaultConstructibleButThrows();
+ // }
+ // catch (...)
+ // {
+ // VERIFY(v1.valueless_by_exception());
+ // }
+ // #endif
+ // }
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantCopyAndMove()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ {
+ using v_t = variant<int, short, char>;
+
+ v_t v1 = 42;
+ v_t v2 = v1;
+
+ VERIFY(get<int>(v2) == get<int>(v1));
+ }
+
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantEmplace()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ variant<int> v;
+ v.emplace<int>(42);
+ VERIFY(get<int>(v) == 42);
+ }
+ {
+ variant<int> v;
+ v.emplace<0>(42);
+ VERIFY(get<0>(v) == 42);
+ }
+
+ {
+ variant<int, short, long> v;
+
+ v.emplace<0>(42);
+ VERIFY(get<0>(v) == 42);
+
+ v.emplace<1>(short(43));
+ VERIFY(get<1>(v) == short(43));
+
+ v.emplace<2>(44L);
+ VERIFY(get<2>(v) == 44L);
+ }
+ {
+ variant<int, short, long> v;
+
+ v.emplace<int>(42);
+ VERIFY(get<int>(v) == 42);
+
+ v.emplace<short>(short(43));
+ VERIFY(get<short>(v) == short(43));
+
+ v.emplace<long>(44L);
+ VERIFY(get<long>(v) == 44L);
+ }
+
+ {
+ {
+ variant<TestObject> v;
+ v.emplace<0>(1337);
+ VERIFY(get<0>(v).mX == 1337);
+ }
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ {
+ {
+ variant<int, TestObject> v;
+
+ v.emplace<int>(42);
+ VERIFY(get<int>(v) == 42);
+
+ v.emplace<TestObject>(1337);
+ VERIFY(get<TestObject>(v).mX == 1337);
+
+ v.emplace<TestObject>(1338, 42, 3);
+ VERIFY(get<TestObject>(v).mX == 1338 + 42 + 3);
+ }
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ {
+ {
+ struct r {
+ r() = default;
+ r(int x) : mX(x) {}
+ int mX;
+ };
+
+ variant<int, r> v;
+
+ v.emplace<0>(42);
+ VERIFY(get<0>(v) == 42);
+
+ v.emplace<1>(1337);
+ VERIFY(get<1>(v).mX == 1337);
+ }
+ }
+
+ {
+ struct r {
+ r() = default;
+ r(int a, int b, int c, int d) : a(a), b(b), c(c), d(d) {}
+ r(std::initializer_list<int> l)
+ {
+ auto it = l.begin();
+
+ a = *it++;
+ b = *it++;
+ c = *it++;
+ d = *it++;
+ }
+ int a, b, c, d;
+ };
+
+ r aa{1,2,3,4};
+ VERIFY(aa.a == 1);
+ VERIFY(aa.b == 2);
+ VERIFY(aa.c == 3);
+ VERIFY(aa.d == 4);
+
+ variant<r> v;
+ v.emplace<0>(std::initializer_list<int>{1,2,3,4});
+
+ VERIFY(get<r>(v).a == 1);
+ VERIFY(get<r>(v).b == 2);
+ VERIFY(get<r>(v).c == 3);
+ VERIFY(get<r>(v).d == 4);
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantSwap()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ variant<int, float> v1 = 42;
+ variant<int, float> v2 = 24;
+
+ v1.swap(v2);
+
+ VERIFY(get<int>(v1) == 24);
+ VERIFY(get<int>(v2) == 42);
+
+ v1.swap(v2);
+
+ VERIFY(get<int>(v1) == 42);
+ VERIFY(get<int>(v2) == 24);
+ }
+
+ {
+ variant<string> v1 = "Hello";
+ variant<string> v2 = "World";
+
+ VERIFY(get<string>(v1) == "Hello");
+ VERIFY(get<string>(v2) == "World");
+
+ v1.swap(v2);
+
+ VERIFY(get<string>(v1) == "World");
+ VERIFY(get<string>(v2) == "Hello");
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantRelOps()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ variant<int, float> v1 = 42;
+ variant<int, float> v2 = 24;
+ variant<int, float> v1e = v1;
+
+ VERIFY(v1 == v1e);
+ VERIFY(v1 != v2);
+ VERIFY(v1 > v2);
+ VERIFY(v2 < v1);
+ }
+
+ {
+ vector<variant<int, string>> v = {{1}, {3}, {7}, {4}, {0}, {5}, {2}, {6}, {8}};
+ eastl::sort(v.begin(), v.end());
+ VERIFY(eastl::is_sorted(v.begin(), v.end()));
+ }
+
+ return nErrorCount;
+}
+
+
+int TestVariantInplaceCtors()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ variant<int, int> v(in_place<0>, 42);
+ VERIFY(get<0>(v) == 42);
+ VERIFY(v.index() == 0);
+ }
+
+ {
+ variant<int, int> v(in_place<1>, 42);
+ VERIFY(get<1>(v) == 42);
+ VERIFY(v.index() == 1);
+ }
+
+ {
+ variant<int, string> v(in_place<int>, 42);
+ VERIFY(get<0>(v) == 42);
+ VERIFY(v.index() == 0);
+ }
+
+ {
+ variant<int, string> v(in_place<string>, "hello");
+ VERIFY(get<1>(v) == "hello");
+ VERIFY(v.index() == 1);
+ }
+
+ return nErrorCount;
+}
+
+// Many Compilers are smart and will fully inline the visitor in our unittests,
+// Thereby not actually testing the recursive call.
+EA_NO_INLINE int TestVariantVisitNoInline(const eastl::variant<int, bool, unsigned>& v)
+{
+ int nErrorCount = 0;
+
+ bool bVisited = false;
+
+ struct MyVisitor
+ {
+ MyVisitor() = delete;
+ MyVisitor(bool& visited) : mVisited(visited) {};
+
+ void operator()(int) { mVisited = true; }
+ void operator()(bool) { mVisited = true; }
+ void operator()(unsigned) { mVisited = true; }
+
+ bool& mVisited;
+ };
+
+ eastl::visit(MyVisitor(bVisited), v);
+
+ EATEST_VERIFY(bVisited);
+
+ return nErrorCount;
+}
+
+EA_NO_INLINE int TestVariantVisit2NoInline(const eastl::variant<int, bool>& v0, const eastl::variant<int, bool>& v1)
+{
+ int nErrorCount = 0;
+
+ bool bVisited = false;
+
+ struct MyVisitor
+ {
+ MyVisitor() = delete;
+ MyVisitor(bool& visited) : mVisited(visited) {};
+
+ void operator()(int, int) { mVisited = true; }
+ void operator()(bool, int) { mVisited = true; }
+ void operator()(int, bool) { mVisited = true; }
+ void operator()(bool, bool) { mVisited = true; }
+
+ bool& mVisited;
+ };
+
+ eastl::visit(MyVisitor(bVisited), v0, v1);
+
+ EATEST_VERIFY(bVisited);
+
+ return nErrorCount;
+}
+
+EA_NO_INLINE int TestVariantVisit3tNoInline(const eastl::variant<int, bool>& v0, const eastl::variant<int, bool>& v1, const eastl::variant<int, bool>& v2)
+{
+ int nErrorCount = 0;
+
+ bool bVisited = false;
+
+ struct MyVisitor
+ {
+ MyVisitor() = delete;
+ MyVisitor(bool& visited) : mVisited(visited) {};
+
+ void operator()(int, int, int) { mVisited = true; }
+ void operator()(bool, int, int) { mVisited = true; }
+ void operator()(int, bool, int) { mVisited = true; }
+ void operator()(bool, bool, int) { mVisited = true; }
+
+ void operator()(int, int, bool) { mVisited = true; }
+ void operator()(bool, int, bool) { mVisited = true; }
+ void operator()(int, bool, bool) { mVisited = true; }
+ void operator()(bool, bool, bool) { mVisited = true; }
+
+ bool& mVisited;
+ };
+
+ eastl::visit(MyVisitor(bVisited), v0, v1, v2);
+
+ EATEST_VERIFY(bVisited);
+
+ return nErrorCount;
+}
+
+int TestVariantVisitorOverloaded()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ using v_t = variant<int, string, double, long>;
+ v_t arr[] = {42, "jean", 42.0, 42L};
+ v_t v{42.0};
+
+
+ #ifdef __cpp_deduction_guides
+ {
+ int count = 0;
+
+ for (auto& e : arr)
+ {
+ eastl::visit(
+ overloaded{
+ [&](int) { count++; },
+ [&](string) { count++; },
+ [&](double) { count++; },
+ [&](long) { count++; }},
+ e
+ );
+ }
+
+ VERIFY(count == EAArrayCount(arr));
+ }
+
+ {
+ double visitedValue = 0.0f;
+
+ eastl::visit(
+ overloaded{
+ [](int) { },
+ [](string) { },
+ [&](double d) { visitedValue = d; },
+ [](long) { }},
+ v
+ );
+
+ VERIFY(visitedValue == 42.0f);
+ }
+
+ #endif
+
+ {
+ int count = 0;
+
+ for (auto& e : arr)
+ {
+ eastl::visit(
+ eastl::make_overloaded(
+ [&](int) { count++; },
+ [&](string) { count++; },
+ [&](double) { count++; },
+ [&](long) { count++; }),
+ e
+ );
+ }
+
+ VERIFY(count == EAArrayCount(arr));
+ }
+
+ {
+ double visitedValue = 0.0f;
+
+ eastl::visit(
+ eastl::make_overloaded(
+ [](int) { },
+ [](string) { },
+ [&](double d) { visitedValue = d; },
+ [](long) { }),
+ v
+ );
+
+ VERIFY(visitedValue == 42.0f);
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantVisitor()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ using v_t = variant<int, string, double, long>;
+
+ {
+ v_t arr[] = {42, "hello", 42.0, 42L};
+
+ int count = 0;
+ for (auto& e : arr)
+ {
+ eastl::visit([&](auto){ count++; }, e);
+ }
+
+ VERIFY(count == EAArrayCount(arr));
+
+ count = 0;
+ for (auto& e : arr)
+ {
+ eastl::visit<void>([&](auto){ count++; }, e);
+ }
+
+ VERIFY(count == EAArrayCount(arr));
+ }
+
+ {
+ static bool bVisited = false;
+
+ variant<int, long, string> v = 42;
+
+ struct MyVisitor
+ {
+ void operator()(int) { bVisited = true; };
+ void operator()(long) { };
+ void operator()(string) { };
+ void operator()(unsigned) { }; // not in variant
+ };
+
+ visit(MyVisitor{}, v);
+ VERIFY(bVisited);
+
+ bVisited = false;
+
+ visit<void>(MyVisitor{}, v);
+ VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ variant<int, bool, unsigned> v = (int)1;
+
+ struct MyVisitor
+ {
+ bool& operator()(int) { return bVisited; }
+ bool& operator()(bool) { return bVisited; }
+ bool& operator()(unsigned) { return bVisited; }
+ };
+
+ bool& ret = visit(MyVisitor{}, v);
+ ret = true;
+ VERIFY(bVisited);
+
+ bVisited = false;
+ bool& ret2 = visit<bool&>(MyVisitor{}, v);
+ ret2 = true;
+ VERIFY(bVisited);
+ }
+
+ {
+ variant<int, bool, unsigned> v = (int)1;
+
+ struct MyVisitor
+ {
+ void operator()(int& i) { i = 2; }
+ void operator()(bool&) {}
+ void operator()(unsigned&) {}
+ };
+
+ visit(MyVisitor{}, v);
+ EATEST_VERIFY(get<0>(v) == (int)2);
+
+ v = (int)1;
+ visit<void>(MyVisitor{}, v);
+ EATEST_VERIFY(get<0>(v) == (int)2);
+ }
+
+ {
+ static bool bVisited = false;
+
+ variant<int, bool, unsigned> v =(int)1;
+
+ struct MyVisitor
+ {
+ void operator()(const int&) { bVisited = true; }
+ void operator()(const bool&) {}
+ void operator()(const unsigned&) {}
+ };
+
+ visit(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+
+ bVisited = false;
+ visit<void>(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ const variant<int, bool, unsigned> v =(int)1;
+
+ struct MyVisitor
+ {
+ void operator()(const int&) { bVisited = true; }
+ void operator()(const bool&) {}
+ void operator()(const unsigned&) {}
+ };
+
+ visit(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+
+ bVisited = false;
+ visit<void>(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ struct MyVisitor
+ {
+ void operator()(int&&) { bVisited = true; }
+ void operator()(bool&&) {}
+ void operator()(unsigned&&) {}
+ };
+
+ visit(MyVisitor{}, variant<int, bool, unsigned>{(int)1});
+ EATEST_VERIFY(bVisited);
+
+ visit<void>(MyVisitor{}, variant<int, bool, unsigned>{(int)1});
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ variant<int, bool, unsigned> v = (int)1;
+
+ struct MyVisitor
+ {
+ bool&& operator()(int) { return eastl::move(bVisited); }
+ bool&& operator()(bool) { return eastl::move(bVisited); }
+ bool&& operator()(unsigned) { return eastl::move(bVisited); }
+ };
+
+ bool&& ret = visit(MyVisitor{}, v);
+ ret = true;
+ VERIFY(bVisited);
+
+ bVisited = false;
+ bool&& ret2 = visit<bool&&>(MyVisitor{}, v);
+ ret2 = true;
+ VERIFY(bVisited);
+ }
+
+ {
+ variant<int, bool, unsigned> v = (int)1;
+
+ TestVariantVisitNoInline(v);
+ v = (bool)true;
+ TestVariantVisitNoInline(v);
+ v = (int)3;
+ TestVariantVisitNoInline(v);
+ }
+
+ {
+ variant<int, bool> v0 = (int)1;
+ variant<int, bool> v1 = (bool)true;
+
+ TestVariantVisit2NoInline(v0, v1);
+ v0 = (bool)false;
+ TestVariantVisit2NoInline(v0, v1);
+ v1 = (int)2;
+ TestVariantVisit2NoInline(v0, v1);
+ }
+
+ {
+ variant<int, bool> v0 = (int)1;
+ variant<int, bool> v1 = (int)2;
+ variant<int, bool> v2 = (int)3;
+
+ TestVariantVisit3tNoInline(v0, v1, v2);
+ v2 = (bool)false;
+ TestVariantVisit3tNoInline(v0, v1, v2);
+ v0 = (bool)true;
+ TestVariantVisit3tNoInline(v0, v1, v2);
+ }
+
+ {
+ static bool bVisited = false;
+
+ variant<int, string> i = 42;
+ variant<int, string> s = "hello";
+
+ struct MultipleVisitor
+ {
+ MultipleVisitor& operator()(int, int) { return *this; }
+ MultipleVisitor& operator()(int, string) { bVisited = true; return *this; }
+ MultipleVisitor& operator()(string, int) { return *this; }
+ MultipleVisitor& operator()(string, string) { return *this; }
+ };
+
+ MultipleVisitor& ret = visit(MultipleVisitor{}, i, s);
+ EA_UNUSED(ret);
+ VERIFY(bVisited);
+
+ MultipleVisitor& ret2 = visit<MultipleVisitor&>(MultipleVisitor{}, i, s);
+ EA_UNUSED(ret2);
+ VERIFY(bVisited);
+ }
+
+ {
+ bool bVisited = false;
+
+ variant<int, bool> v0 = 0;
+ variant<int, bool> v1 = 1;
+
+ struct MultipleVisitor
+ {
+ MultipleVisitor() = delete;
+ MultipleVisitor(bool& visited) : mVisited(visited) {};
+
+ void operator()(int, int) { mVisited = true; }
+ void operator()(int, bool) {}
+ void operator()(bool, int) {}
+ void operator()(bool, bool) {}
+
+ bool& mVisited;
+ };
+
+ visit(MultipleVisitor(bVisited), v0, v1);
+ EATEST_VERIFY(bVisited);
+
+ bVisited = false;
+ visit<void>(MultipleVisitor(bVisited), v0, v1);
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ variant<int, string> v = 42;
+
+ struct ModifyingVisitor
+ {
+ void operator()(int &i) { i += 1; }
+ void operator()(string &s) { s += "hello"; }
+ };
+
+ visit(ModifyingVisitor{}, v);
+ VERIFY(get<0>(v) == 43);
+ }
+
+ {
+ variant<int, string> v = 42;
+
+ struct ReturningVisitor
+ {
+ int operator()(int i) {return i;}
+ int operator()(string s) {return 0;}
+ };
+
+ VERIFY(visit(ReturningVisitor{}, v) == 42);
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantVisitorReturn()
+{
+ int nErrorCount = 0;
+
+ {
+ static bool bVisited = false;
+
+ eastl::variant<int, bool> v = (int)1;
+
+ struct MyVisitor
+ {
+ bool operator()(int) { bVisited = true; return true; }
+ bool operator()(bool) { return false; }
+ };
+
+ eastl::visit<void>(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ eastl::variant<int, bool> v = (int)1;
+
+ struct MyVisitor
+ {
+ bool operator()(int) { bVisited = true; return true; }
+ bool operator()(bool) { return false; }
+ };
+
+ eastl::visit<const void>(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ eastl::variant<int, bool> v = (int)1;
+
+ struct MyVisitor
+ {
+ bool operator()(int) { bVisited = true; return true; }
+ bool operator()(bool) { return false; }
+ };
+
+ eastl::visit<volatile void>(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ eastl::variant<int, bool> v = (int)1;
+
+ struct MyVisitor
+ {
+ bool operator()(int) { bVisited = true; return true; }
+ bool operator()(bool) { return false; }
+ };
+
+ eastl::visit<const volatile void>(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ eastl::variant<int, bool> v = (int)1;
+
+ struct MyVisitor
+ {
+ bool operator()(int) { bVisited = true; return true; }
+ bool operator()(bool) { return false; }
+ };
+
+ int ret = eastl::visit<int>(MyVisitor{}, v);
+ EATEST_VERIFY(bVisited);
+ EATEST_VERIFY(ret);
+ }
+
+ {
+ static bool bVisited = false;
+
+ struct A {};
+ struct B : public A {};
+ struct C : public A {};
+
+ eastl::variant<int, bool> v = (int)1;
+
+ struct MyVisitor
+ {
+ B operator()(int) { bVisited = true; return B{}; }
+ C operator()(bool) { return C{}; }
+ };
+
+ A ret = eastl::visit<A>(MyVisitor{}, v);
+ EA_UNUSED(ret);
+ EATEST_VERIFY(bVisited);
+ }
+
+ {
+ static bool bVisited = false;
+
+ eastl::variant<int, bool> v = (int)1;
+
+ struct MyVisitor
+ {
+ MyVisitor operator()(int) { bVisited = true; return MyVisitor{}; }
+ MyVisitor operator()(bool) { return MyVisitor{}; }
+ };
+
+ MyVisitor ret = eastl::visit<MyVisitor>(MyVisitor{}, v);
+ EA_UNUSED(ret);
+ EATEST_VERIFY(bVisited);
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantAssignment()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ variant<int, TestObject> v = TestObject(1337);
+ VERIFY(get<TestObject>(v).mX == 1337);
+ TestObject::Reset();
+
+ v.operator=(42); // ensure assignment-operator is called
+ VERIFY(TestObject::sTODtorCount == 1); // verify TestObject dtor is called.
+ VERIFY(get<int>(v) == 42);
+ TestObject::Reset();
+ }
+
+ return nErrorCount;
+}
+
+
+int TestVariantMoveOnly()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ variant<int, MoveOnlyType> v = MoveOnlyType(1337);
+ VERIFY(get<MoveOnlyType>(v).mVal == 1337);
+ }
+
+ return nErrorCount;
+}
+
+
+//compilation test related to PR #315: converting constructor and assignment operator compilation error
+void TestCompilation(const double e) { eastl::variant<double> v{e}; }
+
+
+
+int TestVariantUserRegressionCopyMoveAssignmentOperatorLeak()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ {
+ eastl::variant<TestObject> v = TestObject(1337);
+ VERIFY(eastl::get<TestObject>(v).mX == 1337);
+ eastl::variant<TestObject> v2 = TestObject(1338);
+ VERIFY(eastl::get<TestObject>(v2).mX == 1338);
+ v.operator=(v2);
+ VERIFY(eastl::get<TestObject>(v).mX == 1338);
+ VERIFY(eastl::get<TestObject>(v2).mX == 1338);
+ }
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+ {
+ {
+ eastl::variant<TestObject> v = TestObject(1337);
+ VERIFY(eastl::get<TestObject>(v).mX == 1337);
+ eastl::variant<TestObject> v2 = TestObject(1338);
+ VERIFY(eastl::get<TestObject>(v2).mX == 1338);
+ v.operator=(eastl::move(v2));
+ VERIFY(eastl::get<TestObject>(v).mX == 1338);
+ }
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+ {
+ {
+ eastl::variant<TestObject> v = TestObject(1337);
+ VERIFY(eastl::get<TestObject>(v).mX == 1337);
+ v = {};
+ VERIFY(eastl::get<TestObject>(v).mX == 0);
+ }
+ VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantRelationalOperators()
+{
+ int nErrorCount = 0;
+
+ using VariantNoThrow = eastl::variant<int, bool, float>;
+
+ // Equality
+ {
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ true };
+
+ EATEST_VERIFY((v1 == v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 == v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)0 };
+
+ EATEST_VERIFY((v1 == v2) == false);
+ }
+ }
+
+ // Inequality
+ {
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ true };
+
+ EATEST_VERIFY((v1 != v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 != v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)0 };
+
+ EATEST_VERIFY((v1 != v2) == true);
+ }
+ }
+
+ // Less Than
+ {
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ true };
+
+ EATEST_VERIFY((v1 < v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ true };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 < v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 < v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ (int)0 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 < v2) == true);
+ }
+ }
+
+ // Greater Than
+ {
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ true };
+
+ EATEST_VERIFY((v1 > v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ true };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 > v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 > v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)0 };
+
+ EATEST_VERIFY((v1 > v2) == true);
+ }
+ }
+
+ // Less Equal
+ {
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ true };
+
+ EATEST_VERIFY((v1 <= v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ true };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 <= v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 <= v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ (int)0 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 <= v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)0 };
+
+ EATEST_VERIFY((v1 <= v2) == false);
+ }
+ }
+
+ // Greater Equal
+ {
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ true };
+
+ EATEST_VERIFY((v1 >= v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ true };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 >= v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 >= v2) == true);
+ }
+
+ {
+ VariantNoThrow v1{ (int)0 };
+ VariantNoThrow v2{ (int)1 };
+
+ EATEST_VERIFY((v1 >= v2) == false);
+ }
+
+ {
+ VariantNoThrow v1{ (int)1 };
+ VariantNoThrow v2{ (int)0 };
+
+ EATEST_VERIFY((v1 >= v2) == true);
+ }
+ }
+
+#if EASTL_EXCEPTIONS_ENABLED
+
+ using VariantThrow = eastl::variant<int, bool, float>;
+
+ auto make_variant_valueless = [](VariantThrow& v)
+ {
+ try
+ {
+ v.emplace<0>(valueless_struct<int>{});
+ }
+ catch(const typename valueless_struct<int>::exception_tag &)
+ {
+ }
+ };
+
+ // Equality
+ {
+ {
+ VariantThrow v0{ (int)0 };
+ VariantThrow v1{ (int)1 };
+
+ make_variant_valueless(v0);
+ make_variant_valueless(v1);
+
+ EATEST_VERIFY((v0 == v1) == true);
+ }
+ }
+
+ // Inequality
+ {
+ {
+ VariantThrow v0{ (int)0 };
+ VariantThrow v1{ (int)1 };
+
+ make_variant_valueless(v0);
+ make_variant_valueless(v1);
+
+ EATEST_VERIFY((v0 != v1) == false);
+ }
+ }
+
+ // Less Than
+ {
+ {
+ VariantThrow v0{ (int)0 };
+ VariantThrow v1{ (int)1 };
+
+ make_variant_valueless(v0);
+
+ EATEST_VERIFY((v0 < v1) == true);
+ }
+
+ {
+ VariantThrow v0{ (int)0 };
+ VariantThrow v1{ (int)1 };
+
+ make_variant_valueless(v1);
+
+ EATEST_VERIFY((v0 < v1) == false);
+ }
+ }
+
+ // Greater Than
+ {
+ {
+ VariantThrow v0{ (int)1 };
+ VariantThrow v1{ (int)0 };
+
+ make_variant_valueless(v0);
+
+ EATEST_VERIFY((v0 > v1) == false);
+ }
+
+ {
+ VariantThrow v0{ (int)1 };
+ VariantThrow v1{ (int)0 };
+
+ make_variant_valueless(v1);
+
+ EATEST_VERIFY((v0 > v1) == true);
+ }
+ }
+
+ // Less Equal
+ {
+ {
+ VariantThrow v0{ (int)1 };
+ VariantThrow v1{ (int)1 };
+
+ make_variant_valueless(v0);
+
+ EATEST_VERIFY((v0 <= v1) == true);
+ }
+
+ {
+ VariantThrow v0{ (int)1 };
+ VariantThrow v1{ (int)0 };
+
+ make_variant_valueless(v1);
+
+ EATEST_VERIFY((v0 <= v1) == false);
+ }
+ }
+
+ // Greater Equal
+ {
+ {
+ VariantThrow v0{ (int)1 };
+ VariantThrow v1{ (int)1 };
+
+ make_variant_valueless(v0);
+
+ EATEST_VERIFY((v0 >= v1) == false);
+ }
+
+ {
+ VariantThrow v0{ (int)1 };
+ VariantThrow v1{ (int)0 };
+
+ make_variant_valueless(v1);
+
+ EATEST_VERIFY((v0 >= v1) == true);
+ }
+ }
+
+#endif
+
+ return nErrorCount;
+}
+
+
+int TestVariantUserRegressionIncompleteType()
+{
+ using namespace eastl;
+ int nErrorCount = 0;
+
+ {
+ struct B;
+
+ struct A
+ {
+ vector<variant<B>> v;
+ };
+
+ struct B
+ {
+ vector<variant<A>> v;
+ };
+ }
+
+ return nErrorCount;
+}
+
+#define EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(Type, VarName) \
+ bool operator==(const Type & rhs) const { return VarName == rhs.VarName; } \
+ bool operator!=(const Type & rhs) const { return VarName != rhs.VarName; } \
+ bool operator<(const Type & rhs) const { return VarName < rhs.VarName; } \
+ bool operator>(const Type & rhs) const { return VarName > rhs.VarName; } \
+ bool operator<=(const Type & rhs) const { return VarName <= rhs.VarName; } \
+ bool operator>=(const Type & rhs) const { return VarName >= rhs.VarName; }
+
+int TestBigVariantComparison()
+{
+ int nErrorCount = 0;
+
+ struct A;
+ struct B;
+ struct C;
+ struct D;
+ struct E;
+ struct F;
+ struct G;
+ struct H;
+ struct I;
+ struct J;
+ struct K;
+ struct L;
+ struct M;
+ struct N;
+ struct O;
+ struct P;
+ struct Q;
+ struct R;
+ struct S;
+ struct T;
+ struct U;
+ struct V;
+ struct W;
+ struct X;
+ struct Y;
+ struct Z;
+
+ using BigVariant = eastl::variant<A, B, C, D, E, F, G, H, I, J, K, L, M, N,
+ O, P, Q, R, S, T, U, V, W, X, Y, Z>;
+
+ struct A { int a; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(A, a) };
+ struct B { int b; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(B, b) };
+ struct C { int c; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(C, c) };
+ struct D { int d; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(D, d) };
+ struct E { int e; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(E, e) };
+ struct F { int f; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(F, f) };
+ struct G { int g; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(G, g) };
+ struct H { int h; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(H, h) };
+ struct I { int i; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(I, i) };
+ struct J { int j; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(J, j) };
+ struct K { int k; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(K, k) };
+ struct L { int l; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(L, l) };
+ struct M { int m; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(M, m) };
+ struct N { int n; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(N, n) };
+ struct O { int o; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(O, o) };
+ struct P { int p; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(P, p) };
+ struct Q { int q; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(Q, q) };
+ struct R { int r; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(R, r) };
+ struct S { int s; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(S, s) };
+ struct T { int t; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(T, t) };
+ struct U { int u; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(U, u) };
+ struct V { int v; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(V, v) };
+ struct W { int w; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(W, w) };
+ struct X { int x; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(X, x) };
+ struct Y { int y; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(Y, y) };
+ struct Z { int z; EASTL_TEST_BIG_VARIANT_RELATIONAL_OPS(Z, z) };
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ A{1} };
+
+ VERIFY(v0 != v1);
+ }
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ A{1} };
+
+ VERIFY(v0 < v1);
+ }
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ A{0} };
+
+ VERIFY(v0 == v1);
+ }
+
+ {
+ BigVariant v0{ A{1} };
+ BigVariant v1{ A{0} };
+
+ VERIFY(v0 > v1);
+ }
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ A{1} };
+
+ VERIFY(v0 <= v1);
+ }
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ A{0} };
+
+ VERIFY(v0 <= v1);
+ }
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ A{0} };
+
+ VERIFY(v0 >= v1);
+ }
+
+ {
+ BigVariant v0{ A{1} };
+ BigVariant v1{ A{0} };
+
+ VERIFY(v0 >= v1);
+ }
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ B{0} };
+
+ VERIFY(v0 != v1);
+ }
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ B{0} };
+
+ VERIFY(v0 < v1);
+ }
+
+ {
+ BigVariant v0{ A{0} };
+ BigVariant v1{ B{0} };
+
+ VERIFY(v1 > v0);
+ }
+
+ return nErrorCount;
+}
+
+int TestVariantGeneratingComparisonOverloads();
+
+int TestVariant()
+{
+ int nErrorCount = 0;
+
+ nErrorCount += TestVariantBasic();
+ nErrorCount += TestVariantSize();
+ nErrorCount += TestVariantAlternative();
+ nErrorCount += TestVariantValuelessByException();
+ nErrorCount += TestVariantGet();
+ nErrorCount += TestVariantHoldsAlternative();
+ nErrorCount += TestVariantHash();
+ nErrorCount += TestVariantCopyAndMove();
+ nErrorCount += TestVariantSwap();
+ nErrorCount += TestVariantEmplace();
+ nErrorCount += TestVariantRelOps();
+ nErrorCount += TestVariantInplaceCtors();
+ nErrorCount += TestVariantVisitorOverloaded();
+ nErrorCount += TestVariantVisitor();
+ nErrorCount += TestVariantAssignment();
+ nErrorCount += TestVariantMoveOnly();
+ nErrorCount += TestVariantUserRegressionCopyMoveAssignmentOperatorLeak();
+ nErrorCount += TestVariantUserRegressionIncompleteType();
+ nErrorCount += TestVariantGeneratingComparisonOverloads();
+ nErrorCount += TestBigVariantComparison();
+ nErrorCount += TestVariantRelationalOperators();
+
+ return nErrorCount;
+}
+#else
+ int TestVariant() { return 0; }
+#endif
diff --git a/EASTL/test/source/TestVariant2.cpp b/EASTL/test/source/TestVariant2.cpp
new file mode 100644
index 0000000..e2bd90f
--- /dev/null
+++ b/EASTL/test/source/TestVariant2.cpp
@@ -0,0 +1,82 @@
+/**
+ * NOTE:
+ *
+ * DO NOT INCLUDE EATest/EATest.h or ANY OTHER HEADER
+ * There is a bug in MSVC whereby pushing/poping all warnings from a header does not reenable all warnings
+ * in the TU that included the header.
+ * For example C4805 will not reenabled.
+ */
+
+#include <EASTL/variant.h>
+
+int TestVariantGeneratingComparisonOverloads()
+{
+ int nErrorCount = 0;
+
+ {
+ eastl::variant<int, float, bool> a;
+ eastl::variant<int, float, bool> b;
+
+ auto r = a == b;
+
+ nErrorCount += !r;
+ }
+
+ {
+ eastl::variant<int, float, bool> a;
+ eastl::variant<int, float, bool> b;
+
+ bool r = (a == b);
+
+ nErrorCount += !r;
+ }
+
+ // A variant is permitted to hold the same type more than once, and to hold differently cv-qualified versions of the same type.
+
+ {
+ eastl::variant<int, int, int> a;
+ eastl::variant<int, int, int> b;
+
+ bool r = (a == b);
+
+ nErrorCount += !r;
+ }
+
+ {
+ eastl::variant<signed int, unsigned int> a;
+ eastl::variant<signed int, unsigned int> b;
+
+ bool r = (a == b);
+
+ nErrorCount += !r;
+ }
+
+ {
+ eastl::variant<int, bool> a;
+ eastl::variant<int, bool> b;
+
+ bool r = (a == b);
+
+ nErrorCount += !r;
+ }
+
+ {
+ eastl::variant<volatile int, int, const int, const volatile int> a;
+ eastl::variant<volatile int, int, const int, const volatile int> b;
+
+ bool r = (a == b);
+
+ nErrorCount += !r;
+ }
+
+ {
+ eastl::variant<volatile int, int, const int, const volatile int, bool> a;
+ eastl::variant<volatile int, int, const int, const volatile int, bool> b;
+
+ bool r = (a == b);
+
+ nErrorCount += !r;
+ }
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestVector.cpp b/EASTL/test/source/TestVector.cpp
new file mode 100644
index 0000000..69cdb52
--- /dev/null
+++ b/EASTL/test/source/TestVector.cpp
@@ -0,0 +1,1821 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#include "EASTLTest.h"
+#include <EASTL/vector.h>
+#include <EASTL/string.h>
+#include <EASTL/deque.h>
+#include <EASTL/list.h>
+#include <EASTL/slist.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/utility.h>
+#include <EASTL/allocator_malloc.h>
+#include <EASTL/unique_ptr.h>
+
+#include "ConceptImpls.h"
+
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <vector>
+ #include <string>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::vector<bool>;
+template class eastl::vector<int>;
+template class eastl::vector<Align64>;
+template class eastl::vector<TestObject>;
+
+
+// This tests "uninitialized_fill" usage in vector when T has a user provided
+// address-of operator overload. In these situations, EASTL containers must use
+// the standard utility "eastl::addressof(T)" which is designed to by-pass user
+// provided address-of operator overloads.
+//
+// Previously written as:
+// for(; first != last; ++first, ++currentDest)
+// ::new((void*)&*currentDest) value_type(*first); // & not guaranteed to be a pointer
+//
+// Bypasses user 'addressof' operators:
+// for(; n > 0; --n, ++currentDest)
+// ::new(eastl::addressof(*currentDest)) value_type(value); // guaranteed to be a pointer
+//
+struct AddressOfOperatorResult {};
+struct HasAddressOfOperator
+{
+ // problematic 'addressof' operator that doesn't return a pointer type
+ AddressOfOperatorResult operator&() const { return {}; }
+ bool operator==(const HasAddressOfOperator&) const { return false; }
+};
+template class eastl::vector<HasAddressOfOperator>; // force compile all functions of vector
+
+
+
+// Test compiler issue that appeared in VS2012 relating to kAlignment
+struct StructWithContainerOfStructs
+{
+ eastl::vector<StructWithContainerOfStructs> children;
+};
+
+ // This relatively complex test is to prevent a regression on VS2013. The data types have what may appear to be
+ // strange names (for test code) because the code is based on a test case extracted from the Frostbite codebase.
+ // This test is actually invalid and should be removed as const data memebers are problematic for STL container
+ // implementations. (ie. they prevent constructors from being generated).
+namespace
+{
+ EA_DISABLE_VC_WARNING(4512) // disable warning : "assignment operator could not be generated"
+#if (defined(_MSC_VER) && (_MSC_VER >= 1900)) // VS2015-preview and later.
+ EA_DISABLE_VC_WARNING(5025) // disable warning : "move assignment operator could not be generated"
+ EA_DISABLE_VC_WARNING(4626) // disable warning : "assignment operator was implicitly defined as deleted"
+ EA_DISABLE_VC_WARNING(5027) // disable warning : "move assignment operator was implicitly defined as deleted"
+#endif
+struct ScenarioRefEntry
+{
+ ScenarioRefEntry(const eastl::string& contextDatabase) : ContextDatabase(contextDatabase) {}
+
+ struct RowEntry
+ {
+ RowEntry(int levelId, int sceneId, int actorId, int partId, const eastl::string& controller)
+ : LevelId(levelId), SceneId(sceneId), ActorId(actorId), PartId(partId), Controller(controller)
+ {
+ }
+
+ int LevelId;
+ int SceneId;
+ int ActorId;
+ int PartId;
+ const eastl::string& Controller;
+ };
+ const eastl::string& ContextDatabase; // note: const class members prohibits move semantics
+ typedef eastl::vector<RowEntry> RowData;
+ RowData Rows;
+};
+typedef eastl::vector<ScenarioRefEntry> ScenarRefData;
+struct AntMetaDataRecord
+{
+ ScenarRefData ScenarioRefs;
+};
+typedef eastl::vector<AntMetaDataRecord> MetadataRecords;
+
+struct StructWithConstInt
+{
+ StructWithConstInt(const int& _i) : i(_i) {}
+ const int i;
+};
+
+struct StructWithConstRefToInt
+{
+ StructWithConstRefToInt(const int& _i) : i(_i) {}
+ const int& i;
+};
+#if (defined(_MSC_VER) && (_MSC_VER >= 1900)) // VS2015-preview and later.
+ EA_RESTORE_VC_WARNING() // disable warning 5025: "move assignment operator could not be generated"
+ EA_RESTORE_VC_WARNING() // disable warning 4626: "assignment operator was implicitly defined as deleted"
+ EA_RESTORE_VC_WARNING() // disable warning 5027: "move assignment operator was implicitly defined as deleted"
+#endif
+EA_RESTORE_VC_WARNING()
+}
+
+struct ItemWithConst
+{
+ ItemWithConst& operator=(const ItemWithConst&);
+
+public:
+ ItemWithConst(int _i) : i(_i) {}
+ ItemWithConst(const ItemWithConst& x) : i(x.i) {}
+ const int i;
+};
+
+struct testmovable
+{
+ EA_NON_COPYABLE(testmovable)
+public:
+ testmovable() EA_NOEXCEPT {}
+
+ testmovable(testmovable&&) EA_NOEXCEPT {}
+
+ testmovable& operator=(testmovable&&) EA_NOEXCEPT { return *this; }
+};
+
+struct TestMoveAssignToSelf
+{
+ TestMoveAssignToSelf() EA_NOEXCEPT : mMovedToSelf(false) {}
+ TestMoveAssignToSelf(const TestMoveAssignToSelf& other) { mMovedToSelf = other.mMovedToSelf; }
+ TestMoveAssignToSelf& operator=(TestMoveAssignToSelf&&) { mMovedToSelf = true; return *this; }
+ TestMoveAssignToSelf& operator=(const TestMoveAssignToSelf&) = delete;
+
+ bool mMovedToSelf;
+};
+
+#if EASTL_VARIABLE_TEMPLATES_ENABLED
+ /// custom type-trait which checks if a type is comparable via the <operator.
+ template <class, class = eastl::void_t<>>
+ struct is_less_comparable : eastl::false_type { };
+ template <class T>
+ struct is_less_comparable<T, eastl::void_t<decltype(eastl::declval<T>() < eastl::declval<T>())>> : eastl::true_type { };
+#else
+ // bypass the test since the compiler doesn't support variable templates.
+ template <class> struct is_less_comparable : eastl::false_type { };
+#endif
+
+
+int TestVector()
+{
+ int nErrorCount = 0;
+ eastl_size_t i;
+
+ TestObject::Reset();
+
+ {
+ MetadataRecords mMetadataRecords;
+ AntMetaDataRecord r, s;
+ mMetadataRecords.push_back(r);
+ mMetadataRecords.push_back(s);
+ }
+
+ {
+ using namespace eastl;
+
+ // explicit vector();
+ vector<int> intArray1;
+ vector<TestObject> toArray1;
+ vector<list<TestObject> > toListArray1;
+
+ EATEST_VERIFY(intArray1.validate());
+ EATEST_VERIFY(intArray1.empty());
+ EATEST_VERIFY(toArray1.validate());
+ EATEST_VERIFY(toArray1.empty());
+ EATEST_VERIFY(toListArray1.validate());
+ EATEST_VERIFY(toListArray1.empty());
+
+ // explicit vector(const allocator_type& allocator);
+ MallocAllocator::reset_all();
+ MallocAllocator ma;
+ vector<int, MallocAllocator> intArray6(ma);
+ vector<TestObject, MallocAllocator> toArray6(ma);
+ vector<list<TestObject>, MallocAllocator> toListArray6(ma);
+ intArray6.resize(1);
+ toArray6.resize(1);
+ toListArray6.resize(1);
+ EATEST_VERIFY(MallocAllocator::mAllocCountAll == 3);
+
+ // explicit vector(size_type n, const allocator_type& allocator = EASTL_VECTOR_DEFAULT_ALLOCATOR)
+ vector<int> intArray2(10);
+ vector<TestObject> toArray2(10);
+ vector<list<TestObject> > toListArray2(10);
+
+ EATEST_VERIFY(intArray2.validate());
+ EATEST_VERIFY(intArray2.size() == 10);
+ EATEST_VERIFY(toArray2.validate());
+ EATEST_VERIFY(toArray2.size() == 10);
+ EATEST_VERIFY(toListArray2.validate());
+ EATEST_VERIFY(toListArray2.size() == 10);
+
+ // vector(size_type n, const value_type& value, const allocator_type& allocator =
+ // EASTL_VECTOR_DEFAULT_ALLOCATOR)
+ vector<int> intArray3(10, 7);
+ vector<TestObject> toArray3(10, TestObject(7));
+ vector<list<TestObject> > toListArray3(10, list<TestObject>(7));
+
+ EATEST_VERIFY(intArray3.validate());
+ EATEST_VERIFY(intArray3.size() == 10);
+ EATEST_VERIFY(intArray3[5] == 7);
+ EATEST_VERIFY(toArray3.validate());
+ EATEST_VERIFY(toArray3[5] == TestObject(7));
+ EATEST_VERIFY(toListArray3.validate());
+ EATEST_VERIFY(toListArray3[5] == list<TestObject>(7));
+
+ // vector(const vector& x)
+ vector<int> intArray4(intArray2);
+ vector<TestObject> toArray4(toArray2);
+ vector<list<TestObject> > toListArray4(toListArray2);
+
+ EATEST_VERIFY(intArray4.validate());
+ EATEST_VERIFY(intArray4 == intArray2);
+ EATEST_VERIFY(toArray4.validate());
+ EATEST_VERIFY(toArray4 == toArray2);
+ EATEST_VERIFY(intArray4.validate());
+ EATEST_VERIFY(toListArray4 == toListArray2);
+
+ // vector(const this_type& x, const allocator_type& allocator)
+ MallocAllocator::reset_all();
+ vector<int, MallocAllocator> intArray7(intArray6, ma);
+ vector<TestObject, MallocAllocator> toArray7(toArray6, ma);
+ vector<list<TestObject>, MallocAllocator> toListArray7(toListArray6, ma);
+ EATEST_VERIFY(MallocAllocator::mAllocCountAll == 3);
+
+ // vector(InputIterator first, InputIterator last)
+ deque<int> intDeque(3);
+ deque<TestObject> toDeque(3);
+ deque<list<TestObject> > toListDeque(3);
+
+ vector<int> intArray5(intDeque.begin(), intDeque.end());
+ vector<TestObject> toArray5(toDeque.begin(), toDeque.end());
+ vector<list<TestObject> > toListArray5(toListDeque.begin(), toListDeque.end());
+
+ // vector(std::initializer_list<T> ilist, const Allocator& allocator = EASTL_VECTOR_DEFAULT_ALLOCATOR);
+ {
+#if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ eastl::vector<float> floatVector{0, 1, 2, 3};
+
+ EATEST_VERIFY(floatVector.size() == 4);
+ EATEST_VERIFY((floatVector[0] == 0) && (floatVector[3] == 3));
+#endif
+ }
+
+ // vector& operator=(const vector& x);
+ intArray3 = intArray4;
+ toArray3 = toArray4;
+ toListArray3 = toListArray4;
+
+ EATEST_VERIFY(intArray3.validate());
+ EATEST_VERIFY(intArray3 == intArray4);
+ EATEST_VERIFY(toArray3.validate());
+ EATEST_VERIFY(toArray3 == toArray4);
+ EATEST_VERIFY(intArray3.validate());
+ EATEST_VERIFY(toListArray3 == toListArray4);
+
+// this_type& operator=(std::initializer_list<T> ilist);
+#if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ intArray3 = {0, 1, 2, 3};
+ EATEST_VERIFY((intArray3.size() == 4) && (intArray3[0] == 0) && (intArray3[3] == 3));
+#endif
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // vector(this_type&& x)
+ // vector(this_type&& x, const Allocator& allocator)
+ // this_type& operator=(this_type&& x)
+
+ vector<TestObject> vector3TO33(3, TestObject(33));
+ vector<TestObject> toVectorA(eastl::move(vector3TO33));
+ EATEST_VERIFY((toVectorA.size() == 3) && (toVectorA.front().mX == 33) && (vector3TO33.size() == 0));
+
+ // The following is not as strong a test of this ctor as it could be. A stronger test would be to use
+ // IntanceAllocator with different instances.
+ vector<TestObject, MallocAllocator> vector4TO44(4, TestObject(44));
+ vector<TestObject, MallocAllocator> toVectorB(eastl::move(vector4TO44), MallocAllocator());
+ EATEST_VERIFY((toVectorB.size() == 4) && (toVectorB.front().mX == 44) && (vector4TO44.size() == 0));
+
+ vector<TestObject, MallocAllocator> vector5TO55(5, TestObject(55));
+ toVectorB = eastl::move(vector5TO55);
+ EATEST_VERIFY((toVectorB.size() == 5) && (toVectorB.front().mX == 55) && (vector5TO55.size() == 0));
+
+ // Should be able to emplace_back an item with const members (non-copyable)
+ eastl::vector<ItemWithConst> myVec2;
+ ItemWithConst& ref = myVec2.emplace_back(42);
+ EATEST_VERIFY(myVec2.back().i == 42);
+ EATEST_VERIFY(ref.i == 42);
+ }
+
+ {
+ using namespace eastl;
+
+ // pointer data();
+ // const_pointer data() const;
+ // reference front();
+ // const_reference front() const;
+ // reference back();
+ // const_reference back() const;
+
+ vector<int> intArray(10, 7);
+ intArray[0] = 10;
+ intArray[1] = 11;
+ intArray[2] = 12;
+
+ EATEST_VERIFY(intArray.data() == &intArray[0]);
+ EATEST_VERIFY(*intArray.data() == 10);
+ EATEST_VERIFY(intArray.front() == 10);
+ EATEST_VERIFY(intArray.back() == 7);
+
+ const vector<TestObject> toArrayC(10, TestObject(7));
+
+ EATEST_VERIFY(toArrayC.data() == &toArrayC[0]);
+ EATEST_VERIFY(*toArrayC.data() == TestObject(7));
+ EATEST_VERIFY(toArrayC.front() == TestObject(7));
+ EATEST_VERIFY(toArrayC.back() == TestObject(7));
+ }
+
+ {
+ using namespace eastl;
+
+ // iterator begin();
+ // const_iterator begin() const;
+ // iterator end();
+ // const_iterator end() const;
+ // reverse_iterator rbegin();
+ // const_reverse_iterator rbegin() const;
+ // reverse_iterator rend();
+ // const_reverse_iterator rend() const;
+
+ vector<int> intArray(20);
+ for (i = 0; i < 20; i++)
+ intArray[i] = (int)i;
+
+ i = 0;
+ for (vector<int>::iterator it = intArray.begin(); it != intArray.end(); ++it, ++i)
+ EATEST_VERIFY(*it == (int)i);
+
+ i = intArray.size() - 1;
+ for (vector<int>::reverse_iterator itr = intArray.rbegin(); itr != intArray.rend(); ++itr, --i)
+ EATEST_VERIFY(*itr == (int)i);
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // void swap(vector& x);
+ // void assign(size_type n, const value_type& value);
+ // void assign(InputIterator first, InputIterator last);
+
+ const int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17};
+ const int B[] = {99, 99, 99, 99, 99};
+ const size_t N = sizeof(A) / sizeof(int);
+ const size_t M = sizeof(B) / sizeof(int);
+
+ // assign from pointer range
+ vector<int> v3;
+ v3.assign(A, A + N);
+ EATEST_VERIFY(equal(v3.begin(), v3.end(), A));
+ EATEST_VERIFY(v3.size() == N);
+
+ // assign from iterator range
+ vector<int> v4;
+ v4.assign(v3.begin(), v3.end());
+ EATEST_VERIFY(equal(v4.begin(), v4.end(), A));
+ EATEST_VERIFY(equal(A, A + N, v4.begin()));
+
+ // assign from initializer range with resize
+ v4.assign(M, 99);
+ EATEST_VERIFY(equal(v4.begin(), v4.end(), B));
+ EATEST_VERIFY(equal(B, B + M, v4.begin()));
+ EATEST_VERIFY((v4.size() == M) && (M != N));
+
+#if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ // void assign(std::initializer_list<T> ilist);
+ v4.assign({0, 1, 2, 3});
+ EATEST_VERIFY(v4.size() == 4);
+ EATEST_VERIFY((v4[0] == 0) && (v4[3] == 3));
+#endif
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // reference operator[](size_type n);
+ // const_reference operator[](size_type n) const;
+ // reference at(size_type n);
+ // const_reference at(size_type n) const;
+
+ vector<int> intArray(5);
+ EATEST_VERIFY(intArray[3] == 0);
+ EATEST_VERIFY(intArray.at(3) == 0);
+
+ vector<TestObject> toArray(5);
+ EATEST_VERIFY(toArray[3] == TestObject(0));
+ EATEST_VERIFY(toArray.at(3) == TestObject(0));
+
+#if EASTL_EXCEPTIONS_ENABLED
+ vector<TestObject> vec01(5);
+
+ try
+ {
+ TestObject& r01 = vec01.at(6);
+ EATEST_VERIFY(!(r01 == TestObject(0))); // Should not get here, as exception thrown.
+ }
+ catch (std::out_of_range&) { EATEST_VERIFY(true); }
+ catch (...) { EATEST_VERIFY(false); }
+#endif
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // void push_back(const value_type& value);
+ // void push_back();
+ // void pop_back();
+ // void push_back(T&& value);
+
+ vector<int> intArray(6);
+ for (i = 0; i < 6; i++)
+ intArray[i] = (int)i;
+
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 6);
+ EATEST_VERIFY(intArray[5] == 5);
+
+ for (i = 0; i < 40; i++)
+ {
+ int& ref = intArray.push_back();
+ EATEST_VERIFY(&ref == &intArray.back());
+ ref = 98;
+ }
+
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 46);
+ EATEST_VERIFY(intArray[45] == 98);
+
+ for (i = 0; i < 40; i++)
+ intArray.push_back(99);
+
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 86);
+ EATEST_VERIFY(intArray[85] == 99);
+
+ for (i = 0; i < 30; i++)
+ intArray.pop_back();
+
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 56);
+ EATEST_VERIFY(intArray[5] == 5);
+ }
+
+ {
+ using namespace eastl;
+
+ // void* push_back_uninitialized();
+
+ int64_t toCount0 = TestObject::sTOCount;
+
+ vector<TestObject> vTO;
+ EATEST_VERIFY(TestObject::sTOCount == toCount0);
+
+ for (i = 0; i < 25; i++)
+ {
+ void* pTO = vTO.push_back_uninitialized();
+ EATEST_VERIFY(TestObject::sTOCount == (toCount0 + static_cast<int64_t>(i)));
+
+ new (pTO) TestObject((int)i);
+ EATEST_VERIFY(TestObject::sTOCount == (toCount0 + static_cast<int64_t>(i) + 1));
+ EATEST_VERIFY(vTO.back().mX == (int)i);
+ EATEST_VERIFY(vTO.validate());
+ }
+ }
+
+ {
+ using namespace eastl;
+
+ // template<class... Args>
+ // iterator emplace(const_iterator position, Args&&... args);
+
+ // template<class... Args>
+ // void emplace_back(Args&&... args);
+
+ // iterator insert(const_iterator position, value_type&& value);
+ // void push_back(value_type&& value);
+
+ TestObject::Reset();
+
+ vector<TestObject> toVectorA;
+
+ TestObject& ref = toVectorA.emplace_back(2, 3, 4);
+ EATEST_VERIFY((toVectorA.size() == 1) && (toVectorA.back().mX == (2 + 3 + 4)) &&
+ (TestObject::sTOCtorCount == 1));
+ EATEST_VERIFY(ref.mX == (2 + 3 + 4));
+
+ toVectorA.emplace(toVectorA.begin(), 3, 4, 5);
+ EATEST_VERIFY((toVectorA.size() == 2) && (toVectorA.front().mX == (3 + 4 + 5)) &&
+ (TestObject::sTOCtorCount == 3)); // 3 because the original count of 1, plus the existing vector
+ // element will be moved, plus the one being emplaced.
+
+ TestObject::Reset();
+
+ // void push_back(T&& x);
+ // iterator insert(const_iterator position, T&& x);
+
+ vector<TestObject> toVectorC;
+
+ toVectorC.push_back(TestObject(2, 3, 4));
+ EATEST_VERIFY((toVectorC.size() == 1) && (toVectorC.back().mX == (2 + 3 + 4)) &&
+ (TestObject::sTOMoveCtorCount == 1));
+
+ toVectorC.insert(toVectorC.begin(), TestObject(3, 4, 5));
+ EATEST_VERIFY((toVectorC.size() == 2) && (toVectorC.front().mX == (3 + 4 + 5)) &&
+ (TestObject::sTOMoveCtorCount == 3)); // 3 because the original count of 1, plus the existing
+ // vector element will be moved, plus the one being
+ // emplaced.
+ }
+
+ // We don't check for TestObject::IsClear because we messed with state above and don't currently have a matching set
+ // of ctors and dtors.
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // iterator erase(iterator position);
+ // iterator erase(iterator first, iterator last);
+ // iterator erase_unsorted(iterator position);
+ // iterator erase_first(const T& pos);
+ // iterator erase_first_unsorted(const T& pos);
+ // iterator erase_last(const T& pos);
+ // iterator erase_last_unsorted(const T& pos);
+ // void clear();
+
+ vector<int> intArray(20);
+ for (i = 0; i < 20; i++)
+ intArray[i] = (int)i;
+
+ // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
+
+ intArray.erase(intArray.begin() +
+ 10); // Becomes: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 19);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[10] == 11);
+ EATEST_VERIFY(intArray[18] == 19);
+
+ intArray.erase(intArray.begin() + 10,
+ intArray.begin() + 15); // Becomes: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 17, 18, 19
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 14);
+ EATEST_VERIFY(intArray[9] == 9);
+ EATEST_VERIFY(intArray[13] == 19);
+
+ intArray.erase(intArray.begin() + 1, intArray.begin() + 5); // Becomes: 0, 5, 6, 7, 8, 9, 16, 17, 18, 19
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 10);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 5);
+ EATEST_VERIFY(intArray[9] == 19);
+
+ intArray.erase(intArray.begin() + 7, intArray.begin() + 10); // Becomes: 0, 5, 6, 7, 8, 9, 16
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 7);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 5);
+ EATEST_VERIFY(intArray[6] == 16);
+
+ intArray.clear();
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.empty());
+ EATEST_VERIFY(intArray.size() == 0);
+
+ vector<TestObject> toArray(20);
+ for (i = 0; i < 20; i++)
+ toArray[i] = TestObject((int)i);
+
+ toArray.erase(toArray.begin() + 10);
+ EATEST_VERIFY(toArray.validate());
+ EATEST_VERIFY(toArray.size() == 19);
+ EATEST_VERIFY(toArray[10] == TestObject(11));
+
+ toArray.erase(toArray.begin() + 10, toArray.begin() + 15);
+ EATEST_VERIFY(toArray.validate());
+ EATEST_VERIFY(toArray.size() == 14);
+ EATEST_VERIFY(toArray[10] == TestObject(16));
+
+ toArray.clear();
+ EATEST_VERIFY(toArray.validate());
+ EATEST_VERIFY(toArray.empty());
+ EATEST_VERIFY(toArray.size() == 0);
+
+ // iterator erase_unsorted(iterator position);
+ intArray.resize(20);
+ for (i = 0; i < 20; i++)
+ intArray[i] = (int)i;
+
+ intArray.erase_unsorted(intArray.begin() + 0);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 19);
+ EATEST_VERIFY(intArray[0] == 19);
+ EATEST_VERIFY(intArray[1] == 1);
+ EATEST_VERIFY(intArray[18] == 18);
+
+ intArray.erase_unsorted(intArray.begin() + 10);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 18);
+ EATEST_VERIFY(intArray[0] == 19);
+ EATEST_VERIFY(intArray[10] == 18);
+ EATEST_VERIFY(intArray[17] == 17);
+
+ intArray.erase_unsorted(intArray.begin() + 17);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 17);
+ EATEST_VERIFY(intArray[0] == 19);
+ EATEST_VERIFY(intArray[10] == 18);
+ EATEST_VERIFY(intArray[16] == 16);
+
+ // iterator erase_first(iterator position);
+ intArray.resize(20);
+ for (i = 0; i < 20; i++)
+ intArray[i] = (int)i % 3; // (i.e. 0,1,2,0,1,2...)
+
+ intArray.erase_first(1);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 19);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 2);
+ EATEST_VERIFY(intArray[2] == 0);
+ EATEST_VERIFY(intArray[3] == 1);
+ EATEST_VERIFY(intArray[18] == 1);
+
+ intArray.erase_first(1);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 18);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 2);
+ EATEST_VERIFY(intArray[2] == 0);
+ EATEST_VERIFY(intArray[3] == 2);
+ EATEST_VERIFY(intArray[17] == 1);
+
+ intArray.erase_first(0);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 17);
+ EATEST_VERIFY(intArray[0] == 2);
+ EATEST_VERIFY(intArray[1] == 0);
+ EATEST_VERIFY(intArray[2] == 2);
+ EATEST_VERIFY(intArray[3] == 0);
+ EATEST_VERIFY(intArray[16] == 1);
+
+ // iterator erase_first_unsorted(const T& val);
+ intArray.resize(20);
+ for (i = 0; i < 20; i++)
+ intArray[i] = (int) i/2; // every two values are the same (i.e. 0,0,1,1,2,2,3,3...)
+
+ intArray.erase_first_unsorted(1);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 19);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 0);
+ EATEST_VERIFY(intArray[2] == 9);
+ EATEST_VERIFY(intArray[3] == 1);
+ EATEST_VERIFY(intArray[18] == 9);
+
+ intArray.erase_first_unsorted(1);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 18);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 0);
+ EATEST_VERIFY(intArray[2] == 9);
+ EATEST_VERIFY(intArray[3] == 9);
+ EATEST_VERIFY(intArray[17] == 8);
+
+ intArray.erase_first_unsorted(0);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 17);
+ EATEST_VERIFY(intArray[0] == 8);
+ EATEST_VERIFY(intArray[1] == 0);
+ EATEST_VERIFY(intArray[2] == 9);
+ EATEST_VERIFY(intArray[3] == 9);
+ EATEST_VERIFY(intArray[16] == 8);
+
+ // iterator erase_last(const T& val);
+ intArray.resize(20);
+ for (i = 0; i < 20; i++)
+ intArray[i] = (int)i % 3; // (i.e. 0,1,2,0,1,2...)
+
+ intArray.erase_last(1);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 19);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 1);
+ EATEST_VERIFY(intArray[2] == 2);
+ EATEST_VERIFY(intArray[3] == 0);
+ EATEST_VERIFY(intArray[15] == 0);
+ EATEST_VERIFY(intArray[16] == 1);
+ EATEST_VERIFY(intArray[17] == 2);
+ EATEST_VERIFY(intArray[18] == 0);
+
+ intArray.erase_last(1);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 18);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 1);
+ EATEST_VERIFY(intArray[2] == 2);
+ EATEST_VERIFY(intArray[3] == 0);
+ EATEST_VERIFY(intArray[14] == 2);
+ EATEST_VERIFY(intArray[15] == 0);
+ EATEST_VERIFY(intArray[16] == 2);
+ EATEST_VERIFY(intArray[17] == 0);
+
+ intArray.erase_last(0);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 17);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 1);
+ EATEST_VERIFY(intArray[2] == 2);
+ EATEST_VERIFY(intArray[3] == 0);
+ EATEST_VERIFY(intArray[13] == 1);
+ EATEST_VERIFY(intArray[14] == 2);
+ EATEST_VERIFY(intArray[15] == 0);
+ EATEST_VERIFY(intArray[16] == 2);
+
+ // iterator erase_last_unsorted(const T& val);
+ intArray.resize(20);
+ for (i = 0; i < 20; i++)
+ intArray[i] = (int)i / 2; // every two values are the same (i.e. 0,0,1,1,2,2,3,3...)
+
+ intArray.erase_last_unsorted(1);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 19);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 0);
+ EATEST_VERIFY(intArray[2] == 1);
+ EATEST_VERIFY(intArray[3] == 9);
+ EATEST_VERIFY(intArray[18] == 9);
+
+ intArray.erase_last_unsorted(1);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 18);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 0);
+ EATEST_VERIFY(intArray[2] == 9);
+ EATEST_VERIFY(intArray[3] == 9);
+ EATEST_VERIFY(intArray[17] == 8);
+
+ intArray.erase_last_unsorted(0);
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY(intArray.size() == 17);
+ EATEST_VERIFY(intArray[0] == 0);
+ EATEST_VERIFY(intArray[1] == 8);
+ EATEST_VERIFY(intArray[2] == 9);
+ EATEST_VERIFY(intArray[3] == 9);
+ EATEST_VERIFY(intArray[16] == 8);
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // iterator erase(reverse_iterator position);
+ // iterator erase(reverse_iterator first, reverse_iterator last);
+ // iterator erase_unsorted(reverse_iterator position);
+
+ vector<int> intVector;
+
+ for (i = 0; i < 20; i++)
+ intVector.push_back((int)i);
+ EATEST_VERIFY((intVector.size() == 20) && (intVector[0] == 0) && (intVector[19] == 19));
+
+ vector<int>::reverse_iterator r2A = intVector.rbegin();
+ vector<int>::reverse_iterator r2B = r2A + 3;
+ intVector.erase(r2A, r2B);
+ EATEST_VERIFY((intVector.size() == 17));
+ EATEST_VERIFY((intVector[0] == 0));
+ EATEST_VERIFY((intVector[16] == 16));
+
+ r2B = intVector.rend();
+ r2A = r2B - 3;
+ intVector.erase(r2A, r2B);
+ EATEST_VERIFY((intVector.size() == 14));
+ EATEST_VERIFY((intVector[0] == 3));
+ EATEST_VERIFY((intVector[13] == 16));
+
+ r2B = intVector.rend() - 1;
+ intVector.erase(r2B);
+ EATEST_VERIFY((intVector.size() == 13));
+ EATEST_VERIFY((intVector[0] == 4));
+ EATEST_VERIFY((intVector[12] == 16));
+
+ r2B = intVector.rbegin();
+ intVector.erase(r2B);
+ EATEST_VERIFY((intVector.size() == 12));
+ EATEST_VERIFY((intVector[0] == 4));
+ EATEST_VERIFY((intVector[11] == 15));
+
+ r2A = intVector.rbegin();
+ r2B = intVector.rend();
+ intVector.erase(r2A, r2B);
+ EATEST_VERIFY(intVector.size() == 0);
+
+ // iterator erase_unsorted(iterator position);
+ intVector.resize(20);
+ for (i = 0; i < 20; i++)
+ intVector[i] = (int)i;
+
+ intVector.erase_unsorted(intVector.rbegin() + 0);
+ EATEST_VERIFY(intVector.validate());
+ EATEST_VERIFY(intVector.size() == 19);
+ EATEST_VERIFY(intVector[0] == 0);
+ EATEST_VERIFY(intVector[10] == 10);
+ EATEST_VERIFY(intVector[18] == 18);
+
+ intVector.erase_unsorted(intVector.rbegin() + 10);
+ EATEST_VERIFY(intVector.validate());
+ EATEST_VERIFY(intVector.size() == 18);
+ EATEST_VERIFY(intVector[0] == 0);
+ EATEST_VERIFY(intVector[8] == 18);
+ EATEST_VERIFY(intVector[17] == 17);
+
+ intVector.erase_unsorted(intVector.rbegin() + 17);
+ EATEST_VERIFY(intVector.validate());
+ EATEST_VERIFY(intVector.size() == 17);
+ EATEST_VERIFY(intVector[0] == 17);
+ EATEST_VERIFY(intVector[8] == 18);
+ EATEST_VERIFY(intVector[16] == 16);
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ {
+ const int valueToRemove = 44;
+ int testValues[] = {42, 43, 44, 45, 46, 47};
+
+ eastl::vector<eastl::unique_ptr<int>> v;
+
+ for(auto& te : testValues)
+ v.push_back(eastl::make_unique<int>(te));
+
+ // remove 'valueToRemove' from the container
+ auto iterToRemove = eastl::find_if(v.begin(), v.end(), [&](eastl::unique_ptr<int>& e)
+ { return *e == valueToRemove; });
+ v.erase_unsorted(iterToRemove);
+ EATEST_VERIFY(v.size() == 5);
+
+ // verify 'valueToRemove' is no longer in the container
+ EATEST_VERIFY(eastl::find_if(v.begin(), v.end(), [&](eastl::unique_ptr<int>& e)
+ { return *e == valueToRemove; }) == v.end());
+
+ // verify all other expected values are in the container
+ for (auto& te : testValues)
+ {
+ if (te == valueToRemove)
+ continue;
+
+ EATEST_VERIFY(eastl::find_if(v.begin(), v.end(), [&](eastl::unique_ptr<int>& e)
+ { return *e == te; }) != v.end());
+ }
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // iterator insert(iterator position, const value_type& value);
+ // iterator insert(iterator position, size_type n, const value_type& value);
+ // iterator insert(iterator position, InputIterator first, InputIterator last);
+ // iterator insert(const_iterator position, std::initializer_list<T> ilist);
+
+ vector<int> v(7, 13);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector", 13, 13, 13, 13, 13, 13, 13, -1));
+
+ // insert at end of size and capacity.
+ v.insert(v.end(), 99);
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 13, 13, 13, 13, 13, 13, 99, -1));
+
+ // insert at end of size.
+ v.reserve(30);
+ v.insert(v.end(), 999);
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(
+ VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 13, 13, 13, 13, 13, 13, 99, 999, -1));
+
+ // Insert in middle.
+ vector<int>::iterator it = v.begin() + 7;
+ it = v.insert(it, 49);
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(
+ VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 13, 13, 13, 13, 13, 13, 49, 99, 999, -1));
+
+ // Insert multiple copies
+ it = v.insert(v.begin() + 5, 3, 42);
+ EATEST_VERIFY(it == v.begin() + 5);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 13, 13, 13, 13, 42, 42, 42, 13, 13,
+ 49, 99, 999, -1));
+
+ // Insert multiple copies with count == 0
+ vector<int>::iterator at = v.end();
+ it = v.insert(at, 0, 666);
+ EATEST_VERIFY(it == at);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 13, 13, 13, 13, 42, 42, 42, 13, 13,
+ 49, 99, 999, -1));
+ // Insert iterator range
+ const int data[] = {2, 3, 4, 5};
+ it = v.insert(v.begin() + 1, data, data + 4);
+ EATEST_VERIFY(it == v.begin() + 1);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 2, 3, 4, 5, 13, 13, 13, 13, 42, 42,
+ 42, 13, 13, 49, 99, 999, -1));
+
+ // Insert empty iterator range
+ at = v.begin() + 1;
+ it = v.insert(at, data + 4, data + 4);
+ EATEST_VERIFY(it == at);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 2, 3, 4, 5, 13, 13, 13, 13, 42, 42,
+ 42, 13, 13, 49, 99, 999, -1));
+
+ // Insert with reallocation
+ it = v.insert(v.end() - 3, 6, 17);
+ EATEST_VERIFY(it == v.end() - (3 + 6));
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 2, 3, 4, 5, 13, 13, 13, 13, 42, 42,
+ 42, 13, 13, 17, 17, 17, 17, 17, 17, 49, 99, 999, -1));
+
+ // Single insert with reallocation
+ vector<int> v2;
+ v2.reserve(100);
+ v2.insert(v2.begin(), 100, 17);
+ EATEST_VERIFY(v2.size() == 100);
+ EATEST_VERIFY(v2[0] == 17);
+ v2.insert(v2.begin() + 50, 42);
+ EATEST_VERIFY(v2.size() == 101);
+ EATEST_VERIFY(v2[50] == 42);
+
+ // Test insertion of values that come from within the vector.
+ v.insert(v.end() - 3, v.end() - 5, v.end());
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 2, 3, 4, 5, 13, 13, 13, 13, 42, 42,
+ 42, 13, 13, 17, 17, 17, 17, 17, 17, 17, 17, 49, 99, 999, 49, 99, 999, -1));
+
+ v.insert(v.end() - 3, v.back());
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 2, 3, 4, 5, 13, 13, 13, 13, 42, 42,
+ 42, 13, 13, 17, 17, 17, 17, 17, 17, 17, 17, 49, 99, 999, 999, 49, 99, 999, -1));
+
+ v.insert(v.end() - 3, 2, v[v.size() - 3]);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.insert", 13, 2, 3, 4, 5, 13, 13, 13, 13, 42, 42,
+ 42, 13, 13, 17, 17, 17, 17, 17, 17, 17, 17, 49, 99, 999, 999, 49, 49, 49, 99, 999,
+ -1));
+
+#if !defined(EASTL_STD_ITERATOR_CATEGORY_ENABLED) && !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ // std::vector / eastl::vector
+ std::vector<TestObject> stdV(10);
+ eastl::vector<TestObject> eastlV(10);
+
+ eastlV.insert(eastlV.end(), stdV.begin(), stdV.end());
+ stdV.insert(stdV.end(), eastlV.begin(), eastlV.end());
+
+ EATEST_VERIFY(eastlV.size() == 20);
+ EATEST_VERIFY(stdV.size() == 30);
+
+ // std::string / eastl::vector
+ std::string stdString("blah");
+ eastl::vector<char8_t> eastlVString;
+
+ eastlVString.assign(stdString.begin(), stdString.end());
+#endif
+
+// iterator insert(const_iterator position, std::initializer_list<T> ilist);
+#if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ // iterator insert(const_iterator position, std::initializer_list<T> ilist);
+ eastl::vector<float> floatVector;
+
+ floatVector.insert(floatVector.end(), {0, 1, 2, 3});
+ EATEST_VERIFY(floatVector.size() == 4);
+ EATEST_VERIFY((floatVector[0] == 0) && (floatVector[3] == 3));
+#endif
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ {
+ // Test insert move objects
+ eastl::vector<TestObject> toVector1;
+ toVector1.reserve(20);
+ for(int idx = 0; idx < 2; ++idx)
+ toVector1.push_back(TestObject(idx));
+
+ eastl::vector<TestObject> toVector2;
+ for(int idx = 0; idx < 3; ++idx)
+ toVector2.push_back(TestObject(10 + idx));
+
+ // Insert more objects than the existing number using insert with iterator
+ TestObject::Reset();
+ eastl::vector<TestObject>::iterator it;
+ it = toVector1.insert(toVector1.begin(), toVector2.begin(), toVector2.end());
+ EATEST_VERIFY(it == toVector1.begin());
+ EATEST_VERIFY(VerifySequence(toVector1.begin(), toVector1.end(), int(), "vector.insert", 10, 11, 12, 0, 1, -1));
+ EATEST_VERIFY(TestObject::sTOMoveCtorCount + TestObject::sTOMoveAssignCount == 2 &&
+ TestObject::sTOCopyCtorCount + TestObject::sTOCopyAssignCount == 3); // Move 2 existing elements and copy the 3 inserted
+
+ eastl::vector<TestObject> toVector3;
+ toVector3.push_back(TestObject(20));
+
+ // Insert less objects than the existing number using insert with iterator
+ TestObject::Reset();
+ it = toVector1.insert(toVector1.begin(), toVector3.begin(), toVector3.end());
+ EATEST_VERIFY(VerifySequence(toVector1.begin(), toVector1.end(), int(), "vector.insert", 20, 10, 11, 12, 0, 1, -1));
+ EATEST_VERIFY(it == toVector1.begin());
+ EATEST_VERIFY(TestObject::sTOMoveCtorCount + TestObject::sTOMoveAssignCount == 5 &&
+ TestObject::sTOCopyCtorCount + TestObject::sTOCopyAssignCount == 1); // Move 5 existing elements and copy the 1 inserted
+
+ // Insert more objects than the existing number using insert without iterator
+ TestObject::Reset();
+ it = toVector1.insert(toVector1.begin(), 1, TestObject(17));
+ EATEST_VERIFY(it == toVector1.begin());
+ EATEST_VERIFY(VerifySequence(toVector1.begin(), toVector1.end(), int(), "vector.insert", 17, 20, 10, 11, 12, 0, 1, -1));
+ EATEST_VERIFY(TestObject::sTOMoveCtorCount + TestObject::sTOMoveAssignCount == 6 &&
+ TestObject::sTOCopyCtorCount + TestObject::sTOCopyAssignCount == 2); // Move 6 existing element and copy the 1 inserted +
+ // the temporary one inside the function
+
+ // Insert less objects than the existing number using insert without iterator
+ TestObject::Reset();
+ it = toVector1.insert(toVector1.begin(), 10, TestObject(18));
+ EATEST_VERIFY(it == toVector1.begin());
+ EATEST_VERIFY(VerifySequence(toVector1.begin(), toVector1.end(), int(), "vector.insert", 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 20, 10, 11, 12, 0, 1, -1));
+ EATEST_VERIFY(TestObject::sTOMoveCtorCount + TestObject::sTOMoveAssignCount == 7 &&
+ TestObject::sTOCopyCtorCount + TestObject::sTOCopyAssignCount == 11); // Move 7 existing element and copy the 10 inserted +
+ // the temporary one inside the function
+ }
+
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // reserve / resize / capacity / clear
+ vector<int> v(10, 17);
+ v.reserve(20);
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(v.size() == 10);
+ EATEST_VERIFY(v.capacity() == 20);
+
+ v.resize(7); // Shrink
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(v.capacity() == 20);
+
+ v.resize(17); // Grow without reallocation
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(v.capacity() == 20);
+
+ v.resize(42); // Grow with reallocation
+ vector<int>::size_type c = v.capacity();
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(v[41] == 0);
+ EATEST_VERIFY(c >= 42);
+
+ v.resize(44, 19); // Grow with reallocation
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(v[43] == 19);
+
+ c = v.capacity();
+ v.clear();
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(v.empty());
+ EATEST_VERIFY(v.capacity() == c);
+
+ // How to shrink a vector's capacity to be equal to its size.
+ vector<int>(v).swap(v);
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(v.empty());
+ EATEST_VERIFY(v.capacity() == v.size());
+
+ // How to completely clear a vector (size = 0, capacity = 0, no allocation).
+ vector<int>().swap(v);
+ EATEST_VERIFY(v.validate());
+ EATEST_VERIFY(v.empty());
+ EATEST_VERIFY(v.capacity() == 0);
+ }
+
+ { // set_capacity / reset
+ using namespace eastl;
+
+ const int intArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17};
+ const size_t kIntArraySize = sizeof(intArray) / sizeof(int);
+
+ vector<int> v(30);
+ EATEST_VERIFY(v.capacity() >= 30);
+
+ v.assign(intArray, intArray + kIntArraySize);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.assign", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, -1));
+
+ // set_capacity
+ v.set_capacity();
+ EATEST_VERIFY(v.capacity() == v.size());
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.set_capacity", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, -1));
+
+ v.set_capacity(0);
+ EATEST_VERIFY(v.size() == 0);
+ EATEST_VERIFY(v.data() == NULL);
+ EATEST_VERIFY(v.capacity() == v.size());
+
+ // Test set_capacity doing a realloc of non-scalar class types.
+ eastl::vector<TestObject> toArray;
+ toArray.resize(16);
+ toArray.set_capacity(64);
+ EATEST_VERIFY(v.validate());
+
+ // reset_lose_memory
+ int* const pData = v.data();
+ vector<int>::size_type n = v.size();
+ vector<int>::allocator_type& allocator = v.get_allocator();
+ v.reset_lose_memory();
+ allocator.deallocate(pData, n);
+ EATEST_VERIFY(v.capacity() == 0);
+ EATEST_VERIFY(VerifySequence(v.begin(), v.end(), int(), "vector.reset", -1));
+
+ // Test set_capacity make a move when reducing size
+ vector<TestObject> toArray2(10, TestObject(7));
+ TestObject::Reset();
+ toArray2.set_capacity(5);
+ EATEST_VERIFY(TestObject::sTOMoveCtorCount == 5 &&
+ TestObject::sTOCopyCtorCount + TestObject::sTOCopyAssignCount == 0); // Move the 5 existing elements, no copy
+ EATEST_VERIFY(VerifySequence(toArray2.begin(), toArray2.end(), int(), "vector.set_capacity", 7, 7, 7, 7, 7, -1));
+ }
+
+ TestObject::Reset();
+
+ {
+ using namespace eastl;
+
+ // Regression for user-reported possible bug.
+ {
+ MallocAllocator::reset_all();
+
+ eastl::vector<int, MallocAllocator> v;
+ v.reserve(32); // does allocation
+
+ v.push_back(37); // may reallocate if we do enough of these to exceed 32
+ v.erase(v.begin());
+
+ v.set_capacity(0);
+
+ // Verify that all memory is freed by the set_capacity function.
+ EATEST_VERIFY((MallocAllocator::mAllocCountAll > 0) &&
+ (MallocAllocator::mAllocCountAll == MallocAllocator::mFreeCountAll));
+
+ MallocAllocator::reset_all();
+ }
+
+ {
+ MallocAllocator::reset_all();
+
+ eastl::vector<int, MallocAllocator> v;
+ v.reserve(32); // does allocation
+
+ for (int j = 0; j < 40; j++)
+ v.push_back(37); // may reallocate if we do enough of these to exceed 32
+ for (int k = 0; k < 40; k++)
+ v.erase(v.begin());
+
+ v.set_capacity(0);
+
+ // Verify that all memory is freed by the set_capacity function.
+ EATEST_VERIFY((MallocAllocator::mAllocCountAll > 0) &&
+ (MallocAllocator::mAllocCountAll == MallocAllocator::mFreeCountAll));
+
+ MallocAllocator::reset_all();
+ }
+ }
+
+ {
+ using namespace eastl;
+
+ // bool validate() const;
+ // bool validate_iterator(const_iterator i) const;
+
+ vector<int> intArray(20);
+
+ EATEST_VERIFY(intArray.validate());
+ EATEST_VERIFY((intArray.validate_iterator(intArray.begin()) & (isf_valid | isf_can_dereference)) != 0);
+ EATEST_VERIFY(intArray.validate_iterator(NULL) == isf_none);
+ }
+
+ {
+ using namespace eastl;
+
+ // global operators (==, !=, <, etc.)
+ vector<int> intArray1(10);
+ vector<int> intArray2(10);
+
+ for (i = 0; i < intArray1.size(); i++)
+ {
+ intArray1[i] = (int)i; // Make intArray1 equal to intArray2.
+ intArray2[i] = (int)i;
+ }
+
+ EATEST_VERIFY((intArray1 == intArray2));
+ EATEST_VERIFY(!(intArray1 != intArray2));
+ EATEST_VERIFY((intArray1 <= intArray2));
+ EATEST_VERIFY((intArray1 >= intArray2));
+ EATEST_VERIFY(!(intArray1 < intArray2));
+ EATEST_VERIFY(!(intArray1 > intArray2));
+
+ intArray1.push_back(100); // Make intArray1 less than intArray2.
+ intArray2.push_back(101);
+
+ EATEST_VERIFY(!(intArray1 == intArray2));
+ EATEST_VERIFY((intArray1 != intArray2));
+ EATEST_VERIFY((intArray1 <= intArray2));
+ EATEST_VERIFY(!(intArray1 >= intArray2));
+ EATEST_VERIFY((intArray1 < intArray2));
+ EATEST_VERIFY(!(intArray1 > intArray2));
+ }
+
+ // three way comparison operator
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+ {
+ using namespace eastl;
+
+ vector<int> intArray1(10);
+ vector<int> intArray2(10);
+
+ for (i = 0; i < intArray1.size(); i++)
+ {
+ intArray1[i] = (int)i; // Make intArray1 equal to intArray2.
+ intArray2[i] = (int)i;
+ }
+
+ // Verify equality between intArray1 and intArray2
+ EATEST_VERIFY((intArray1 <=> intArray2) == 0);
+ EATEST_VERIFY(!((intArray1 <=> intArray2) != 0));
+ EATEST_VERIFY((intArray1 <=> intArray2) <= 0);
+ EATEST_VERIFY((intArray1 <=> intArray2) >= 0);
+ EATEST_VERIFY(!((intArray1 <=> intArray2) < 0));
+ EATEST_VERIFY(!((intArray1 <=> intArray2) > 0));
+
+ intArray1.push_back(100); // Make intArray1 less than intArray2.
+ intArray2.push_back(101);
+
+ // Verify intArray1 < intArray2
+ EATEST_VERIFY(!((intArray1 <=> intArray2) == 0));
+ EATEST_VERIFY((intArray1 <=> intArray2) != 0);
+ EATEST_VERIFY((intArray1 <=> intArray2) <= 0);
+ EATEST_VERIFY(!((intArray1 <=> intArray2) >= 0));
+ EATEST_VERIFY(((intArray1 <=> intArray2) < 0));
+ EATEST_VERIFY(!((intArray1 <=> intArray2) > 0));
+
+ for (i = 0; i < 3; i++) // Make the length of intArray2 less than intArray1
+ intArray2.pop_back();
+
+ // Verify intArray2.size() < intArray1.size() and intArray2 is a subset of intArray1
+ EATEST_VERIFY(!((intArray1 <=> intArray2) == 0));
+ EATEST_VERIFY((intArray1 <=> intArray2) != 0);
+ EATEST_VERIFY((intArray1 <=> intArray2) >= 0);
+ EATEST_VERIFY(!((intArray1 <=> intArray2) <= 0));
+ EATEST_VERIFY(((intArray1 <=> intArray2) > 0));
+ EATEST_VERIFY(!((intArray1 <=> intArray2) < 0));
+ }
+
+ {
+ using namespace eastl;
+
+ vector<int> intArray1 = {1, 2, 3, 4, 5, 6, 7};
+ vector<int> intArray2 = {7, 6, 5, 4, 3, 2, 1};
+ vector<int> intArray3 = {1, 2, 3, 4};
+
+ struct weak_ordering_vector
+ {
+ vector<int> vec;
+ inline std::weak_ordering operator<=>(const weak_ordering_vector& b) const { return vec <=> b.vec; }
+ };
+
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_vector{intArray1}, weak_ordering_vector{intArray2}) == std::weak_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_vector{intArray3}, weak_ordering_vector{intArray1}) == std::weak_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_vector{intArray2}, weak_ordering_vector{intArray1}) == std::weak_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_vector{intArray2}, weak_ordering_vector{intArray3}) == std::weak_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(weak_ordering_vector{intArray1}, weak_ordering_vector{intArray1}) == std::weak_ordering::equivalent);
+
+ struct strong_ordering_vector
+ {
+ vector<int> vec;
+ inline std::strong_ordering operator<=>(const strong_ordering_vector& b) const { return vec <=> b.vec; }
+ };
+
+ EATEST_VERIFY(synth_three_way{}(strong_ordering_vector{intArray1}, strong_ordering_vector{intArray2}) == std::strong_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(strong_ordering_vector{intArray3}, strong_ordering_vector{intArray1}) == std::strong_ordering::less);
+ EATEST_VERIFY(synth_three_way{}(strong_ordering_vector{intArray2}, strong_ordering_vector{intArray1}) == std::strong_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(strong_ordering_vector{intArray2}, strong_ordering_vector{intArray3}) == std::strong_ordering::greater);
+ EATEST_VERIFY(synth_three_way{}(strong_ordering_vector{intArray1}, strong_ordering_vector{intArray1}) == std::strong_ordering::equal);
+ }
+#endif
+
+ {
+ using namespace eastl;
+
+ // Test vector<Align64>
+
+ // Aligned objects should be CustomAllocator instead of the default, because the
+ // EASTL default might be unable to do aligned allocations, but CustomAllocator always can.
+ vector<Align64, CustomAllocator> vA64(10);
+
+ vA64.resize(2);
+ EATEST_VERIFY(vA64.size() == 2);
+
+ vA64.push_back(Align64());
+ EATEST_VERIFY(vA64.size() == 3);
+
+ vA64.resize(0);
+ EATEST_VERIFY(vA64.size() == 0);
+
+ vA64.insert(vA64.begin(), Align64());
+ EATEST_VERIFY(vA64.size() == 1);
+
+ vA64.resize(20);
+ EATEST_VERIFY(vA64.size() == 20);
+ }
+
+ {
+ // Misc additional tests
+
+ eastl::vector<int> empty1;
+ EATEST_VERIFY(empty1.data() == NULL);
+ EATEST_VERIFY(empty1.size() == 0);
+ EATEST_VERIFY(empty1.capacity() == 0);
+
+ eastl::vector<int> empty2 = empty1;
+ EATEST_VERIFY(empty2.data() == NULL);
+ EATEST_VERIFY(empty2.size() == 0);
+ EATEST_VERIFY(empty2.capacity() == 0);
+ }
+
+ { // Test whose purpose is to see if calling vector::size() in a const loop results in the compiler optimizing the
+ // size() call to outside the loop.
+ eastl::vector<TestObject> toArray;
+
+ toArray.resize(7);
+
+ for (i = 0; i < toArray.size(); i++)
+ {
+ TestObject& to = toArray[i];
+
+ if (to.mX == 99999)
+ to.mX++;
+ }
+ }
+
+ { // Test assign from iterator type.
+ TestObject to;
+ eastl::vector<TestObject> toTest;
+
+ // InputIterator
+ demoted_iterator<TestObject*, EASTL_ITC_NS::forward_iterator_tag> toInput(&to);
+ toTest.assign(toInput, toInput);
+
+ // ForwardIterator
+ eastl::slist<TestObject> toSList;
+ toTest.assign(toSList.begin(), toSList.end());
+
+ // BidirectionalIterator
+ eastl::list<TestObject> toList;
+ toTest.assign(toList.begin(), toList.end());
+
+ // RandomAccessIterator
+ eastl::deque<TestObject> toDeque;
+ toTest.assign(toDeque.begin(), toDeque.end());
+
+ // ContiguousIterator (note: as of this writing, vector doesn't actually use contiguous_iterator_tag)
+ eastl::vector<TestObject> toArray;
+ toTest.assign(toArray.begin(), toArray.end());
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ { // Test user report that they think they saw code like this leak memory.
+ eastl::vector<int> intTest;
+
+ intTest.push_back(1);
+ intTest = eastl::vector<int>();
+
+ eastl::vector<TestObject> toTest;
+
+ toTest.push_back(TestObject(1));
+ toTest = eastl::vector<TestObject>();
+ }
+
+ EATEST_VERIFY(TestObject::IsClear());
+ TestObject::Reset();
+
+ { // Regression of user error report for the case of vector<const type>.
+ eastl::vector<int> ctorValues;
+
+ for (int v = 0; v < 10; v++)
+ ctorValues.push_back(v);
+
+ eastl::vector<const ConstType> testStruct(ctorValues.begin(), ctorValues.end());
+ eastl::vector<const int> testInt(ctorValues.begin(), ctorValues.end());
+ }
+
+ { // Regression to verify that const vector works.
+ const eastl::vector<int> constIntVector1;
+ EATEST_VERIFY(constIntVector1.empty());
+
+ int intArray[3] = {37, 38, 39};
+ const eastl::vector<int> constIntVector2(intArray, intArray + 3);
+ EATEST_VERIFY(constIntVector2.size() == 3);
+
+ const eastl::vector<int> constIntVector3(4, 37);
+ EATEST_VERIFY(constIntVector3.size() == 4);
+
+ const eastl::vector<int> constIntVector4;
+ const eastl::vector<int> constIntVector5 = constIntVector4;
+ }
+
+ { // Regression to verify that a bug fix for a vector optimization works.
+ eastl::vector<int> intVector1;
+ intVector1.reserve(128);
+ intVector1.resize(128, 37);
+ intVector1.push_back(intVector1.front());
+ EATEST_VERIFY(intVector1.back() == 37);
+
+ eastl::vector<int> intVector2;
+ intVector2.reserve(1024);
+ intVector2.resize(1024, 37);
+ intVector2.resize(2048, intVector2.front());
+ EATEST_VERIFY(intVector2.back() == 37);
+ }
+
+ { // C++11 Range
+// EABase 2.00.34+ has EA_COMPILER_NO_RANGE_BASED_FOR_LOOP, which we can check instead.
+#if (defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700)) || \
+ (defined(__clang__) && (EA_COMPILER_VERSION >= 300) && (__cplusplus >= 201103L)) || \
+ (defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
+ (__cplusplus >= 201103L)
+
+ eastl::vector<float> floatVector;
+
+ floatVector.push_back(0.0);
+ floatVector.push_back(1.0);
+
+ for (auto& f : floatVector)
+ f += 1.0;
+
+ EATEST_VERIFY(floatVector.back() == 2.0);
+#endif
+ }
+
+ {
+// C++11 cbegin, cend, crbegin, crend
+#if !defined(EA_COMPILER_NO_AUTO)
+ // float vector
+ eastl::vector<float> floatVector;
+
+ auto cb = floatVector.cbegin();
+ auto ce = floatVector.cend();
+ auto crb = floatVector.crbegin();
+ auto cre = floatVector.crend();
+
+ EATEST_VERIFY(eastl::distance(cb, ce) == 0);
+ EATEST_VERIFY(eastl::distance(crb, cre) == 0);
+
+ // const float vector
+ const eastl::vector<float> cFloatVector;
+
+ auto ccb = cFloatVector.cbegin();
+ auto cce = cFloatVector.cend();
+ auto ccrb = cFloatVector.crbegin();
+ auto ccre = cFloatVector.crend();
+
+ EATEST_VERIFY(eastl::distance(ccb, cce) == 0);
+ EATEST_VERIFY(eastl::distance(ccrb, ccre) == 0);
+
+#endif
+ }
+
+ {
+ // Regression for failure in DoRealloc's use of uninitialize_move.
+ using namespace eastl;
+
+ const eastl::string str0 = "TestString0";
+ vector<eastl::string> v(1, str0);
+ vector<eastl::string> v_copy;
+
+ // Test operator=
+ v_copy = v;
+ EATEST_VERIFY_MSG(v_copy.size() == 1, "vector string8 copy size");
+ EATEST_VERIFY_MSG(eastl::find(v_copy.begin(), v_copy.end(), str0) != v_copy.end(), "vector copy string8");
+ EATEST_VERIFY_MSG(v.size() == 1, "vector string8 copy size");
+ EATEST_VERIFY_MSG(eastl::find(v.begin(), v.end(), str0) != v.end(), "vector copy string8");
+
+ // Test assign.
+ v.clear();
+ v.push_back(str0);
+ v_copy.assign(v.begin(), v.end());
+ EATEST_VERIFY_MSG(v_copy.size() == 1, "vector string8 copy size");
+ EATEST_VERIFY_MSG(eastl::find(v_copy.begin(), v_copy.end(), str0) != v_copy.end(), "vector copy string8");
+ EATEST_VERIFY_MSG(v.size() == 1, "vector string8 copy size");
+ EATEST_VERIFY_MSG(eastl::find(v.begin(), v.end(), str0) != v.end(), "vector copy string8");
+ }
+
+ {
+ // Regression of vector::operator= for the case of EASTL_ALLOCATOR_COPY_ENABLED=1
+ // For this test we need to use InstanceAllocator to create two containers of the same
+ // type but with different and unequal allocator instances. The bug was that when
+ // EASTL_ALLOCATOR_COPY_ENABLED was enabled operator=(this_type& x) assigned x.mAllocator
+ // to this and then proceeded to assign member elements from x to this. That's invalid
+ // because the existing elements of this were allocated by a different allocator and
+ // will be freed in the future with the allocator copied from x.
+ // The test below should work for the case of EASTL_ALLOCATOR_COPY_ENABLED == 0 or 1.
+ InstanceAllocator::reset_all();
+
+ InstanceAllocator ia0((uint8_t)0);
+ InstanceAllocator ia1((uint8_t)1);
+
+ eastl::vector<int, InstanceAllocator> v0((eastl_size_t)1, (int)0, ia0);
+ eastl::vector<int, InstanceAllocator> v1((eastl_size_t)1, (int)1, ia1);
+
+ EATEST_VERIFY((v0.front() == 0) && (v1.front() == 1));
+#if EASTL_ALLOCATOR_COPY_ENABLED
+ EATEST_VERIFY(v0.get_allocator() != v1.get_allocator());
+#endif
+ v0 = v1;
+ EATEST_VERIFY((v0.front() == 1) && (v1.front() == 1));
+ EATEST_VERIFY(InstanceAllocator::mMismatchCount == 0);
+ EATEST_VERIFY(v0.validate());
+ EATEST_VERIFY(v1.validate());
+#if EASTL_ALLOCATOR_COPY_ENABLED
+ EATEST_VERIFY(v0.get_allocator() == v1.get_allocator());
+#endif
+ }
+
+ {
+ // Test shrink_to_fit
+ eastl::vector<int> v;
+ EATEST_VERIFY(v.capacity() == 0);
+ v.resize(100);
+ EATEST_VERIFY(v.capacity() == 100);
+ v.clear();
+ EATEST_VERIFY(v.capacity() == 100);
+ v.shrink_to_fit();
+ EATEST_VERIFY(v.capacity() == 0);
+ }
+
+ {
+ // Regression for compilation errors found and fixed when integrating into Frostbite.
+ int j = 7;
+
+ eastl::vector<StructWithConstInt> v1;
+ v1.push_back(StructWithConstInt(j));
+
+ eastl::vector<StructWithConstRefToInt> v2;
+ v2.push_back(StructWithConstRefToInt(j));
+ }
+
+ {
+ // Regression for issue with vector containing non-copyable values reported by user
+ eastl::vector<testmovable> moveablevec;
+ testmovable moveable;
+ moveablevec.insert(moveablevec.end(), eastl::move(moveable));
+ }
+
+ {
+ // Calling erase of empty range should not call a move assignment to self
+ eastl::vector<TestMoveAssignToSelf> v1;
+ v1.push_back(TestMoveAssignToSelf());
+ EATEST_VERIFY(!v1[0].mMovedToSelf);
+ v1.erase(v1.begin(), v1.begin());
+ EATEST_VERIFY(!v1[0].mMovedToSelf);
+ }
+
+#if defined(EASTL_TEST_CONCEPT_IMPLS)
+ {
+ // vector default constructor should require no more than Destructible
+ eastl::vector<Destructible> v1;
+ EATEST_VERIFY(v1.empty());
+
+ // some basic vector operations (data(), capacity(), size(), empty(), clear(), erase()) should impose no
+ // requirements beyond Destructible
+ EATEST_VERIFY(v1.empty());
+ EATEST_VERIFY(v1.size() == 0);
+ EATEST_VERIFY(v1.capacity() == 0);
+ EATEST_VERIFY(eastl::distance(v1.data(), v1.data() + v1.size()) == 0);
+ v1.clear();
+ }
+
+ {
+ // vector default constructor should work with DefaultConstructible T
+ eastl::vector<DefaultConstructible> v1;
+ EATEST_VERIFY(v1.empty());
+ }
+
+ {
+ // vector constructor that takes an initial size should only require DefaultConstructible T
+ eastl::vector<DefaultConstructible> v2(2);
+ EATEST_VERIFY(v2.size() == 2 && v2[0].value == v2[1].value &&
+ v2[0].value == DefaultConstructible::defaultValue);
+ }
+
+ {
+ // vector constructor taking an initial size and a value should only require CopyConstructible
+ eastl::vector<CopyConstructible> v3(2, CopyConstructible::Create());
+ EATEST_VERIFY(v3.size() == 2 && v3[0].value == v3[1].value && v3[0].value == CopyConstructible::defaultValue);
+
+ // vector constructor taking a pair of iterators should work for CopyConstructible
+ eastl::vector<CopyConstructible> v4(cbegin(v3), cend(v3));
+ EATEST_VERIFY(v4.size() == 2 && v4[0].value == v4[1].value && v4[0].value == CopyConstructible::defaultValue);
+ }
+
+ {
+ // vector::reserve() should only require MoveInsertible
+ eastl::vector<MoveConstructible> v5;
+ v5.reserve(2);
+ v5.push_back(MoveConstructible::Create());
+ v5.push_back(MoveConstructible::Create());
+ EATEST_VERIFY(v5.size() == 2 && v5[0].value == v5[1].value && v5[0].value == MoveConstructible::defaultValue);
+ v5.pop_back();
+
+ // vector::shrink_to_fit() should only require MoveInsertible
+ v5.shrink_to_fit();
+ EATEST_VERIFY(v5.size() == 1 && v5.capacity() == 1 && v5[0].value == MoveConstructible::defaultValue);
+ }
+
+ {
+ // vector constructor taking a pair of iterators should only require MoveConstructible
+ MoveConstructible moveConstructibleArray[] = {MoveConstructible::Create()};
+ eastl::vector<MoveConstructible> v7(
+ eastl::move_iterator<MoveConstructible*>(eastl::begin(moveConstructibleArray)),
+ eastl::move_iterator<MoveConstructible*>(eastl::end(moveConstructibleArray)));
+ EATEST_VERIFY(v7.size() == 1 && v7[0].value == MoveConstructible::defaultValue);
+ }
+
+ {
+ // vector::swap() should only require Destructible. We also test with DefaultConstructible as it gives us a
+ // testable result.
+
+ eastl::vector<Destructible> v4, v5;
+ eastl::swap(v4, v5);
+ EATEST_VERIFY(v4.empty() && v5.empty());
+
+ eastl::vector<DefaultConstructible> v6(1), v7(2);
+ eastl::swap(v6, v7);
+ EATEST_VERIFY(v6.size() == 2 && v7.size() == 1);
+ }
+
+ {
+ // vector::resize() should only require MoveInsertable and DefaultInsertable
+ eastl::vector<MoveAndDefaultConstructible> v8;
+ v8.resize(2);
+ EATEST_VERIFY(v8.size() == 2 && v8[0].value == v8[1].value && v8[0].value ==
+ MoveAndDefaultConstructible::defaultValue);
+ }
+
+ {
+ eastl::vector<MoveAssignable> v1;
+ // vector::insert(pos, rv) should only require MoveAssignable
+ v1.insert(begin(v1), MoveAssignable::Create());
+ EATEST_VERIFY(v1.size() == 1 && v1.front().value == MoveAssignable::defaultValue);
+ // vector::erase(pos) should only require MoveAssignable
+ v1.erase(begin(v1));
+ EATEST_VERIFY(v1.empty());
+ }
+#endif // EASTL_TEST_CONCEPT_IMPLS
+
+ {
+ // validates our vector implementation does not use 'operator<' on input iterators during vector construction.
+ //
+ struct container_value_type { int data; };
+ struct container_with_custom_iterator
+ {
+ struct iterator
+ {
+ typedef EASTL_ITC_NS::input_iterator_tag iterator_category;
+ typedef int value_type;
+ typedef ptrdiff_t difference_type;
+ typedef int* pointer;
+ typedef int& reference;
+
+ bool operator!=(const iterator&) const { return false; }
+ iterator& operator++() { return *this; }
+ iterator operator++(int) { return *this; }
+ container_value_type operator*() { return {}; }
+ };
+
+ container_with_custom_iterator() EA_NOEXCEPT {}
+
+ iterator begin() const { return {}; }
+ iterator end() const { return {}; }
+ bool empty() const { return false; }
+
+ private:
+ eastl::vector<container_value_type> m_vector;
+ };
+
+ static_assert(!is_less_comparable<container_with_custom_iterator::iterator>::value, "type cannot support comparison by '<' for this test");
+ container_with_custom_iterator ci;
+ eastl::vector<container_value_type> v2(ci.begin(), ci.end());
+ }
+
+ // If the legacy code path is enabled we cannot handle non-copyable types
+ #ifndef EASTL_VECTOR_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ // unique_ptr tests
+ {
+ // Simple move-assignment test to prevent regressions where eastl::vector utilizes operations on T that are not necessary.
+ {
+ eastl::vector<eastl::unique_ptr<int>> v1;
+ eastl::vector<eastl::unique_ptr<int>> v2;
+ v2 = eastl::move(v1);
+ }
+
+ {
+ // This test verifies that eastl::vector can handle the move-assignment case where its utilizes two
+ // different allocator instances that do not compare equal. An example of an allocator that compares equal
+ // but isn't the same object instance is an allocator that shares the same memory allocation mechanism (eg.
+ // malloc). The memory allocated from one instance can be freed by another instance in the case where
+ // allocators compare equal. This test is verifying functionality in the opposite case where allocators
+ // instances do not compare equal and must clean up its own allocated memory.
+ InstanceAllocator::reset_all();
+ {
+ InstanceAllocator a1(uint8_t(0)), a2(uint8_t(1));
+ eastl::vector<eastl::unique_ptr<int>, InstanceAllocator> v1(a1);
+ eastl::vector<eastl::unique_ptr<int>, InstanceAllocator> v2(a2);
+
+ VERIFY(v1.get_allocator() != v2.get_allocator());
+
+ // add some data in the vector so we can move it to the other vector.
+ v1.push_back(nullptr);
+ v1.push_back(nullptr);
+ v1.push_back(nullptr);
+ v1.push_back(nullptr);
+
+ VERIFY(!v1.empty() && v2.empty());
+ v2 = eastl::move(v1);
+ VERIFY(v1.empty() && !v2.empty());
+ v1.swap(v2);
+ VERIFY(!v1.empty() && v2.empty());
+ }
+ VERIFY(InstanceAllocator::mMismatchCount == 0);
+ }
+ }
+ #endif
+
+ {
+ // CustomAllocator has no data members which reduces the size of an eastl::vector via the empty base class optimization.
+ typedef eastl::vector<int, CustomAllocator> EboVector;
+ static_assert(sizeof(EboVector) == 3 * sizeof(void*), "");
+ }
+
+ // eastl::erase / eastl::erase_if tests
+ {
+ {
+ eastl::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ auto numErased = eastl::erase(v, 5);
+ VERIFY((v == eastl::vector<int> {1, 2, 3, 4, 6, 7, 8, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(v, 2);
+ VERIFY((v == eastl::vector<int> {1, 3, 4, 6, 7, 8, 9}));
+ VERIFY(numErased == 1);
+
+ numErased = eastl::erase(v, 9);
+ VERIFY((v == eastl::vector<int> {1, 3, 4, 6, 7, 8}));
+ VERIFY(numErased == 1);
+ }
+
+ {
+ eastl::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ auto numErased = eastl::erase_if(v, [](auto i) { return i % 2 == 0; });
+ VERIFY((v == eastl::vector<int>{1, 3, 5, 7, 9}));
+ VERIFY(numErased == 4);
+ }
+ }
+
+ return nErrorCount;
+}
diff --git a/EASTL/test/source/TestVectorMap.cpp b/EASTL/test/source/TestVectorMap.cpp
new file mode 100644
index 0000000..ca400ed
--- /dev/null
+++ b/EASTL/test/source/TestVectorMap.cpp
@@ -0,0 +1,235 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#include "TestMap.h"
+#include "EASTLTest.h"
+#include <EASTL/vector_map.h>
+#include <EASTL/vector_multimap.h>
+#include <EASTL/vector.h>
+#include <EASTL/deque.h>
+#include <EASTL/string.h>
+#include <EASTL/fixed_string.h>
+#include <EASTL/fixed_vector.h>
+#include <EASTL/utility.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <map>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::vector_map<int, int>;
+template class eastl::vector_multimap<float, int>;
+template class eastl::vector_map<TestObject, TestObject>;
+template class eastl::vector_multimap<TestObject, TestObject>;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// typedefs
+//
+typedef eastl::vector_map<int, int> VM1;
+typedef eastl::vector_map<int, int, eastl::less<int>, EASTLAllocatorType, eastl::deque<eastl::pair<int, int> > > VM2;
+
+typedef eastl::vector_map<TestObject, TestObject> VM4;
+typedef eastl::vector_map<TestObject, TestObject, eastl::less<TestObject>, EASTLAllocatorType, eastl::deque<eastl::pair<TestObject, TestObject> > > VM5;
+
+typedef eastl::vector_multimap<int, int> VMM1;
+typedef eastl::vector_multimap<int, int, eastl::less<int>, EASTLAllocatorType, eastl::deque<eastl::pair<int, int> > > VMM2;
+
+typedef eastl::vector_multimap<TestObject, TestObject> VMM4;
+typedef eastl::vector_multimap<TestObject, TestObject, eastl::less<TestObject>, EASTLAllocatorType, eastl::deque<eastl::pair<TestObject, TestObject> > > VMM5;
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ typedef std::map<int, int> VM3;
+ typedef std::map<TestObject, TestObject> VM6;
+ typedef std::multimap<int, int> VMM3;
+ typedef std::multimap<TestObject, TestObject> VMM6;
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+int TestVectorMap()
+{
+ int nErrorCount = 0;
+
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ { // Test construction
+ nErrorCount += TestMapConstruction<VM1, VM3, false>();
+ nErrorCount += TestMapConstruction<VM2, VM3, false>();
+ nErrorCount += TestMapConstruction<VM4, VM6, false>();
+ nErrorCount += TestMapConstruction<VM5, VM6, false>();
+
+ nErrorCount += TestMapConstruction<VMM1, VMM3, true>();
+ nErrorCount += TestMapConstruction<VMM2, VMM3, true>();
+ nErrorCount += TestMapConstruction<VMM4, VMM6, true>();
+ nErrorCount += TestMapConstruction<VMM5, VMM6, true>();
+ }
+
+
+ { // Test mutating functionality.
+ nErrorCount += TestMapMutation<VM1, VM3, false>();
+ nErrorCount += TestMapMutation<VM2, VM3, false>();
+ nErrorCount += TestMapMutation<VM4, VM6, false>();
+ nErrorCount += TestMapMutation<VM5, VM6, false>();
+
+ nErrorCount += TestMapMutation<VMM1, VMM3, true>();
+ nErrorCount += TestMapMutation<VMM2, VMM3, true>();
+ nErrorCount += TestMapMutation<VMM4, VMM6, true>();
+ nErrorCount += TestMapMutation<VMM5, VMM6, true>();
+ }
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+ { // Test search functionality.
+ nErrorCount += TestMapSearch<VM1, false>();
+ nErrorCount += TestMapSearch<VM2, false>();
+ nErrorCount += TestMapSearch<VM4, false>();
+ nErrorCount += TestMapSearch<VM5, false>();
+
+ nErrorCount += TestMapSearch<VMM1, true>();
+ nErrorCount += TestMapSearch<VMM2, true>();
+ nErrorCount += TestMapSearch<VMM4, true>();
+ nErrorCount += TestMapSearch<VMM5, true>();
+ }
+
+
+ {
+ // C++11 emplace and related functionality
+ nErrorCount += TestMapCpp11<eastl::vector_map<int, TestObject> >();
+ nErrorCount += TestMapCpp11<eastl::vector_map<int, TestObject, eastl::less<int>, EASTLAllocatorType, eastl::deque<eastl::pair<int, TestObject> > > >();
+
+ nErrorCount += TestMultimapCpp11<eastl::vector_multimap<int, TestObject> >();
+ nErrorCount += TestMultimapCpp11<eastl::vector_multimap<int, TestObject, eastl::less<int>, EASTLAllocatorType, eastl::deque<eastl::pair<int, TestObject> > > >();
+ }
+
+
+ {
+ // insert at the upper bound of a range
+ VMM1 vmm = {{0, 0}};
+ VERIFY(vmm.emplace(0, 0) != vmm.begin());
+ }
+
+
+ { // Misc tests
+
+ // const key_compare& key_comp() const;
+ // key_compare& key_comp();
+ VM2 vm;
+ const VM2 vmc;
+
+ const VM2::key_compare& kc = vmc.key_comp();
+ vm.key_comp() = kc;
+
+ // ensure count can be called from a const object
+ vmc.count(0);
+ }
+
+ {
+ const VMM1 vmm;
+
+ // ensure count can be called from a const object
+ vmm.count(0);
+ }
+
+ {
+ // Misc testing
+ typedef eastl::fixed_vector<eastl::pair<int, float>, 8> FV;
+ typedef eastl::vector_map<int, float, eastl::less<int>, FV::allocator_type, FV> FixedVectorMap;
+
+ FixedVectorMap fvm;
+
+ for(int i = FV::kMaxSize - 1; i >= 0; i--)
+ fvm.insert(eastl::pair<int, float>(i, (float)i));
+
+ FixedVectorMap::iterator it = fvm.find(3);
+ EATEST_VERIFY(it != fvm.end());
+ }
+
+ {
+ // Misc testing
+ typedef eastl::fixed_string<char, 16> KeyStringType;
+ typedef eastl::fixed_string<char, 24> ValueStringType;
+ typedef eastl::pair<ValueStringType, bool> StringMapValueType;
+
+ typedef eastl::vector_map<KeyStringType, StringMapValueType> StringMapType;
+ StringMapType stringMap;
+
+ stringMap.reserve(20);
+ EATEST_VERIFY(stringMap.capacity() == 20);
+
+ StringMapValueType& v1 = stringMap["abc"];
+ EATEST_VERIFY(strlen(v1.first.c_str()) == 0);
+ v1.first.clear();
+ EATEST_VERIFY(strlen(v1.first.c_str()) == 0);
+
+ StringMapValueType& v2 = stringMap["def"];
+ EATEST_VERIFY(strlen(v2.first.c_str()) == 0);
+ v2.first = "def";
+ EATEST_VERIFY(strlen(v2.first.c_str()) == 3);
+ }
+
+ {
+ // Regression for problem observed in EAWebKit
+ typedef eastl::vector_map<eastl::string, void*> TestVectorMap;
+
+ TestVectorMap tvm;
+
+ tvm["Parameters"] = NULL;
+ tvm["ThemeParameters"] = NULL;
+ tvm["CookieInfo"] = NULL;
+ tvm["DiskCacheInfo"] = NULL;
+ tvm["RamCacheInfo"] = NULL;
+ tvm["SSLCert"] = NULL;
+ tvm["AllowedDomain"] = NULL;
+ }
+
+ { // find_as predicate
+ { // vector_map
+ eastl::vector_map<string, int> vss = {{"abc", 11}, {"def", 22}, {"ghi", 33}, {"jklmnop", 44},
+ {"qrstu", 55}, {"vw", 66}, {"x", 77}, {"yz", 88}};
+ VERIFY(vss.find_as("GHI", TestStrCmpI_2()) != vss.end());
+ }
+
+ { // const vector_map
+ const eastl::vector_map<string, int> vss = {{"abc", 11}, {"def", 22}, {"ghi", 33}, {"jklmnop", 44},
+ {"qrstu", 55}, {"vw", 66}, {"x", 77}, {"yz", 88}};
+ VERIFY(vss.find_as("GHI", TestStrCmpI_2()) != vss.end());
+ }
+
+ // vector_multimap
+ {
+ eastl::vector_multimap<string, int> vss = {{"abc", 11}, {"def", 22}, {"ghi", 33}, {"jklmnop", 44},
+ {"qrstu", 55}, {"vw", 66}, {"x", 77}, {"yz", 88}};
+ VERIFY(vss.find_as("GHI", TestStrCmpI_2()) != vss.end());
+ }
+
+ // const vector_multimap
+ {
+ const eastl::vector_multimap<string, int> vss = {{"abc", 11}, {"def", 22}, {"ghi", 33}, {"jklmnop", 44},
+ {"qrstu", 55}, {"vw", 66}, {"x", 77}, {"yz", 88}};
+ VERIFY(vss.find_as("GHI", TestStrCmpI_2()) != vss.end());
+ }
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/TestVectorSet.cpp b/EASTL/test/source/TestVectorSet.cpp
new file mode 100644
index 0000000..067630f
--- /dev/null
+++ b/EASTL/test/source/TestVectorSet.cpp
@@ -0,0 +1,170 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+
+#include "TestSet.h"
+#include "EASTLTest.h"
+#include <EASTL/vector_set.h>
+#include <EASTL/vector_multiset.h>
+#include <EASTL/vector.h>
+#include <EASTL/deque.h>
+#include <EABase/eabase.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ #include <set>
+#endif
+EA_RESTORE_ALL_VC_WARNINGS()
+
+using namespace eastl;
+
+
+// Template instantations.
+// These tell the compiler to compile all the functions for the given class.
+template class eastl::vector_set<int>;
+template class eastl::vector_multiset<float>;
+template class eastl::vector_set<TestObject>;
+template class eastl::vector_multiset<TestObject>;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// typedefs
+//
+typedef eastl::vector_set<int> VS1;
+typedef eastl::vector_set<int, eastl::less<int>, EASTLAllocatorType, eastl::deque<int> > VS2;
+typedef eastl::vector_set<TestObject> VS4;
+typedef eastl::vector_set<TestObject, eastl::less<TestObject>, EASTLAllocatorType, eastl::deque<TestObject> > VS5;
+typedef eastl::vector_multiset<int> VMS1;
+typedef eastl::vector_multiset<int, eastl::less<int>, EASTLAllocatorType, eastl::deque<int> > VMS2;
+typedef eastl::vector_multiset<TestObject> VMS4;
+typedef eastl::vector_multiset<TestObject, eastl::less<TestObject>, EASTLAllocatorType, eastl::deque<TestObject> > VMS5;
+
+#ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ typedef std::set<int> VS3;
+ typedef std::set<TestObject> VS6;
+ typedef std::multiset<int> VMS3;
+ typedef std::multiset<TestObject> VMS6;
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+int TestVectorSet()
+{
+ int nErrorCount = 0;
+
+ #ifndef EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ { // Test construction
+ nErrorCount += TestSetConstruction<VS1, VS3, false>();
+ nErrorCount += TestSetConstruction<VS2, VS3, false>();
+ nErrorCount += TestSetConstruction<VS4, VS6, false>();
+ nErrorCount += TestSetConstruction<VS5, VS6, false>();
+
+ nErrorCount += TestSetConstruction<VMS1, VMS3, true>();
+ nErrorCount += TestSetConstruction<VMS2, VMS3, true>();
+ nErrorCount += TestSetConstruction<VMS4, VMS6, true>();
+ nErrorCount += TestSetConstruction<VMS5, VMS6, true>();
+ }
+
+
+ { // Test mutating functionality.
+ nErrorCount += TestSetMutation<VS1, VS3, false>();
+ nErrorCount += TestSetMutation<VS2, VS3, false>();
+ nErrorCount += TestSetMutation<VS4, VS6, false>();
+ nErrorCount += TestSetMutation<VS5, VS6, false>();
+
+ nErrorCount += TestSetMutation<VMS1, VMS3, true>();
+ nErrorCount += TestSetMutation<VMS2, VMS3, true>();
+ nErrorCount += TestSetMutation<VMS4, VMS6, true>();
+ nErrorCount += TestSetMutation<VMS5, VMS6, true>();
+ }
+ #endif // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+
+
+ { // Test search functionality.
+ nErrorCount += TestSetSearch<VS1, false>();
+ nErrorCount += TestSetSearch<VS2, false>();
+ nErrorCount += TestSetSearch<VS4, false>();
+ nErrorCount += TestSetSearch<VS5, false>();
+
+ nErrorCount += TestSetSearch<VMS1, true>();
+ nErrorCount += TestSetSearch<VMS2, true>();
+ nErrorCount += TestSetSearch<VMS4, true>();
+ nErrorCount += TestSetSearch<VMS5, true>();
+ }
+
+
+ {
+ // C++11 emplace and related functionality
+ nErrorCount += TestSetCpp11<VS4>();
+ nErrorCount += TestSetCpp11<VS5>();
+
+ nErrorCount += TestMultisetCpp11<VMS4>();
+ nErrorCount += TestMultisetCpp11<VMS5>();
+ }
+
+
+ {
+ // insert at the upper bound of a range
+ VMS1 vms = {0};
+ VERIFY(vms.insert(0) != vms.begin());
+ }
+
+
+ { // Misc tests
+ {
+ // const key_compare& key_comp() const;
+ // key_compare& key_comp();
+ VS2 vs;
+ const VS2 vsc;
+
+ // ensure count can be called from a const object
+ const VS2::key_compare& kc = vsc.key_comp();
+ vs.key_comp() = kc;
+ vsc.count(0);
+ }
+
+ {
+ // ensure count can be called from a const object
+ const VMS1 vms;
+ vms.count(0);
+ }
+ }
+
+ { // find_as predicate
+ { // vector_set
+ eastl::vector_set<string> vss = {"abc", "def", "ghi", "jklmnop", "qrstu", "vw", "x", "yz"};
+ VERIFY(vss.find_as("GHI", TestStrCmpI_2()) != vss.end());
+ }
+
+ { // const vector_set
+ const eastl::vector_set<string> vss = {"abc", "def", "ghi", "jklmnop", "qrstu", "vw", "x", "yz"};
+ VERIFY(vss.find_as("GHI", TestStrCmpI_2()) != vss.end());
+ }
+
+ { // vector_multiset
+ eastl::vector_multiset<string> vss = {"abc", "def", "ghi", "jklmnop", "qrstu", "vw", "x", "yz"};
+ VERIFY(vss.find_as("GHI", TestStrCmpI_2()) != vss.end());
+ }
+
+ { // const vector_multiset
+ const eastl::vector_multiset<string> vss = {"abc", "def", "ghi", "jklmnop", "qrstu", "vw", "x", "yz"};
+ VERIFY(vss.find_as("GHI", TestStrCmpI_2()) != vss.end());
+ }
+ }
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/EASTL/test/source/main.cpp b/EASTL/test/source/main.cpp
new file mode 100644
index 0000000..132bab1
--- /dev/null
+++ b/EASTL/test/source/main.cpp
@@ -0,0 +1,166 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#include "EASTLTest.h"
+#include <EAStdC/EASprintf.h>
+#include <EASTL/internal/config.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+#include "EAMain/EAEntryPointMain.inl"
+#include "EASTLTestAllocator.h"
+
+///////////////////////////////////////////////////////////////////////////////
+// Required by EASTL.
+//
+#if !EASTL_EASTDC_VSNPRINTF
+ int Vsnprintf8(char* pDestination, size_t n, const char* pFormat, va_list arguments)
+ {
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+ }
+
+ int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments)
+ {
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+ }
+
+ int Vsnprintf32(char32_t* pDestination, size_t n, const char32_t* pFormat, va_list arguments)
+ {
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+ }
+
+ #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE
+ int Vsnprintf8(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments)
+ {
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+ }
+ #endif
+
+ #if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE
+ int VsnprintfW(wchar_t* pDestination, size_t n, const wchar_t* pFormat, va_list arguments)
+ {
+ return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+ }
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EAMain
+//
+int EAMain(int argc, char* argv[])
+{
+ using namespace EA::UnitTest;
+
+ int nErrorCount = 0;
+
+ EA::EAMain::PlatformStartup();
+
+ EASTLTest_SetGeneralAllocator();
+
+ nErrorCount += EASTLTest_CheckMemory();
+
+ // Parse command line arguments
+ for(int i = 1; i < argc; i++)
+ {
+ // Example usage: -l:7
+ if(strstr(argv[i], "-l:") == argv[i])
+ {
+ gEASTL_TestLevel = atoi(argv[i] + 3);
+
+ if(gEASTL_TestLevel < kEASTL_TestLevelLow)
+ gEASTL_TestLevel = kEASTL_TestLevelLow;
+ if(gEASTL_TestLevel > kEASTL_TestLevelHigh)
+ gEASTL_TestLevel = kEASTL_TestLevelHigh;
+ }
+ }
+
+ TestApplication testSuite("EASTL Unit Tests", argc, argv);
+
+ testSuite.AddTest("Algorithm", TestAlgorithm);
+ testSuite.AddTest("Allocator", TestAllocator);
+ testSuite.AddTest("Any", TestAny);
+ testSuite.AddTest("Array", TestArray);
+ testSuite.AddTest("BitVector", TestBitVector);
+ testSuite.AddTest("Bitset", TestBitset);
+ testSuite.AddTest("CharTraits", TestCharTraits);
+ testSuite.AddTest("Chrono", TestChrono);
+ testSuite.AddTest("Deque", TestDeque);
+ testSuite.AddTest("Extra", TestExtra);
+ testSuite.AddTest("Finally", TestFinally);
+ testSuite.AddTest("FixedFunction", TestFixedFunction);
+ testSuite.AddTest("FixedHash", TestFixedHash);
+ testSuite.AddTest("FixedList", TestFixedList);
+ testSuite.AddTest("FixedMap", TestFixedMap);
+ testSuite.AddTest("FixedSList", TestFixedSList);
+ testSuite.AddTest("FixedSet", TestFixedSet);
+ testSuite.AddTest("FixedString", TestFixedString);
+ testSuite.AddTest("FixedTupleVector", TestFixedTupleVector);
+ testSuite.AddTest("FixedVector", TestFixedVector);
+ testSuite.AddTest("Functional", TestFunctional);
+ testSuite.AddTest("Hash", TestHash);
+ testSuite.AddTest("Heap", TestHeap);
+ testSuite.AddTest("IntrusiveHash", TestIntrusiveHash);
+ testSuite.AddTest("IntrusiveList", TestIntrusiveList);
+ testSuite.AddTest("IntrusiveSDList", TestIntrusiveSDList);
+ testSuite.AddTest("IntrusiveSList", TestIntrusiveSList);
+ testSuite.AddTest("Iterator", TestIterator);
+ testSuite.AddTest("LRUCache", TestLruCache);
+ testSuite.AddTest("List", TestList);
+ testSuite.AddTest("ListMap", TestListMap);
+ testSuite.AddTest("Map", TestMap);
+ testSuite.AddTest("Memory", TestMemory);
+ testSuite.AddTest("Meta", TestMeta);
+ testSuite.AddTest("NumericLimits", TestNumericLimits);
+ testSuite.AddTest("Optional", TestOptional);
+ testSuite.AddTest("Random", TestRandom);
+ testSuite.AddTest("Ratio", TestRatio);
+ testSuite.AddTest("RingBuffer", TestRingBuffer);
+ testSuite.AddTest("SList", TestSList);
+ testSuite.AddTest("SegmentedVector", TestSegmentedVector);
+ testSuite.AddTest("Set", TestSet);
+ testSuite.AddTest("SmartPtr", TestSmartPtr);
+ testSuite.AddTest("Sort", TestSort);
+ testSuite.AddTest("Span", TestSpan);
+ testSuite.AddTest("String", TestString);
+ testSuite.AddTest("StringHashMap", TestStringHashMap);
+ testSuite.AddTest("StringMap", TestStringMap);
+ testSuite.AddTest("StringView", TestStringView);
+ testSuite.AddTest("TestCppCXTypeTraits", TestCppCXTypeTraits);
+ testSuite.AddTest("Tuple", TestTuple);
+ testSuite.AddTest("TupleVector", TestTupleVector);
+ testSuite.AddTest("TypeTraits", TestTypeTraits);
+ testSuite.AddTest("Utility", TestUtility);
+ testSuite.AddTest("Variant", TestVariant);
+ testSuite.AddTest("Vector", TestVector);
+ testSuite.AddTest("VectorMap", TestVectorMap);
+ testSuite.AddTest("VectorSet", TestVectorSet);
+ testSuite.AddTest("AtomicBasic", TestAtomicBasic);
+ testSuite.AddTest("AtomicAsm", TestAtomicAsm);
+ testSuite.AddTest("Bitcast", TestBitcast);
+
+
+ nErrorCount += testSuite.Run();
+
+ nErrorCount += EASTLTest_CheckMemory();
+
+ EA::EAMain::PlatformShutdown(nErrorCount);
+
+ return nErrorCount;
+}
+
+
+
+
+
+
+
+