authorToni Uhlig <matzeton@googlemail.com>2021-04-08 16:43:58 +0200
committerToni Uhlig <matzeton@googlemail.com>2021-04-08 16:43:58 +0200
commite59cf7b09e7388d369e8d2bf73501cde79c28708 (patch)
tree6099307032bb86f4a969721f9ac447d3d1be67d4 /include/EASTL/bonus
Squashed 'EASTL/' content from commit fad5471
git-subtree-dir: EASTL
git-subtree-split: fad54717f8e4ebb13b20095da7efd07a53af0f10
Diffstat (limited to 'include/EASTL/bonus')
-rw-r--r--  include/EASTL/bonus/adaptors.h            88
-rw-r--r--  include/EASTL/bonus/call_traits.h         117
-rw-r--r--  include/EASTL/bonus/compressed_pair.h     460
-rw-r--r--  include/EASTL/bonus/fixed_ring_buffer.h   50
-rw-r--r--  include/EASTL/bonus/fixed_tuple_vector.h  210
-rw-r--r--  include/EASTL/bonus/intrusive_sdlist.h    694
-rw-r--r--  include/EASTL/bonus/intrusive_slist.h     321
-rw-r--r--  include/EASTL/bonus/list_map.h            932
-rw-r--r--  include/EASTL/bonus/lru_cache.h           424
-rw-r--r--  include/EASTL/bonus/ring_buffer.h         1581
-rw-r--r--  include/EASTL/bonus/sort_extra.h          204
-rw-r--r--  include/EASTL/bonus/tuple_vector.h        1592
12 files changed, 6673 insertions, 0 deletions
diff --git a/include/EASTL/bonus/adaptors.h b/include/EASTL/bonus/adaptors.h
new file mode 100644
index 0000000..423cacd
--- /dev/null
+++ b/include/EASTL/bonus/adaptors.h
@@ -0,0 +1,88 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ADAPTORS_H
+#define EASTL_ADAPTORS_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+EA_DISABLE_VC_WARNING(4512 4626)
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+
+ EA_DISABLE_VC_WARNING(5027) // move assignment operator was implicitly defined as deleted
+#endif
+
+
+namespace eastl
+{
+ /// reverse
+ ///
+	/// This adaptor allows reverse iteration of a container in range-based for loops.
+ ///
+ /// for (auto& i : reverse(c)) { ... }
+ ///
+ template <typename Container>
+ struct reverse_wrapper
+ {
+ template <typename C>
+ reverse_wrapper(C&& c)
+ : mContainer(eastl::forward<C>(c))
+ {
+ /**
+ * NOTE:
+ *
+			 * Due to the reference-collapsing rules of universal references, the Container type is either
+			 *
+			 *     const C&   if the input is a const lvalue
+			 *     C&         if the input is a non-const lvalue
+			 *     C          if the input is an rvalue
+			 *     const C    if the input is a const rvalue, in which case the object has to be copied and the copy-ctor is called
+			 *
+			 * Thus we either move the whole container into this object or take a reference to the lvalue, avoiding the copy.
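+			 *
+			 * For example (illustrative):
+			 *
+			 *     eastl::vector<int> v = {1, 2, 3};
+			 *     reverse(v);                         // Container deduced as eastl::vector<int>&  -- a reference, no copy
+			 *     reverse(eastl::vector<int>{1, 2});  // Container deduced as eastl::vector<int>   -- the rvalue is moved in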
+ * The static_assert below ensures this.
+ */
+ static_assert(eastl::is_same_v<C, Container>, "Reference collapsed deduced type must be the same as the deduced Container type!");
+ }
+
+ Container mContainer;
+ };
+
+ template <typename Container>
+ auto begin(const reverse_wrapper<Container>& w) -> decltype(eastl::rbegin(w.mContainer))
+ {
+ return eastl::rbegin(w.mContainer);
+ }
+
+ template <typename Container>
+ auto end(const reverse_wrapper<Container>& w) -> decltype(eastl::rend(w.mContainer))
+ {
+ return eastl::rend(w.mContainer);
+ }
+
+ template <typename Container>
+ reverse_wrapper<Container> reverse(Container&& c)
+ {
+ return reverse_wrapper<Container>(eastl::forward<Container>(c));
+ }
+
+} // namespace eastl
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+
+ EA_RESTORE_VC_WARNING()
+#endif
+EA_RESTORE_VC_WARNING()
+
+#endif // Header include guard
diff --git a/include/EASTL/bonus/call_traits.h b/include/EASTL/bonus/call_traits.h
new file mode 100644
index 0000000..0995d05
--- /dev/null
+++ b/include/EASTL/bonus/call_traits.h
@@ -0,0 +1,117 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// The design for call_traits here is very similar to that found in template
+// metaprogramming libraries such as Boost, GCC, and Metrowerks, given that
+// these libraries have established this interface as a de facto standard for
+// solving this problem. Also, these are described in various books on the
+// topic of template metaprogramming, such as "Modern C++ Design".
+//
+// See http://www.boost.org/libs/utility/call_traits.htm or search for
+// call_traits in Google for a description of call_traits.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_CALL_TRAITS_H
+#define EASTL_CALL_TRAITS_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+
+ template <typename T, bool small_>
+ struct ct_imp2 { typedef const T& param_type; };
+
+ template <typename T>
+ struct ct_imp2<T, true> { typedef const T param_type; };
+
+ template <typename T, bool isp, bool b1>
+ struct ct_imp { typedef const T& param_type; };
+
+ template <typename T, bool isp>
+ struct ct_imp<T, isp, true> { typedef typename ct_imp2<T, sizeof(T) <= sizeof(void*)>::param_type param_type; };
+
+ template <typename T, bool b1>
+ struct ct_imp<T, true, b1> { typedef T const param_type; };
+
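+
+	// Net effect of the helpers above (illustrative summary): call_traits<T>::param_type resolves to
+	//     T const    when T is a pointer type,
+	//     const T    when T is an arithmetic type no larger than a pointer (pass by value),
+	//     const T&   otherwise (e.g. class types).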
+
+
+ template <typename T>
+ struct call_traits
+ {
+ public:
+ typedef T value_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef typename ct_imp<T, is_pointer<T>::value, is_arithmetic<T>::value>::param_type param_type;
+ };
+
+
+ template <typename T>
+ struct call_traits<T&>
+ {
+ typedef T& value_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T& param_type;
+ };
+
+
+ template <typename T, size_t N>
+ struct call_traits<T [N]>
+ {
+ private:
+ typedef T array_type[N];
+
+ public:
+ typedef const T* value_type;
+ typedef array_type& reference;
+ typedef const array_type& const_reference;
+ typedef const T* const param_type;
+ };
+
+
+ template <typename T, size_t N>
+ struct call_traits<const T [N]>
+ {
+ private:
+ typedef const T array_type[N];
+
+ public:
+ typedef const T* value_type;
+ typedef array_type& reference;
+ typedef const array_type& const_reference;
+ typedef const T* const param_type;
+ };
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/bonus/compressed_pair.h b/include/EASTL/bonus/compressed_pair.h
new file mode 100644
index 0000000..379642b
--- /dev/null
+++ b/include/EASTL/bonus/compressed_pair.h
@@ -0,0 +1,460 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// The compressed pair class is very similar to std::pair, but if either of the
+// template arguments are empty classes, then the "empty base-class optimization"
+// is applied to compress the size of the pair.
+//
+// The design for compressed_pair here is very similar to that found in template
+// metaprogramming libraries such as Boost, GCC, and Metrowerks, given that
+// these libraries have established this interface as a de facto standard for
+// solving this problem. Also, these are described in various books on the
+// topic of template metaprogramming, such as "Modern C++ Design".
+//
+// template <typename T1, typename T2>
+// class compressed_pair
+// {
+// public:
+// typedef T1 first_type;
+// typedef T2 second_type;
+// typedef typename call_traits<first_type>::param_type first_param_type;
+// typedef typename call_traits<second_type>::param_type second_param_type;
+// typedef typename call_traits<first_type>::reference first_reference;
+// typedef typename call_traits<second_type>::reference second_reference;
+// typedef typename call_traits<first_type>::const_reference first_const_reference;
+// typedef typename call_traits<second_type>::const_reference second_const_reference;
+//
+// compressed_pair() : base() {}
+// compressed_pair(first_param_type x, second_param_type y);
+// explicit compressed_pair(first_param_type x);
+// explicit compressed_pair(second_param_type y);
+//
+// compressed_pair& operator=(const compressed_pair&);
+//
+// first_reference first();
+// first_const_reference first() const;
+//
+// second_reference second();
+// second_const_reference second() const;
+//
+// void swap(compressed_pair& y);
+// };
+//
+// The two members of the pair can be accessed using the member functions first()
+// and second(). Note that not all member functions can be instantiated for all
+// template parameter types. In particular compressed_pair can be instantiated for
+// reference and array types, however in these cases the range of constructors that
+// can be used are limited. If types T1 and T2 are the same type, then there is
+// only one version of the single-argument constructor, and this constructor
+// initialises both values in the pair to the passed value.
+//
+// Note that compressed_pair cannot be instantiated if either of the template
+// arguments is a union type, unless there is compiler support for is_union,
+// or if is_union is specialised for the union type.
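+//
+// Example usage (illustrative; EmptyDeleter is a hypothetical empty functor):
+//
+//     struct EmptyDeleter { void operator()(int* p) const { delete p; } };
+//     eastl::compressed_pair<int*, EmptyDeleter> cp(new int(42));
+//     // EmptyDeleter is an empty class, so the empty base-class optimization applies and
+//     // sizeof(cp) is typically just sizeof(int*).
+//     cp.second()(cp.first());   // invoke the stored deleter on the stored pointer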
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_COMPRESSED_PAIR_H
+#define EASTL_COMPRESSED_PAIR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/bonus/call_traits.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later
+ EA_DISABLE_VC_WARNING(4626 5027) // warning C4626: 'eastl::compressed_pair_imp<T1,T2,0>': assignment operator was implicitly defined as deleted because a base class assignment operator is inaccessible or deleted
+#endif
+
+namespace eastl
+{
+
+ template <typename T1, typename T2>
+ class compressed_pair;
+
+
+ template <typename T1, typename T2, bool isSame, bool firstEmpty, bool secondEmpty>
+ struct compressed_pair_switch;
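+
+	// The switch value selects the compressed_pair_imp layout (illustrative summary):
+	//     0: T1 != T2, neither empty      3: T1 != T2, both empty
+	//     1: T1 != T2, only T1 empty      4: T1 == T2, both empty
+	//     2: T1 != T2, only T2 empty      5: T1 == T2, neither empty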
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, false, false, false>{ static const int value = 0; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, false, true, false> { static const int value = 1; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, false, false, true> { static const int value = 2; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, false, true, true> { static const int value = 3; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, true, true, true> { static const int value = 4; };
+
+ template <typename T1, typename T2>
+ struct compressed_pair_switch<T1, T2, true, false, false> { static const int value = 5; };
+
+ template <typename T1, typename T2, int version>
+ class compressed_pair_imp;
+
+
+
+ template <typename T>
+ inline void cp_swap(T& t1, T& t2)
+ {
+ T tTemp = t1;
+ t1 = t2;
+ t2 = tTemp;
+ }
+
+
+ // Derive from neither
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 0>
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : mFirst(x), mSecond(y) {}
+
+ compressed_pair_imp(first_param_type x)
+ : mFirst(x) {}
+
+ compressed_pair_imp(second_param_type y)
+ : mSecond(y) {}
+
+ first_reference first() { return mFirst; }
+ first_const_reference first() const { return mFirst; }
+
+ second_reference second() { return mSecond; }
+ second_const_reference second() const { return mSecond; }
+
+ void swap(compressed_pair<T1, T2>& y)
+ {
+ cp_swap(mFirst, y.first());
+ cp_swap(mSecond, y.second());
+ }
+
+ private:
+ first_type mFirst;
+ second_type mSecond;
+ };
+
+
+ // Derive from T1
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 1> : private T1
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : first_type(x), mSecond(y) {}
+
+ compressed_pair_imp(first_param_type x)
+ : first_type(x) {}
+
+ compressed_pair_imp(second_param_type y)
+ : mSecond(y) {}
+
+ first_reference first() { return *this; }
+ first_const_reference first() const { return *this; }
+
+ second_reference second() { return mSecond; }
+ second_const_reference second() const { return mSecond; }
+
+ void swap(compressed_pair<T1,T2>& y)
+ {
+ // No need to swap empty base class
+ cp_swap(mSecond, y.second());
+ }
+
+ private:
+ second_type mSecond;
+ };
+
+
+
+ // Derive from T2
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 2> : private T2
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : second_type(y), mFirst(x) {}
+
+ compressed_pair_imp(first_param_type x)
+ : mFirst(x) {}
+
+ compressed_pair_imp(second_param_type y)
+ : second_type(y) {}
+
+ first_reference first() { return mFirst; }
+ first_const_reference first() const { return mFirst; }
+
+ second_reference second() { return *this; }
+ second_const_reference second() const { return *this; }
+
+ void swap(compressed_pair<T1,T2>& y)
+ {
+ // No need to swap empty base class
+ cp_swap(mFirst, y.first());
+ }
+
+ private:
+ first_type mFirst;
+ };
+
+
+
+ // Derive from T1 and T2
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 3> : private T1, private T2
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : first_type(x), second_type(y) {}
+
+ compressed_pair_imp(first_param_type x)
+ : first_type(x) {}
+
+ compressed_pair_imp(second_param_type y)
+ : second_type(y) {}
+
+ first_reference first() { return *this; }
+ first_const_reference first() const { return *this; }
+
+ second_reference second() { return *this; }
+ second_const_reference second() const { return *this; }
+
+ // No need to swap empty bases
+ void swap(compressed_pair<T1, T2>&)
+ { }
+ };
+
+
+ // T1 == T2, T1 and T2 are both empty
+	// Note that this does not actually store an instance of T2 at all,
+	// but instead reuses the T1 base class for both first() and second().
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 4> : private T1
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type)
+ : first_type(x) {}
+
+ compressed_pair_imp(first_param_type x)
+ : first_type(x) {}
+
+ first_reference first() { return *this; }
+ first_const_reference first() const { return *this; }
+
+ second_reference second() { return *this; }
+ second_const_reference second() const { return *this; }
+
+ void swap(compressed_pair<T1, T2>&) { }
+ };
+
+
+ // T1 == T2 and are not empty
+ template <typename T1, typename T2>
+ class compressed_pair_imp<T1, T2, 5>
+ {
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair_imp() {}
+
+ compressed_pair_imp(first_param_type x, second_param_type y)
+ : mFirst(x), mSecond(y) {}
+
+ compressed_pair_imp(first_param_type x)
+ : mFirst(x), mSecond(x) {}
+
+ first_reference first() { return mFirst; }
+ first_const_reference first() const { return mFirst; }
+
+ second_reference second() { return mSecond; }
+ second_const_reference second() const { return mSecond; }
+
+ void swap(compressed_pair<T1, T2>& y)
+ {
+ cp_swap(mFirst, y.first());
+ cp_swap(mSecond, y.second());
+ }
+
+ private:
+ first_type mFirst;
+ second_type mSecond;
+ };
+
+
+
+ template <typename T1, typename T2>
+ class compressed_pair
+ : private compressed_pair_imp<T1, T2,
+ compressed_pair_switch<
+ T1,
+ T2,
+ is_same<typename remove_cv<T1>::type, typename remove_cv<T2>::type>::value,
+ is_empty<T1>::value,
+ is_empty<T2>::value>::value>
+ {
+ private:
+ typedef compressed_pair_imp<T1, T2,
+ compressed_pair_switch<
+ T1,
+ T2,
+ is_same<typename remove_cv<T1>::type, typename remove_cv<T2>::type>::value,
+ is_empty<T1>::value,
+ is_empty<T2>::value>::value> base;
+ public:
+ typedef T1 first_type;
+ typedef T2 second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair() : base() {}
+ compressed_pair(first_param_type x, second_param_type y) : base(x, y) {}
+ explicit compressed_pair(first_param_type x) : base(x) {}
+ explicit compressed_pair(second_param_type y) : base(y) {}
+
+ first_reference first() { return base::first(); }
+ first_const_reference first() const { return base::first(); }
+
+ second_reference second() { return base::second(); }
+ second_const_reference second() const { return base::second(); }
+
+ void swap(compressed_pair& y) { base::swap(y); }
+ };
+
+
+ // Partial specialisation for case where T1 == T2:
+ template <typename T>
+ class compressed_pair<T, T>
+ : private compressed_pair_imp<T, T,
+ compressed_pair_switch<
+ T,
+ T,
+ is_same<typename remove_cv<T>::type, typename remove_cv<T>::type>::value,
+ is_empty<T>::value,
+ is_empty<T>::value>::value>
+ {
+ private:
+ typedef compressed_pair_imp<T, T,
+ compressed_pair_switch<
+ T,
+ T,
+ is_same<typename remove_cv<T>::type, typename remove_cv<T>::type>::value,
+ is_empty<T>::value,
+ is_empty<T>::value>::value> base;
+ public:
+ typedef T first_type;
+ typedef T second_type;
+ typedef typename call_traits<first_type>::param_type first_param_type;
+ typedef typename call_traits<second_type>::param_type second_param_type;
+ typedef typename call_traits<first_type>::reference first_reference;
+ typedef typename call_traits<second_type>::reference second_reference;
+ typedef typename call_traits<first_type>::const_reference first_const_reference;
+ typedef typename call_traits<second_type>::const_reference second_const_reference;
+
+ compressed_pair() : base() {}
+ compressed_pair(first_param_type x, second_param_type y) : base(x, y) {}
+ explicit compressed_pair(first_param_type x) : base(x) {}
+
+ first_reference first() { return base::first(); }
+ first_const_reference first() const { return base::first(); }
+
+ second_reference second() { return base::second(); }
+ second_const_reference second() const { return base::second(); }
+
+ void swap(compressed_pair<T, T>& y) { base::swap(y); }
+ };
+
+
+ template <typename T1, typename T2>
+ inline void swap(compressed_pair<T1, T2>& x, compressed_pair<T1, T2>& y)
+ {
+ x.swap(y);
+ }
+
+
+} // namespace eastl
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later
+ EA_RESTORE_VC_WARNING()
+#endif
+
+#endif // Header include guard
+
+
+
diff --git a/include/EASTL/bonus/fixed_ring_buffer.h b/include/EASTL/bonus/fixed_ring_buffer.h
new file mode 100644
index 0000000..2bb54e4
--- /dev/null
+++ b/include/EASTL/bonus/fixed_ring_buffer.h
@@ -0,0 +1,50 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXED_RING_BUFFER_H
+#define EASTL_FIXED_RING_BUFFER_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/fixed_vector.h>
+#include <EASTL/bonus/ring_buffer.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+namespace eastl
+{
+
+ /// fixed_ring_buffer
+ ///
+ /// This is a convenience template alias for creating a fixed-sized
+ /// ring_buffer using eastl::fixed_vector as its storage container. This has
+ /// been tricky for users to get correct due to the constructor requirements
+ /// of eastl::ring_buffer leaking the implementation detail of the sentinel
+ /// value being used internally. In addition, it was not obvious what the
+ /// correct allocator_type template parameter should be used for containers
+ /// providing both a default allocator type and an overflow allocator type.
+ ///
+ /// We are over-allocating the fixed_vector container to accommodate the
+ /// ring_buffer sentinel to prevent that implementation detail leaking into
+ /// user code.
+ ///
+ /// Example usage:
+ ///
+ /// fixed_ring_buffer<int, 8> rb = {0, 1, 2, 3, 4, 5, 6, 7};
+ /// or
+ /// fixed_ring_buffer<int, 8> rb(8); // capacity doesn't need to respect sentinel
+ /// rb.push_back(0);
+ ///
+ ///
+#if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename T, size_t N>
+ using fixed_ring_buffer =
+ ring_buffer<T, fixed_vector<T, N + 1, false>, typename fixed_vector<T, N + 1, false>::overflow_allocator_type>;
+#endif
+
+} // namespace eastl
+
+#endif // Header include guard
+
diff --git a/include/EASTL/bonus/fixed_tuple_vector.h b/include/EASTL/bonus/fixed_tuple_vector.h
new file mode 100644
index 0000000..e9ce0ec
--- /dev/null
+++ b/include/EASTL/bonus/fixed_tuple_vector.h
@@ -0,0 +1,210 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXEDTUPLEVECTOR_H
+#define EASTL_FIXEDTUPLEVECTOR_H
+
+#include <EASTL/bonus/tuple_vector.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+namespace eastl
+{
+
+ /// EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME
+		#define EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_tuple_vector" // Unless the user overrides something, this is "EASTL fixed_tuple_vector".
+ #endif
+
+
+ /// EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR
+ #define EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME)
+ #endif
+
+// External interface of fixed_tuple_vector
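+//
+// Example usage (illustrative; push_back(Ts...) and get<I>() are provided by the underlying tuple_vector):
+//
+//     fixed_tuple_vector<8, false, int, float> v;   // room for 8 elements, overflow disabled
+//     v.push_back(1, 1.0f);                         // appends the tuple {1, 1.0f}
+//     float f = v.get<1>()[0];                      // data is stored per member ("struct of arrays")
+//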
+template <size_t nodeCount, bool bEnableOverflow, typename... Ts>
+class fixed_tuple_vector : public TupleVecInternal::TupleVecImpl<fixed_vector_allocator<
+ TupleVecInternal::TupleRecurser<Ts...>::GetTotalAllocationSize(nodeCount, 0), 1,
+ TupleVecInternal::TupleRecurser<Ts...>::GetTotalAlignment(), 0,
+ bEnableOverflow, EASTLAllocatorType>, make_index_sequence<sizeof...(Ts)>, Ts...>
+{
+public:
+ typedef fixed_vector_allocator<
+ TupleVecInternal::TupleRecurser<Ts...>::GetTotalAllocationSize(nodeCount, 0), 1,
+ TupleVecInternal::TupleRecurser<Ts...>::GetTotalAlignment(), 0,
+ bEnableOverflow, EASTLAllocatorType> fixed_allocator_type;
+ typedef aligned_buffer<fixed_allocator_type::kNodesSize, fixed_allocator_type::kNodeAlignment> aligned_buffer_type;
+ typedef fixed_tuple_vector<nodeCount, bEnableOverflow, Ts...> this_type;
+ typedef EASTLAllocatorType overflow_allocator_type;
+
+ typedef TupleVecInternal::TupleVecImpl<fixed_allocator_type, make_index_sequence<sizeof...(Ts)>, Ts...> base_type;
+ typedef typename base_type::size_type size_type;
+
+private:
+ aligned_buffer_type mBuffer;
+
+public:
+ fixed_tuple_vector()
+ : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ { }
+
+ fixed_tuple_vector(const overflow_allocator_type& allocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ { }
+
+ fixed_tuple_vector(this_type&& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::get_allocator().copy_overflow_allocator(x.get_allocator());
+ base_type::DoInitFromIterator(make_move_iterator(x.begin()), make_move_iterator(x.end()));
+ x.clear();
+ }
+
+ fixed_tuple_vector(this_type&& x, const overflow_allocator_type& allocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromIterator(make_move_iterator(x.begin()), make_move_iterator(x.end()));
+ x.clear();
+ }
+
+ fixed_tuple_vector(const this_type& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::get_allocator().copy_overflow_allocator(x.get_allocator());
+ base_type::DoInitFromIterator(x.begin(), x.end());
+ }
+
+ fixed_tuple_vector(const this_type& x, const overflow_allocator_type& allocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromIterator(x.begin(), x.end());
+ }
+
+ template <typename MoveIterBase>
+ fixed_tuple_vector(move_iterator<MoveIterBase> begin, move_iterator<MoveIterBase> end, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromIterator(begin, end);
+ }
+
+ template <typename Iterator>
+ fixed_tuple_vector(Iterator begin, Iterator end, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromIterator(begin, end);
+ }
+
+ fixed_tuple_vector(size_type n, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitDefaultFill(n);
+ }
+
+ fixed_tuple_vector(size_type n, const Ts&... args)
+ : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFillArgs(n, args...);
+ }
+
+ fixed_tuple_vector(size_type n, const Ts&... args, const overflow_allocator_type& allocator)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFillArgs(n, args...);
+ }
+
+ fixed_tuple_vector(size_type n,
+ typename base_type::const_reference_tuple tup,
+ const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFillTuple(n, tup);
+ }
+
+ fixed_tuple_vector(const typename base_type::value_tuple* first, const typename base_type::value_tuple* last,
+ const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromTupleArray(first, last);
+ }
+
+ fixed_tuple_vector(std::initializer_list<typename base_type::value_tuple> iList,
+ const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize)
+ {
+ base_type::DoInitFromTupleArray(iList.begin(), iList.end());
+ }
+
+ this_type& operator=(const this_type& other)
+ {
+ base_type::operator=(other);
+ return *this;
+ }
+
+ this_type& operator=(this_type&& other)
+ {
+ base_type::clear();
+		// OK to call DoInitFromIterator in a non-ctor scenario because clear() resets everything, more or less.
+ base_type::DoInitFromIterator(make_move_iterator(other.begin()), make_move_iterator(other.end()));
+ other.clear();
+ return *this;
+ }
+
+ this_type& operator=(std::initializer_list<typename base_type::value_tuple> iList)
+ {
+ base_type::operator=(iList);
+ return *this;
+ }
+
+ void swap(this_type& x)
+ {
+ // If both containers are using the heap instead of local memory
+ // then we can do a fast pointer swap instead of content swap.
+ if ((has_overflowed() && x.has_overflowed()) && (get_overflow_allocator() == x.get_overflow_allocator()))
+ {
+ base_type::swap(x);
+ }
+ else
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+ }
+
+ // Returns the max fixed size, which is the user-supplied nodeCount parameter.
+ size_type max_size() const { return nodeCount; }
+ // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled,
+ // the container size can be greater than nodeCount but full() could return true because the
+ // fixed space may have a recently freed slot.
+ bool full() const { return (base_type::mNumElements >= nodeCount) || ((void*)base_type::mpData != (void*)mBuffer.buffer); }
+ // Returns true if the allocations spilled over into the overflow allocator. Meaningful
+ // only if overflow is enabled.
+ bool has_overflowed() const { return ((void*)base_type::mpData != (void*)mBuffer.buffer); }
+ // Returns the value of the bEnableOverflow template parameter.
+ bool can_overflow() const { return bEnableOverflow; }
+
+ const overflow_allocator_type& get_overflow_allocator() const { return base_type::get_allocator().get_overflow_allocator(); }
+};
+
+
+template <size_t nodeCount, bool bEnableOverflow, typename... Ts>
+inline void swap(fixed_tuple_vector<nodeCount, bEnableOverflow, Ts...>& a,
+ fixed_tuple_vector<nodeCount, bEnableOverflow, Ts...>& b)
+{
+ a.swap(b);
+}
+
+
+} // namespace eastl
+
+#endif // EASTL_FIXEDTUPLEVECTOR_H
diff --git a/include/EASTL/bonus/intrusive_sdlist.h b/include/EASTL/bonus/intrusive_sdlist.h
new file mode 100644
index 0000000..1b126d4
--- /dev/null
+++ b/include/EASTL/bonus/intrusive_sdlist.h
@@ -0,0 +1,694 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// intrusive_sdlist is a special kind of intrusive list which we say is
+// "singly-doubly" linked. Instead of having a typical intrusive list node
+// which looks like this:
+//
+// struct intrusive_sdlist_node {
+// intrusive_sdlist_node *mpNext;
+// intrusive_sdlist_node *mpPrev;
+// };
+//
+// We instead have one that looks like this:
+//
+// struct intrusive_sdlist_node {
+// intrusive_sdlist_node* mpNext;
+// intrusive_sdlist_node** mppPrevNext;
+// };
+//
+// This may seem to be suboptimal, but it has one specific advantage: it allows
+// the intrusive_sdlist class to be the size of only one pointer instead of two.
+// This may seem like a minor optimization, but some users have wanted to create
+// thousands of empty instances of these.
+// This is because while an intrusive_list class looks like this:
+//
+// class intrusive_list {
+// intrusive_list_node mBaseNode;
+// };
+//
+// an intrusive_sdlist class looks like this:
+//
+// class intrusive_sdlist {
+// intrusive_sdlist_node* mpNext;
+// };
+//
+// So here we make a list of plusses and minuses of intrusive sdlists
+// compared to intrusive_lists and intrusive_slists:
+//
+// | list | slist | sdlist
+// ---------------------------------------------------------
+// min size | 8 | 4 | 4
+// node size | 8 | 4 | 8
+// anonymous erase | yes | no | yes
+// reverse iteration | yes | no | no
+//
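+// Example usage (illustrative; Task is a hypothetical node type):
+//
+//     struct Task : public eastl::intrusive_sdlist_node { int id; };
+//
+//     eastl::intrusive_sdlist<Task> taskList;
+//     Task t; t.id = 1;
+//     taskList.push_front(t);                     // t is linked in place; no allocation occurs
+//     eastl::intrusive_sdlist<Task>::remove(t);   // static remove; the owning list needn't be known
+//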
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTRUSIVE_SDLIST_H
+#define EASTL_INTRUSIVE_SDLIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+
+ /// intrusive_sdlist_node
+ ///
+ struct intrusive_sdlist_node
+ {
+ intrusive_sdlist_node* mpNext;
+ intrusive_sdlist_node** mppPrevNext;
+ };
+
+
+ /// IntrusiveSDListIterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct IntrusiveSDListIterator
+ {
+ typedef IntrusiveSDListIterator<T, Pointer, Reference> this_type;
+ typedef IntrusiveSDListIterator<T, T*, T&> iterator;
+ typedef IntrusiveSDListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef T node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ pointer mpNode;
+
+ public:
+ IntrusiveSDListIterator();
+ explicit IntrusiveSDListIterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type.
+ IntrusiveSDListIterator(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ }; // struct IntrusiveSDListIterator
+
+
+
+
+ /// intrusive_sdlist_base
+ ///
+ /// Provides a template-less base class for intrusive_sdlist.
+ ///
+ class intrusive_sdlist_base
+ {
+ public:
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+
+ protected:
+ intrusive_sdlist_node* mpNext;
+
+ public:
+ intrusive_sdlist_base();
+
+ bool empty() const; ///< Returns true if the container is empty.
+ size_type size() const; ///< Returns the number of elements in the list; O(n).
+
+ void clear(); ///< Clears the list; O(1). No deallocation occurs.
+ void pop_front(); ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated.
+ void reverse(); ///< Reverses a list so that front and back are swapped; O(n).
+
+ //bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching.
+
+ }; // class intrusive_sdlist_base
+
+
+
+ /// intrusive_sdlist
+ ///
+ template <typename T = intrusive_sdlist_node>
+ class intrusive_sdlist : public intrusive_sdlist_base
+ {
+ public:
+ typedef intrusive_sdlist<T> this_type;
+ typedef intrusive_sdlist_base base_type;
+ typedef T node_type;
+ typedef T value_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef IntrusiveSDListIterator<T, T*, T&> iterator;
+ typedef IntrusiveSDListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ public:
+ intrusive_sdlist(); ///< Creates an empty list.
+ intrusive_sdlist(const this_type& x); ///< Creates an empty list; ignores the argument.
+ this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument.
+
+ iterator begin(); ///< Returns an iterator pointing to the first element in the list.
+ const_iterator begin() const; ///< Returns a const_iterator pointing to the first element in the list.
+ const_iterator cbegin() const; ///< Returns a const_iterator pointing to the first element in the list.
+
+ iterator end(); ///< Returns an iterator pointing one-after the last element in the list.
+ const_iterator end() const; ///< Returns a const_iterator pointing one-after the last element in the list.
+ const_iterator cend() const; ///< Returns a const_iterator pointing one-after the last element in the list.
+
+		reference       front();                    ///< Returns a reference to the first element. The list must not be empty.
+		const_reference front() const;              ///< Returns a const reference to the first element. The list must not be empty.
+
+ void push_front(value_type& value); ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list.
+ void push_back(value_type& value); ///< Adds an element to the back of the list; O(N). The element is not copied. The element must not be in any other list.
+ void pop_back(); ///< Removes an element from the back of the list; O(N). The element must be present, but is not deallocated.
+
+ bool contains(const value_type& value) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()).
+
+ iterator locate(value_type& value); ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n)
+ const_iterator locate(const value_type& value) const; ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n)
+
+ iterator insert(iterator position, value_type& value); ///< Inserts an element before the element pointed to by the iterator. O(1)
+ iterator erase(iterator position); ///< Erases the element pointed to by the iterator. O(1)
+ iterator erase(iterator first, iterator last); ///< Erases elements within the iterator range [first, last). O(1).
+ void swap(intrusive_sdlist& x); ///< Swaps the contents of two intrusive lists; O(1).
+
+		static void     remove(value_type& value);                   ///< Erases an element from a list; O(1). Note that this is static, so you don't need to know which list the element is in, although it must be in some list.
+
+ void splice(iterator position, value_type& value); ///< Moves the given element into this list before the element pointed to by position; O(1).
+		                                                                                       ///< Required: value must be in some list or have next/prev pointers that point to itself.
+
+ void splice(iterator position, this_type& x); ///< Moves the contents of a list into this list before the element pointed to by position; O(1).
+ ///< Required: &x != this (same as std::list).
+
+		void            splice(iterator position, this_type& x, iterator xPosition);          ///< Moves the element pointed to by xPosition within the list x into the current list before
+ ///< the element pointed to by position; O(1).
+
+ void splice(iterator position, this_type& x, iterator first, iterator last); ///< Moves the range of elements [first, last) from list x into the current list before
+ ///< the element pointed to by position; O(1).
+ ///< Required: position must not be in [first, last). (same as std::list).
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ }; // intrusive_sdlist
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // IntrusiveSDListIterator functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSDListIterator<T, Pointer, Reference>::IntrusiveSDListIterator()
+ {
+ #if EASTL_DEBUG
+ mpNode = NULL;
+ #endif
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSDListIterator<T, Pointer, Reference>::IntrusiveSDListIterator(pointer pNode)
+ : mpNode(pNode)
+ {
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSDListIterator<T, Pointer, Reference>::IntrusiveSDListIterator(const iterator& x)
+ : mpNode(x.mpNode)
+ {
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename IntrusiveSDListIterator<T, Pointer, Reference>::reference
+ IntrusiveSDListIterator<T, Pointer, Reference>::operator*() const
+ {
+ return *mpNode;
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename IntrusiveSDListIterator<T, Pointer, Reference>::pointer
+ IntrusiveSDListIterator<T, Pointer, Reference>::operator->() const
+ {
+ return mpNode;
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename IntrusiveSDListIterator<T, Pointer, Reference>::this_type&
+ IntrusiveSDListIterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return *this;
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename IntrusiveSDListIterator<T, Pointer, Reference>::this_type
+ IntrusiveSDListIterator<T, Pointer, Reference>::operator++(int)
+ {
+ this_type temp = *this;
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return temp;
+ }
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+	// Thus we provide additional template parameters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const IntrusiveSDListIterator<T, PointerA, ReferenceA>& a,
+ const IntrusiveSDListIterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const IntrusiveSDListIterator<T, PointerA, ReferenceA>& a,
+ const IntrusiveSDListIterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const IntrusiveSDListIterator<T, Pointer, Reference>& a,
+ const IntrusiveSDListIterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_sdlist_base
+ ///////////////////////////////////////////////////////////////////////
+
+ inline intrusive_sdlist_base::intrusive_sdlist_base()
+ { mpNext = NULL; }
+
+
+ inline bool intrusive_sdlist_base::empty() const
+ { return mpNext == NULL; }
+
+
+ inline intrusive_sdlist_base::size_type intrusive_sdlist_base::size() const
+ {
+ size_type n = 0;
+ for(const intrusive_sdlist_node* pCurrent = mpNext; pCurrent; pCurrent = pCurrent->mpNext)
+ n++;
+ return n;
+ }
+
+
+ inline void intrusive_sdlist_base::clear()
+ { mpNext = NULL; } // Note that we don't do anything with the list nodes.
+
+
+ inline void intrusive_sdlist_base::pop_front()
+ {
+ // To consider: Set mpNext's pointers to NULL in debug builds.
+ mpNext = mpNext->mpNext;
+ mpNext->mppPrevNext = &mpNext;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_sdlist
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ inline intrusive_sdlist<T>::intrusive_sdlist()
+ {
+ }
+
+
+ template <typename T>
+ inline intrusive_sdlist<T>::intrusive_sdlist(const this_type& /*x*/)
+ : intrusive_sdlist_base()
+ {
+ // We intentionally ignore argument x.
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::this_type& intrusive_sdlist<T>::operator=(const this_type& /*x*/)
+ {
+ return *this; // We intentionally ignore argument x.
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator intrusive_sdlist<T>::begin()
+ { return iterator(static_cast<T*>(mpNext)); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::begin() const
+ { return const_iterator(static_cast<T*>(const_cast<intrusive_sdlist_node*>(mpNext))); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::cbegin() const
+ { return const_iterator(static_cast<T*>(const_cast<intrusive_sdlist_node*>(mpNext))); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator intrusive_sdlist<T>::end()
+ { return iterator(static_cast<T*>(NULL)); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::end() const
+ { return const_iterator(static_cast<const T*>(NULL)); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::cend() const
+ { return const_iterator(static_cast<const T*>(NULL)); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::reference intrusive_sdlist<T>::front()
+ { return *static_cast<T*>(mpNext); }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_reference intrusive_sdlist<T>::front() const
+ { return *static_cast<const T*>(mpNext); }
+
+
+ template <typename T>
+ inline void intrusive_sdlist<T>::push_front(value_type& value)
+ {
+ value.mpNext = mpNext;
+ value.mppPrevNext = &mpNext;
+ if(mpNext)
+ mpNext->mppPrevNext = &value.mpNext;
+ mpNext = &value;
+ }
+
+
+ template <typename T>
+ inline void intrusive_sdlist<T>::push_back(value_type& value)
+ {
+ intrusive_sdlist_node* pNext = mpNext;
+ intrusive_sdlist_node** ppPrevNext = &mpNext;
+
+ while(pNext)
+ {
+ ppPrevNext = &pNext->mpNext;
+ pNext = pNext->mpNext;
+ }
+
+ *ppPrevNext = &value;
+ value.mppPrevNext = ppPrevNext;
+ value.mpNext = NULL;
+ }
+
+
+ template <typename T>
+ inline void intrusive_sdlist<T>::pop_back()
+ {
+ node_type* pCurrent = static_cast<node_type*>(mpNext);
+
+ while(pCurrent->mpNext)
+ pCurrent = static_cast<node_type*>(pCurrent->mpNext);
+
+ *pCurrent->mppPrevNext = NULL;
+ }
+
+ template <typename T>
+ inline bool intrusive_sdlist<T>::contains(const value_type& value) const
+ {
+ const intrusive_sdlist_node* pCurrent;
+
+ for(pCurrent = mpNext; pCurrent; pCurrent = pCurrent->mpNext)
+ {
+ if(pCurrent == &value)
+ break;
+ }
+
+ return (pCurrent != NULL);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator intrusive_sdlist<T>::locate(value_type& value)
+ {
+ intrusive_sdlist_node* pCurrent;
+
+ for(pCurrent = static_cast<value_type*>(mpNext); pCurrent; pCurrent = pCurrent->mpNext)
+ {
+ if(pCurrent == &value)
+ break;
+ }
+
+ return iterator(static_cast<value_type*>(pCurrent));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::const_iterator intrusive_sdlist<T>::locate(const T& value) const
+ {
+ const intrusive_sdlist_node* pCurrent;
+
+ for(pCurrent = static_cast<value_type*>(mpNext); pCurrent; pCurrent = pCurrent->mpNext)
+ {
+ if(pCurrent == &value)
+ break;
+ }
+
+ return const_iterator(static_cast<value_type*>(const_cast<intrusive_sdlist_node*>(pCurrent)));
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator
+ intrusive_sdlist<T>::insert(iterator position, value_type& value)
+ {
+ value.mppPrevNext = position.mpNode->mppPrevNext;
+ value.mpNext = position.mpNode;
+ *value.mppPrevNext = &value;
+ position.mpNode->mppPrevNext = &value.mpNext;
+
+ return iterator(&value);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator
+ intrusive_sdlist<T>::erase(iterator position)
+ {
+ *position.mpNode->mppPrevNext = position.mpNode->mpNext;
+ position.mpNode->mpNext->mppPrevNext = position.mpNode->mppPrevNext;
+
+ return iterator(position.mpNode);
+ }
+
+
+ template <typename T>
+ inline typename intrusive_sdlist<T>::iterator
+ intrusive_sdlist<T>::erase(iterator first, iterator last)
+ {
+ if(first.mpNode) // If not erasing the end...
+ {
+ *first.mpNode->mppPrevNext = last.mpNode;
+
+ if(last.mpNode) // If not erasing to the end...
+ last.mpNode->mppPrevNext = first.mpNode->mppPrevNext;
+ }
+
+ return last;
+ }
+
+
+ template <typename T>
+ inline void intrusive_sdlist<T>::remove(value_type& value)
+ {
+ *value.mppPrevNext = value.mpNext;
+ if(value.mpNext)
+ value.mpNext->mppPrevNext = value.mppPrevNext;
+ }
+
+
+ template <typename T>
+ void intrusive_sdlist<T>::swap(intrusive_sdlist& x)
+ {
+ // swap anchors
+ intrusive_sdlist_node* const temp(mpNext);
+ mpNext = x.mpNext;
+ x.mpNext = temp;
+
+ if(x.mpNext)
+ x.mpNext->mppPrevNext = &mpNext;
+
+ if(mpNext)
+ mpNext->mppPrevNext = &x.mpNext;
+ }
+
+
+
+
+
+	// To do: Complete these splice functions. Might want to look at intrusive_list for help.
+
+ template <typename T>
+ void intrusive_sdlist<T>::splice(iterator /*position*/, value_type& /*value*/)
+ {
+ EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion.
+ }
+
+
+ template <typename T>
+ void intrusive_sdlist<T>::splice(iterator /*position*/, intrusive_sdlist& /*x*/)
+ {
+ EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion.
+ }
+
+
+ template <typename T>
+ void intrusive_sdlist<T>::splice(iterator /*position*/, intrusive_sdlist& /*x*/, iterator /*xPosition*/)
+ {
+ EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion.
+ }
+
+
+ template <typename T>
+ void intrusive_sdlist<T>::splice(iterator /*position*/, intrusive_sdlist& /*x*/, iterator /*first*/, iterator /*last*/)
+ {
+ EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion.
+ }
+
+
+ template <typename T>
+ inline bool intrusive_sdlist<T>::validate() const
+ {
+ return true; // To do.
+ }
+
+
+ template <typename T>
+ inline int intrusive_sdlist<T>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ bool operator==(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ // If we store an mSize member for intrusive_sdlist, we want to take advantage of it here.
+ typename intrusive_sdlist<T>::const_iterator ia = a.begin();
+ typename intrusive_sdlist<T>::const_iterator ib = b.begin();
+ typename intrusive_sdlist<T>::const_iterator enda = a.end();
+ typename intrusive_sdlist<T>::const_iterator endb = b.end();
+
+ while((ia != enda) && (ib != endb) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda) && (ib == endb);
+ }
+
+ template <typename T>
+ bool operator<(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+ template <typename T>
+ bool operator!=(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return !(a == b);
+ }
+
+ template <typename T>
+ bool operator>(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return b < a;
+ }
+
+ template <typename T>
+ bool operator<=(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return !(b < a);
+ }
+
+ template <typename T>
+ bool operator>=(const intrusive_sdlist<T>& a, const intrusive_sdlist<T>& b)
+ {
+ return !(a < b);
+ }
+
+ template <typename T>
+ void swap(intrusive_sdlist<T>& a, intrusive_sdlist<T>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/bonus/intrusive_slist.h b/include/EASTL/bonus/intrusive_slist.h
new file mode 100644
index 0000000..28d445d
--- /dev/null
+++ b/include/EASTL/bonus/intrusive_slist.h
@@ -0,0 +1,321 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// *** Note ***
+// This implementation is incomplete.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTRUSIVE_SLIST_H
+#define EASTL_INTRUSIVE_SLIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+ /// intrusive_slist_node
+ ///
+ struct intrusive_slist_node
+ {
+ intrusive_slist_node* mpNext;
+ };
+
+
+ /// IntrusiveSListIterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct IntrusiveSListIterator
+ {
+ typedef IntrusiveSListIterator<T, Pointer, Reference> this_type;
+ typedef IntrusiveSListIterator<T, T*, T&> iterator;
+ typedef IntrusiveSListIterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef T node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ IntrusiveSListIterator();
+ explicit IntrusiveSListIterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type.
+ IntrusiveSListIterator(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ }; // struct IntrusiveSListIterator
+
+
+
+ /// intrusive_slist_base
+ ///
+ /// Provides a template-less base class for intrusive_slist.
+ ///
+ class intrusive_slist_base
+ {
+ public:
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+
+ protected:
+ intrusive_slist_node* mpNext;
+
+ public:
+ intrusive_slist_base();
+
+ bool empty() const; ///< Returns true if the container is empty.
+ size_type size() const; ///< Returns the number of elements in the list; O(n).
+
+ void clear(); ///< Clears the list; O(1). No deallocation occurs.
+ void pop_front(); ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated.
+ void reverse(); ///< Reverses a list so that front and back are swapped; O(n).
+
+ //bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching.
+
+ }; // class intrusive_slist_base
+
+
+
+ /// intrusive_slist
+ ///
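+	/// Example of intended usage (a minimal sketch only; much of the implementation
+	/// below is still marked "To do", and MyNode is a hypothetical user type):
+	///     struct MyNode : public eastl::intrusive_slist_node { int mX; };
+	///
+	///     MyNode a, b;
+	///     eastl::intrusive_slist<MyNode> list;
+	///     list.push_front(a);   // The list links 'a' in place; nothing is copied or allocated.
+	///     list.push_front(b);
+	///     list.pop_front();     // Unlinks 'b'; nothing is deallocated.
+	///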
+ template <typename T = intrusive_slist_node>
+ class intrusive_slist : public intrusive_slist_base
+ {
+ public:
+ typedef intrusive_slist<T> this_type;
+ typedef intrusive_slist_base base_type;
+ typedef T node_type;
+ typedef T value_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef IntrusiveSListIterator<T, T*, T&> iterator;
+ typedef IntrusiveSListIterator<T, const T*, const T&> const_iterator;
+
+ public:
+ intrusive_slist(); ///< Creates an empty list.
+ //intrusive_slist(const this_type& x); ///< Creates an empty list; ignores the argument. To consider: Is this a useful function?
+ //this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument. To consider: Is this a useful function?
+
+ iterator begin(); ///< Returns an iterator pointing to the first element in the list. O(1).
+ const_iterator begin() const; ///< Returns a const_iterator pointing to the first element in the list. O(1).
+ const_iterator cbegin() const; ///< Returns a const_iterator pointing to the first element in the list. O(1).
+ iterator end(); ///< Returns an iterator pointing one-after the last element in the list. O(1).
+ const_iterator end() const; ///< Returns a const_iterator pointing one-after the last element in the list. O(1).
+ const_iterator cend() const; ///< Returns a const_iterator pointing one-after the last element in the list. O(1).
+ iterator before_begin(); ///< Returns iterator to position before begin. O(1).
+ const_iterator before_begin() const; ///< Returns a const_iterator to the position before begin. O(1).
+ const_iterator cbefore_begin() const; ///< Returns a const_iterator to the position before begin. O(1).
+
+ iterator previous(const_iterator position); ///< Returns iterator to previous position. O(n).
+ const_iterator previous(const_iterator position) const; ///< Returns iterator to previous position. O(n).
+
+ reference front(); ///< Returns a reference to the first element. The list must not be empty.
+ const_reference front() const; ///< Returns a const reference to the first element. The list must not be empty.
+
+ void push_front(value_type& value); ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list.
+ void pop_front(); ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated.
+
+ bool contains(const value_type& value) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()).
+
+ iterator locate(value_type& value); ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n)
+ const_iterator locate(const value_type& value) const; ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n)
+
+ iterator insert(iterator position, value_type& value); ///< Inserts an element before the element pointed to by the iterator. O(n)
+ iterator insert_after(iterator position, value_type& value); ///< Inserts an element after the element pointed to by the iterator. O(1)
+
+ iterator erase(iterator position); ///< Erases the element pointed to by the iterator. O(n)
+ iterator erase_after(iterator position); ///< Erases the element after the element pointed to by the iterator. O(1)
+
+ iterator erase(iterator first, iterator last); ///< Erases elements within the iterator range [first, last). O(n).
+ iterator erase_after(iterator before_first, iterator last); ///< Erases elements within the iterator range [before_first, last). O(1).
+
+ void swap(this_type& x); ///< Swaps the contents of two intrusive lists; O(1).
+
+
+ void splice(iterator position, value_type& value); ///< Moves the given element into this list before the element pointed to by position; O(n).
+ ///< Required: value must be in some list or have first/next pointers that point to itself.
+
+ void splice(iterator position, this_type& x); ///< Moves the contents of a list into this list before the element pointed to by position; O(n).
+ ///< Required: &x != this (same as std::list).
+
+ void splice(iterator position, this_type& x, iterator xPosition); ///< Moves the element pointed to by xPosition within the list x into the current list before
+ ///< the element pointed to by position; O(n).
+
+ void splice(iterator position, this_type& x, iterator first, iterator last); ///< Moves the range of elements [first, last) from list x into the current list before
+ ///< the element pointed to by position; O(n).
+ ///< Required: position must not be in [first, last). (same as std::list).
+
+ void splice_after(iterator position, value_type& value); ///< Moves the given element into this list after the element pointed to by position; O(1).
+ ///< Required: value must be in some list or have first/next pointers that point to itself.
+
+ void splice_after(iterator position, this_type& x); ///< Moves the contents of a list into this list after the element pointed to by position; O(n).
+ ///< Required: &x != this (same as std::list).
+
+ void splice_after(iterator position, this_type& x, iterator xPrevious); ///< Moves the element after xPrevious to be after position. O(1).
+ ///< Required: &x != this (same as std::list).
+
+ void splice_after(iterator position, this_type& x, iterator before_first, iterator before_last); ///< Moves the elements in the range of [before_first+1, before_last+1) to be after position. O(1).
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ }; // intrusive_slist
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // IntrusiveSListIterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSListIterator<T, Pointer, Reference>::IntrusiveSListIterator()
+ {
+ #if EASTL_DEBUG
+ mpNode = NULL;
+ #endif
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSListIterator<T, Pointer, Reference>::IntrusiveSListIterator(pointer pNode)
+ : mpNode(pNode)
+ {
+ }
+
+ template <typename T, typename Pointer, typename Reference>
+ inline IntrusiveSListIterator<T, Pointer, Reference>::IntrusiveSListIterator(const iterator& x)
+ : mpNode(x.mpNode)
+ {
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_slist_base
+ ///////////////////////////////////////////////////////////////////////
+
+ // To do.
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_slist
+ ///////////////////////////////////////////////////////////////////////
+
+ // To do.
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ bool operator==(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ // If we store an mSize member for intrusive_slist, we want to take advantage of it here.
+ typename intrusive_slist<T>::const_iterator ia = a.begin();
+ typename intrusive_slist<T>::const_iterator ib = b.begin();
+ typename intrusive_slist<T>::const_iterator enda = a.end();
+ typename intrusive_slist<T>::const_iterator endb = b.end();
+
+ while((ia != enda) && (ib != endb) && (*ia == *ib))
+ {
+ ++ia;
+ ++ib;
+ }
+ return (ia == enda) && (ib == endb);
+ }
+
+ template <typename T>
+ bool operator<(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+ template <typename T>
+ bool operator!=(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return !(a == b);
+ }
+
+ template <typename T>
+ bool operator>(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return b < a;
+ }
+
+ template <typename T>
+ bool operator<=(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return !(b < a);
+ }
+
+ template <typename T>
+ bool operator>=(const intrusive_slist<T>& a, const intrusive_slist<T>& b)
+ {
+ return !(a < b);
+ }
+
+ template <typename T>
+ void swap(intrusive_slist<T>& a, intrusive_slist<T>& b)
+ {
+ a.swap(b);
+ }
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/bonus/list_map.h b/include/EASTL/bonus/list_map.h
new file mode 100644
index 0000000..8a080d6
--- /dev/null
+++ b/include/EASTL/bonus/list_map.h
@@ -0,0 +1,932 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_LIST_MAP_H
+#define EASTL_LIST_MAP_H
+
+
+#include <EASTL/map.h>
+
+
+namespace eastl
+{
+
+ /// EASTL_LIST_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_LIST_MAP_DEFAULT_NAME
+ #define EASTL_LIST_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " list_map" // Unless the user overrides something, this is "EASTL list_map".
+ #endif
+
+ /// EASTL_LIST_MAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_LIST_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_LIST_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_LIST_MAP_DEFAULT_NAME)
+ #endif
+
+
+ /// list_map_data_base
+ ///
+ /// We define a list_map_data_base separately from list_map_data (below), because it
+ /// allows us to have non-templated operations, and it makes it so that the
+ /// list_map anchor node doesn't carry a T with it, which would waste space and
+ /// possibly lead to surprising the user due to extra Ts existing that the user
+ /// didn't explicitly create. The downside to all of this is that it makes debug
+ /// viewing of a list_map harder, given that the node pointers are of type
+ /// list_map_data_base and not list_map_data.
+ ///
+ struct list_map_data_base
+ {
+ list_map_data_base* mpNext;
+ list_map_data_base* mpPrev;
+ };
+
+
+ /// list_map_data
+ ///
+ template <typename Value>
+ struct list_map_data : public list_map_data_base
+ {
+ typedef Value value_type;
+
+ list_map_data(const value_type& value);
+
+ value_type mValue; // This is a pair of key/value.
+ };
+
+
+ /// list_map_iterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct list_map_iterator
+ {
+ typedef list_map_iterator<T, Pointer, Reference> this_type;
+ typedef list_map_iterator<T, T*, T&> iterator;
+ typedef list_map_iterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef list_map_data_base base_node_type;
+ typedef list_map_data<T> node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ list_map_iterator();
+ list_map_iterator(const base_node_type* pNode);
+ list_map_iterator(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ this_type& operator--();
+ this_type operator--(int);
+
+ }; // list_map_iterator
+
+
+ /// use_value_first
+ ///
+ /// operator()(x) simply returns x.mValue.first. Used in list_map.
+ /// This is similar to eastl::use_first, however it assumes that the input type is an object
+ /// whose mValue is an eastl::pair, and the first value in the pair is the desired return.
+ ///
+ template <typename Object>
+ struct use_value_first
+ {
+ typedef Object argument_type;
+ typedef typename Object::value_type::first_type result_type;
+
+ const result_type& operator()(const Object& x) const
+ { return x.mValue.first; }
+ };
+
+
+ /// list_map
+ ///
+ /// Implements a map like container, which also provides functionality similar to a list.
+ ///
+ /// Note: Like a map, keys must still be unique. As such, push_back() and push_front() operations
+ /// return a bool indicating success, or failure if the entry's key is already in use.
+ ///
+ /// list_map is designed to improve performance for situations commonly implemented as:
+ /// A map, which must be iterated over to find the oldest entry, or purge expired entries.
+ /// A list, which must be iterated over to remove a player's record when they sign off.
+ ///
+ /// list_map requires a little more memory per node than either a list or map alone,
+ /// and many of list_map's functions have a higher operational cost (CPU time) than their
+ /// counterparts in list and map. However, as the node count increases, list_map quickly outperforms
+ /// either a list or a map when find [by-index] and front/back type operations are required.
+ ///
+ /// In essence, list_map avoids O(n) iterations at the expense of slightly costlier quick (O(1) and O(log n)) operations:
+ /// push_front(), push_back(), pop_front() and pop_back() take O(log n) time, similar to map::insert(), rather than the O(1) time of a list.
+ /// However, front() and back() remain O(1).
+ ///
+ /// As a canonical example, consider a large backlog of player group invites, which are removed when either:
+ ///     The invitation times out - in main loop: while( !listMap.empty() && listMap.front().second.IsExpired() ) { listMap.pop_front(); }
+ /// The player rejects the outstanding invitation - on rejection: iter = listMap.find(playerId); if (iter != listMap.end()) { listMap.erase(iter); }
+ ///
+ /// For a similar example, consider a high volume pending request container which must:
+ /// Time out old requests (similar to invites timing out above)
+ /// Remove requests once they've been handled (similar to rejecting invites above)
+ ///
+ /// For such usage patterns, the performance benefits of list_map become dramatic with
+ /// common O(n) operations once the node count rises to hundreds or more.
+ ///
+ /// When high performance is a priority, containers with thousands of nodes or more
+ /// can quickly exhibit unacceptable performance when executing even infrequent O(n) operations.
+ ///
+ /// In order to maintain strong performance, avoid iterating over list_map whenever possible.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// find_as
+ /// In order to support the ability to have a tree of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the tree's key type. See the find_as function
+ /// for more documentation on this.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a list_map container, your pool
+ /// needs to contain items of type list_map::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+ ///        typedef list_map<Widget, int, less<Widget>, MemoryPool> WidgetMap; // Declare your WidgetMap type.
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetMap myMap(&myPool); // Create a map that uses the pool.
+ ///
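+ /// Basic usage (a minimal sketch; PlayerId, Invite, playerId, invite, mTimestamp and
+ /// GetExpiredCutoff() are hypothetical user code):
+ ///     eastl::list_map<PlayerId, Invite> inviteMap;
+ ///
+ ///     inviteMap.push_back(playerId, invite);  // Returns false if playerId is already present.
+ ///
+ ///     while (!inviteMap.empty() && (inviteMap.front().second.mTimestamp < GetExpiredCutoff()))
+ ///         inviteMap.pop_front();              // Purge the oldest entries; O(log n) each.
+ ///
+ ///     inviteMap.erase(playerId);              // Remove by key; O(log n).
+ ///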
+ template <typename Key, typename T, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+ class list_map
+ : protected rbtree<Key, eastl::list_map_data<eastl::pair<const Key, T> >, Compare, Allocator, eastl::use_value_first<eastl::list_map_data<eastl::pair<const Key, T> > >, true, true>
+ {
+ public:
+ typedef rbtree<Key, eastl::list_map_data<eastl::pair<const Key, T> >, Compare, Allocator,
+ eastl::use_value_first<eastl::list_map_data<eastl::pair<const Key, T> > >, true, true> base_type;
+ typedef list_map<Key, T, Compare, Allocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::key_type key_type;
+ typedef T mapped_type;
+ typedef typename eastl::pair<const Key, T> value_type; // This is intentionally different from base_type::value_type
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef typename base_type::node_type node_type; // Despite the internal and external values being different, we're keeping the node type the same as the base
+ // in order to allow for pool allocation. See EASTL/map.h for more information.
+ typedef typename eastl::list_map_iterator<value_type, value_type*, value_type&> iterator; // This is intentionally different from base_type::iterator
+ typedef typename eastl::list_map_iterator<value_type, const value_type*, const value_type&> const_iterator; // This is intentionally different from base_type::const_iterator
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename eastl::pair<iterator, bool> insert_return_type; // This is intentionally removed, as list_map doesn't support insert() functions, in favor of list like push_back and push_front
+ typedef typename eastl::use_first<value_type> extract_key; // This is intentionally different from base_type::extract_key
+
+ using base_type::get_allocator;
+ using base_type::set_allocator;
+ using base_type::key_comp;
+ using base_type::empty;
+ using base_type::size;
+
+ protected:
+ typedef typename eastl::list_map_data<eastl::pair<const Key, T> > internal_value_type;
+
+ protected:
+ // internal base node, acting as the sentinel for list like behaviors
+ list_map_data_base mNode;
+
+ public:
+ list_map(const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR);
+ list_map(const Compare& compare, const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR);
+
+ // To do: Implement the following:
+
+ //list_map(const this_type& x);
+ //list_map(this_type&& x);
+ //list_map(this_type&& x, const allocator_type& allocator);
+ //list_map(std::initializer_list<mapped_type> ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR);
+
+ //template <typename Iterator>
+ //list_map(Iterator itBegin, Iterator itEnd);
+
+ //this_type& operator=(const this_type& x);
+ //this_type& operator=(std::initializer_list<mapped_type> ilist);
+ //this_type& operator=(this_type&& x);
+
+ //void swap(this_type& x);
+
+ public:
+ // iterators
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ public:
+ // List like methods
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ // push_front and push_back which takes in a key/value pair
+ bool push_front(const value_type& value);
+ bool push_back(const value_type& value);
+
+ // push_front and push_back which take key and value separately, for convenience
+ bool push_front(const key_type& key, const mapped_type& value);
+ bool push_back(const key_type& key, const mapped_type& value);
+
+ void pop_front();
+ void pop_back();
+
+ public:
+ // Map like methods
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+ template <typename U, typename Compare2>
+ iterator find_as(const U& u, Compare2 compare2);
+ template <typename U, typename Compare2>
+ const_iterator find_as(const U& u, Compare2 compare2) const;
+
+ size_type count(const key_type& key) const;
+ size_type erase(const key_type& key);
+
+ public:
+ // Shared methods which are common to list and map
+ iterator erase(const_iterator position);
+ reverse_iterator erase(const_reverse_iterator position);
+
+ void clear();
+ void reset_lose_memory();
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ public:
+ // list like functionality which is in consideration for implementation:
+ // iterator insert(const_iterator position, const value_type& value);
+ // void remove(const mapped_type& x);
+
+ public:
+ // list like functionality which may be implemented, but is discouraged from implementation
+ // due to the likelihood that it would require O(n) time to execute.
+ // template <typename Predicate>
+ // void remove_if(Predicate);
+ // void reverse();
+ // void sort();
+ // template<typename Compare>
+ // void sort(Compare compare);
+
+ public:
+ // map like functionality which list_map does not support, due to ambiguity with list like functionality:
+ #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ template <typename InputIterator>
+ list_map(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR) = delete;
+
+ insert_return_type insert(const value_type& value) = delete;
+ iterator insert(const_iterator position, const value_type& value) = delete;
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last) = delete;
+
+ insert_return_type insert(const key_type& key) = delete;
+
+ iterator erase(const_iterator first, const_iterator last) = delete;
+ reverse_iterator erase(reverse_iterator first, reverse_iterator last) = delete;
+
+ void erase(const key_type* first, const key_type* last) = delete;
+
+ iterator lower_bound(const key_type& key) = delete;
+ const_iterator lower_bound(const key_type& key) const = delete;
+
+ iterator upper_bound(const key_type& key) = delete;
+ const_iterator upper_bound(const key_type& key) const = delete;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& key) = delete;
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& key) const = delete;
+
+ mapped_type& operator[](const key_type& key) = delete; // Of map, multimap, set, and multimap, only map has operator[].
+ #endif
+
+ public:
+ // list like functionality which list_map does not support, due to ambiguity with map like functionality:
+ #if 0
+ reference push_front() = delete;
+ void* push_front_uninitialized() = delete;
+
+ reference push_back() = delete;
+ void* push_back_uninitialized() = delete;
+
+ iterator insert(const_iterator position) = delete;
+
+ void insert(const_iterator position, size_type n, const value_type& value) = delete;
+
+ template <typename InputIterator>
+ void insert(const_iterator position, InputIterator first, InputIterator last) = delete;
+
+ iterator erase(const_iterator first, const_iterator last) = delete;
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last) = delete;
+
+ void splice(const_iterator position, this_type& x) = delete;
+ void splice(const_iterator position, this_type& x, const_iterator i) = delete;
+ void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last) = delete;
+
+ void merge(this_type& x) = delete;
+
+ template <typename Compare>
+ void merge(this_type& x, Compare compare) = delete;
+
+ void unique() = delete; // Uniqueness is enforced by map functionality
+
+ template <typename BinaryPredicate>
+ void unique(BinaryPredicate) = delete; // Uniqueness is enforced by map functionality
+ #endif
+
+ }; // list_map
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // list_map_data
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value>
+ inline list_map_data<Value>::list_map_data(const Value& value)
+ : mValue(value)
+ {
+ mpNext = NULL; // GCC 4.8 is generating warnings about referencing these values in list_map::push_front unless we
+ mpPrev = NULL; // initialize them here. The compiler seems to be mistaken, as our code isn't actually using them uninitialized.
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // list_map_iterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ inline list_map_iterator<T, Pointer, Reference>::list_map_iterator()
+ : mpNode(NULL)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline list_map_iterator<T, Pointer, Reference>::list_map_iterator(const base_node_type* pNode)
+ : mpNode(static_cast<node_type*>(const_cast<base_node_type*>(pNode)))
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline list_map_iterator<T, Pointer, Reference>::list_map_iterator(const iterator& x)
+ : mpNode(const_cast<node_type*>(x.mpNode))
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::reference
+ list_map_iterator<T, Pointer, Reference>::operator*() const
+ {
+ return mpNode->mValue;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::pointer
+ list_map_iterator<T, Pointer, Reference>::operator->() const
+ {
+ return &mpNode->mValue;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::this_type&
+ list_map_iterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::this_type
+ list_map_iterator<T, Pointer, Reference>::operator++(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpNext);
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::this_type&
+ list_map_iterator<T, Pointer, Reference>::operator--()
+ {
+ mpNode = static_cast<node_type*>(mpNode->mpPrev);
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ inline typename list_map_iterator<T, Pointer, Reference>::this_type
+ list_map_iterator<T, Pointer, Reference>::operator--(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(mpNode->mpPrev);
+ return temp;
+ }
+
+
+ // We provide additional template parameters here to support comparisons between const and non-const iterators.
+ // See C++ defect report #179, or EASTL/list.h for more information.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const list_map_iterator<T, PointerA, ReferenceA>& a,
+ const list_map_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const list_map_iterator<T, PointerA, ReferenceA>& a,
+ const list_map_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const list_map_iterator<T, Pointer, Reference>& a,
+ const list_map_iterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // list_map
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline list_map<Key, T, Compare, Allocator>::list_map(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ mNode.mpNext = &mNode;
+ mNode.mpPrev = &mNode;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline list_map<Key, T, Compare, Allocator>::list_map(const Compare& compare, const allocator_type& allocator)
+ : base_type(compare, allocator)
+ {
+ mNode.mpNext = &mNode;
+ mNode.mpPrev = &mNode;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::begin() EA_NOEXCEPT
+ {
+ return iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::begin() const EA_NOEXCEPT
+ {
+ return const_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::cbegin() const EA_NOEXCEPT
+ {
+ return const_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::end() EA_NOEXCEPT
+ {
+ return iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::end() const EA_NOEXCEPT
+ {
+ return const_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::cend() const EA_NOEXCEPT
+ {
+ return const_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reverse_iterator
+ list_map<Key, T, Compare, Allocator>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reverse_iterator
+ list_map<Key, T, Compare, Allocator>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reverse_iterator
+ list_map<Key, T, Compare, Allocator>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(&mNode);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reverse_iterator
+ list_map<Key, T, Compare, Allocator>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reverse_iterator
+ list_map<Key, T, Compare, Allocator>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reverse_iterator
+ list_map<Key, T, Compare, Allocator>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(mNode.mpNext);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reference
+ list_map<Key, T, Compare, Allocator>::front()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<internal_value_type*>(mNode.mpNext) == &mNode))
+ EASTL_FAIL_MSG("list_map::front -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<internal_value_type*>(mNode.mpNext)->mValue;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reference
+ list_map<Key, T, Compare, Allocator>::front() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<internal_value_type*>(mNode.mpNext) == &mNode))
+ EASTL_FAIL_MSG("list_map::front -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<internal_value_type*>(mNode.mpNext)->mValue;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reference
+ list_map<Key, T, Compare, Allocator>::back()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<internal_value_type*>(mNode.mpNext) == &mNode))
+ EASTL_FAIL_MSG("list_map::back -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<internal_value_type*>(mNode.mpPrev)->mValue;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_reference
+ list_map<Key, T, Compare, Allocator>::back() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(static_cast<internal_value_type*>(mNode.mpNext) == &mNode))
+ EASTL_FAIL_MSG("list_map::back -- empty container");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return static_cast<internal_value_type*>(mNode.mpPrev)->mValue;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::push_front(const value_type& value)
+ {
+ internal_value_type tempValue(value);
+ typename base_type::insert_return_type baseReturn = base_type::insert(tempValue);
+
+ // Did the insert succeed?
+ if (baseReturn.second)
+ {
+ internal_value_type* pNode = &(*baseReturn.first);
+
+ pNode->mpNext = mNode.mpNext;
+ pNode->mpPrev = &mNode;
+
+ mNode.mpNext->mpPrev = pNode;
+ mNode.mpNext = pNode;
+
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::push_back(const value_type& value)
+ {
+ internal_value_type tempValue(value);
+ typename base_type::insert_return_type baseReturn = base_type::insert(tempValue);
+
+ // Did the insert succeed?
+ if (baseReturn.second)
+ {
+ internal_value_type* pNode = &(*baseReturn.first);
+
+ pNode->mpPrev = mNode.mpPrev;
+ pNode->mpNext = &mNode;
+
+ mNode.mpPrev->mpNext = pNode;
+ mNode.mpPrev = pNode;
+
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::push_front(const key_type& key, const mapped_type& value)
+ {
+ return push_front(eastl::make_pair(key, value));
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::push_back(const key_type& key, const mapped_type& value)
+ {
+ return push_back(eastl::make_pair(key, value));
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ void list_map<Key, T, Compare, Allocator>::pop_front()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(empty()))
+ EASTL_FAIL_MSG("list_map::pop_front -- empty container");
+ #endif
+
+ erase(static_cast<internal_value_type*>(mNode.mpNext)->mValue.first);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ void list_map<Key, T, Compare, Allocator>::pop_back()
+ {
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(empty()))
+ EASTL_FAIL_MSG("list_map::pop_back -- empty container");
+ #endif
+
+ erase(static_cast<internal_value_type*>(mNode.mpPrev)->mValue.first);
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::find(const key_type& key)
+ {
+ typename base_type::iterator baseIter = base_type::find(key);
+ if (baseIter != base_type::end())
+ {
+ return iterator(&(*baseIter));
+ }
+ else
+ {
+ return end();
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::find(const key_type& key) const
+ {
+ typename base_type::const_iterator baseIter = base_type::find(key);
+ if (baseIter != base_type::end())
+ {
+ return const_iterator(&(*baseIter));
+ }
+ else
+ {
+ return end();
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ template <typename U, typename Compare2>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::find_as(const U& u, Compare2 compare2)
+ {
+ typename base_type::iterator baseIter = base_type::find_as(u, compare2);
+ if (baseIter != base_type::end())
+ {
+ return iterator(&(*baseIter));
+ }
+ else
+ {
+ return end();
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ template <typename U, typename Compare2>
+ inline typename list_map<Key, T, Compare, Allocator>::const_iterator
+ list_map<Key, T, Compare, Allocator>::find_as(const U& u, Compare2 compare2) const
+ {
+ typename base_type::const_iterator baseIter = base_type::find_as(u, compare2);
+ if (baseIter != base_type::end())
+ {
+ return const_iterator(&(*baseIter));
+ }
+ else
+ {
+ return end();
+ }
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::size_type
+ list_map<Key, T, Compare, Allocator>::count(const key_type& key) const
+ {
+ const typename base_type::const_iterator it = base_type::find(key);
+ return (it != base_type::end()) ? 1 : 0;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::size_type
+ list_map<Key, T, Compare, Allocator>::erase(const key_type& key)
+ {
+ typename base_type::iterator baseIter = base_type::find(key);
+ if (baseIter != base_type::end())
+ {
+ internal_value_type* node = &(*baseIter);
+
+ node->mpNext->mpPrev = node->mpPrev;
+ node->mpPrev->mpNext = node->mpNext;
+
+ base_type::erase(baseIter);
+
+ return 1;
+ }
+ return 0;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::iterator
+ list_map<Key, T, Compare, Allocator>::erase(const_iterator position)
+ {
+ iterator posIter(position.mpNode); // Convert from const.
+ iterator eraseIter(posIter++);
+ erase(eraseIter->first);
+ return posIter;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename list_map<Key, T, Compare, Allocator>::reverse_iterator
+ list_map<Key, T, Compare, Allocator>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ void list_map<Key, T, Compare, Allocator>::clear()
+ {
+ base_type::clear();
+
+ mNode.mpNext = &mNode;
+ mNode.mpPrev = &mNode;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ void list_map<Key, T, Compare, Allocator>::reset_lose_memory()
+ {
+ base_type::reset_lose_memory();
+
+ mNode.mpNext = &mNode;
+ mNode.mpPrev = &mNode;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ bool list_map<Key, T, Compare, Allocator>::validate() const
+ {
+ if (!base_type::validate())
+ {
+ return false;
+ }
+
+ size_type nodeCount(0);
+ list_map_data_base* node = mNode.mpNext;
+ while (node != &mNode)
+ {
+ internal_value_type* data = static_cast<internal_value_type*>(node);
+ if (base_type::find(data->mValue.first) == base_type::end())
+ {
+ return false;
+ }
+ node = node->mpNext;
+ ++nodeCount;
+ }
+ if (nodeCount != size())
+ {
+ return false;
+ }
+ nodeCount = 0;
+ node = mNode.mpPrev;
+ while (node != &mNode)
+ {
+ internal_value_type* data = static_cast<internal_value_type*>(node);
+ if (base_type::find(data->mValue.first) == base_type::end())
+ {
+ return false;
+ }
+ node = node->mpPrev;
+ ++nodeCount;
+ }
+ if (nodeCount != size())
+ {
+ return false;
+ }
+
+ return true;
+ }
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ int list_map<Key, T, Compare, Allocator>::validate_iterator(const_iterator iter) const
+ {
+ for (const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if (temp == iter)
+ {
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+ }
+
+ if (iter == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/include/EASTL/bonus/lru_cache.h b/include/EASTL/bonus/lru_cache.h
new file mode 100644
index 0000000..46d053d
--- /dev/null
+++ b/include/EASTL/bonus/lru_cache.h
@@ -0,0 +1,424 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// lru_cache is a container that simplifies caching of objects in a map.
+// Basically, you give the container a key, like a string, and the data you want.
+// The container provides callback mechanisms to generate data if it's missing
+// as well as delete data when it's purged from the cache. This container
+// uses a least recently used method: whatever the oldest item is will be
+// replaced with a new entry.
+//
+// Algorithmically, the container is a combination of a map and a list.
+// The list stores the age of the entries by moving the entry to the head
+// of the list on each access, either by a call to get() or to touch().
+// The map is just the map as one would expect.
+//
+// This is useful for caching off data that is expensive to generate,
+// for example text to speech wave files that are dynamically generated,
+// but that will need to be reused, as is the case in narration of menu
+// entries as a user scrolls through the entries.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LRUCACHE_H
+#define EASTL_LRUCACHE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+#pragma once
+#endif
+
+#include <EASTL/list.h>
+#include <EASTL/unordered_map.h>
+#include <EASTL/optional.h>
+
+namespace eastl
+{
+ /// EASTL_LRUCACHE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_LRUCACHE_DEFAULT_NAME
+ #define EASTL_LRUCACHE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " lru_cache" // Unless the user overrides something, this is "EASTL lru_cache".
+ #endif
+
+
+ /// EASTL_LRUCACHE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_LRUCACHE_DEFAULT_ALLOCATOR
+ #define EASTL_LRUCACHE_DEFAULT_ALLOCATOR allocator_type(EASTL_LRUCACHE_DEFAULT_NAME)
+ #endif
+
+ /// lru_cache
+ ///
+ /// Implements a caching map based off of a key and data.
+ /// LRUList parameter is any container that guarantees the validity of its iterator even after a modification (e.g. list)
+ /// LRUMap is any mapping container that can map a key to some data. By default, we use unordered_map, but it might be better
+ /// to use hash_map or some other structure depending on your key/data combination. For example, you may want to swap the
+ /// map backing if using strings as keys or if the data objects are small. In any case, unordered_map is a good default and should
+ /// work well enough, since the purpose of this class is to cache results of expensive, order-of-milliseconds, operations.
+ ///
+ /// Algorithmic Performance (default data structures):
+ /// touch() -> O(1)
+ ///     insert() / insert_or_assign(), get() / operator[] -> equivalent to the underlying map (O(1) on average, O(n) worst for unordered_map)
+ /// size() -> O(1)
+ ///
+ /// All accesses to a given key (insert, update, get) will push that key to most recently used.
+ /// If the data objects are shared between threads, it would be best to use a smart pointer to manage the lifetime of the data,
+ /// as it could be removed from the cache while in use by another thread.
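+ ///
+ /// Example usage (a minimal sketch; WavePtr, LoadWave and FreeWave are hypothetical user code):
+ ///     eastl::lru_cache<eastl::string, WavePtr> cache(16);
+ ///     cache.setCreateCallback([](eastl::string key) { return LoadWave(key); });
+ ///     cache.setDeleteCallback([](const WavePtr& wave) { FreeWave(wave); });
+ ///
+ ///     WavePtr& wave = cache.get("menu_item_42"); // Created on first access via the creator callback, cached afterwards.
+ ///     cache.touch("menu_item_42");               // Marks the entry as most recently used.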
+ template <typename Key,
+ typename Value,
+ typename Allocator = EASTLAllocatorType,
+ typename list_type = eastl::list<Key, Allocator>,
+ typename map_type = eastl::unordered_map<Key,
+ eastl::pair<Value, typename list_type::iterator>,
+ eastl::hash<Key>,
+ eastl::equal_to<Key>,
+ Allocator>>
+ class lru_cache
+ {
+ public:
+ using key_type = Key;
+ using value_type = Value;
+ using allocator_type = Allocator;
+ using size_type = eastl_size_t;
+ using list_iterator = typename list_type::iterator;
+ using map_iterator = typename map_type::iterator;
+ using data_container_type = eastl::pair<value_type, list_iterator>;
+ using iterator = typename map_type::iterator;
+ using const_iterator = typename map_type::const_iterator;
+ using this_type = lru_cache<key_type, value_type, Allocator, list_type, map_type>;
+ using create_callback_type = eastl::function<value_type(key_type)>;
+ using delete_callback_type = eastl::function<void(const value_type &)>;
+
+ /// lru_cache constructor
+ ///
+ /// Creates a Key / Value map that stores at most 'size' Value objects before evicting the oldest.
+ /// For complex objects or operations, the creator and deletor callbacks can be used.
+ /// This works just like a regular map object: on access, the Value is created if it doesn't exist, and returned otherwise.
+ explicit lru_cache(size_type size,
+ const allocator_type& allocator = EASTL_LRUCACHE_DEFAULT_ALLOCATOR,
+ create_callback_type creator = nullptr,
+ delete_callback_type deletor = nullptr)
+ : m_list(allocator)
+ , m_map(allocator)
+ , m_capacity(size)
+ , m_create_callback(creator)
+ , m_delete_callback(deletor)
+ {
+ }
+
+ /// lru_cache destructor
+ ///
+ /// Iterates across every entry in the map and calls the deletor before calling the standard destructors
+ ~lru_cache()
+ {
+ // Destruct everything we have cached
+ for (auto& iter : m_map)
+ {
+ if (m_delete_callback)
+ m_delete_callback(iter.second.first);
+ }
+ }
+
+ lru_cache(std::initializer_list<eastl::pair<Key, Value>> il)
+ : lru_cache(il.size())
+ {
+ for(auto& p : il)
+ insert_or_assign(p.first, p.second);
+ }
+
+ // TODO(rparolin): Why do we prevent copies? And what about moves?
+ lru_cache(const this_type&) = delete;
+ this_type &operator=(const this_type&) = delete;
+
+ /// insert
+ ///
+ /// insert key k with value v.
+ /// If key already exists, no change is made and the return value is false.
+ /// If the key doesn't exist, the data is added to the map and the return value is true.
+ bool insert(const key_type& k, const value_type& v)
+ {
+ if (m_map.find(k) == m_map.end())
+ {
+ make_space();
+
+ m_list.push_front(k);
+ m_map[k] = data_container_type(v, m_list.begin());
+
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ /// emplace
+ ///
+ /// Places a new object in place k created with args
+ /// If the key already exists, it is replaced.
+ template <typename... Args>
+ void emplace(const key_type& k, Args&&... args)
+ {
+ make_space();
+
+ m_list.push_front(k);
+ m_map.emplace(k, data_container_type(eastl::forward<Args>(args)..., m_list.begin()));
+ }
+
+ /// insert_or_assign
+ ///
+ /// Same as insert, but replaces the data at key k, if it exists, with the new entry v.
+ /// Note that the deletor for the old v will be called before it's replaced with the new value of v
+ void insert_or_assign(const key_type& k, const value_type& v)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ assign(iter, v);
+ }
+ else
+ {
+ insert(k, v);
+ }
+ }
+
+ /// contains
+ ///
+ /// Returns true if key k exists in the cache
+ bool contains(const key_type& k) const
+ {
+ return m_map.find(k) != m_map.end();
+ }
+
+ /// at
+ ///
+ /// Retrieves the data for key k; returns an empty optional if k does not exist.
+ eastl::optional<value_type> at(const key_type& k)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ return iter->second.first;
+ }
+ else
+ {
+ return eastl::nullopt;
+ }
+ }
+
+ /// get
+ ///
+ /// Retrieves the data for key k. If no data exists, it will be created by calling the
+ /// creator.
+ value_type& get(const key_type& k)
+ {
+ auto iter = m_map.find(k);
+
+ // The entry exists in the cache
+ if (iter != m_map.end())
+ {
+ touch(k);
+ return iter->second.first;
+ }
+ else // The entry doesn't exist in the cache, so create one
+ {
+ // Add the entry to the map
+ insert(k, m_create_callback ? m_create_callback(k) : value_type());
+
+ // return the new data
+ return m_map[k].first;
+ }
+ }
+
+ /// Equivalent to get(k)
+ value_type& operator[](const key_type& k) { return get(k); }
+
+ /// erase
+ ///
+ /// erases key k from the cache.
+ /// If k does not exist, returns false. If k exists, returns true.
+ bool erase(const key_type& k)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ m_list.erase(iter->second.second);
+
+ // Delete the actual entry
+ map_erase(iter);
+
+ return true;
+ }
+
+ return false;
+ }
+
+ /// erase_oldest
+ ///
+ /// Removes the oldest entry from the cache.
+ void erase_oldest()
+ {
+ auto key = m_list.back();
+ m_list.pop_back();
+
+ // Delete the actual entry
+ auto iter = m_map.find(key);
+ map_erase(iter);
+ }
+
+ /// touch
+ ///
+ /// Touches key k, marking it as most recently used.
+ /// If k does not exist, returns false. If the touch was successful, returns true.
+ bool touch(const key_type& k)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ touch(iter);
+ return true;
+ }
+
+ return false;
+ }
+
+ /// touch
+ ///
+ /// Touches key at iterator iter, moving it to most recently used position
+ void touch(iterator& iter)
+ {
+ auto listRef = iter->second.second;
+
+ m_list.erase(listRef);
+ m_list.push_front(iter->first);
+ iter->second.second = m_list.begin();
+ }
+
+ /// assign
+ ///
+ /// Updates key k with data v.
+ /// If key k does not exist, returns false and no changes are made.
+ /// If key k exists, existing data has its deletor called and key k's data is replaced with new v data
+ bool assign(const key_type& k, const value_type& v)
+ {
+ auto iter = m_map.find(k);
+
+ if (iter != m_map.end())
+ {
+ assign(iter, v);
+ return true;
+ }
+
+ return false;
+ }
+
+ /// assign
+ ///
+ /// Updates data at spot iter with data v.
+ void assign(iterator& iter, const value_type& v)
+ {
+ if (m_delete_callback)
+ m_delete_callback(iter->second.first);
+ touch(iter);
+ iter->second.first = v;
+ }
+
+ // standard container functions
+ iterator begin() EA_NOEXCEPT { return m_map.begin(); }
+ iterator end() EA_NOEXCEPT { return m_map.end(); }
+ iterator rbegin() EA_NOEXCEPT { return m_map.rbegin(); }
+ iterator rend() EA_NOEXCEPT { return m_map.rend(); }
+ const_iterator begin() const EA_NOEXCEPT { return m_map.begin(); }
+ const_iterator cbegin() const EA_NOEXCEPT { return m_map.cbegin(); }
+ const_iterator crbegin() const EA_NOEXCEPT { return m_map.crbegin(); }
+ const_iterator end() const EA_NOEXCEPT { return m_map.end(); }
+ const_iterator cend() const EA_NOEXCEPT { return m_map.cend(); }
+ const_iterator crend() const EA_NOEXCEPT { return m_map.crend(); }
+
+ bool empty() const EA_NOEXCEPT { return m_map.empty(); }
+ size_type size() const EA_NOEXCEPT { return m_map.size(); }
+ size_type capacity() const EA_NOEXCEPT { return m_capacity; }
+
+ void clear() EA_NOEXCEPT
+ {
+ // Since we may have a delete callback, we reuse the trim function by temporarily setting
+ // the capacity to zero, which clears all the entries without duplicating code.
+ auto old_max = m_capacity;
+
+ m_capacity = 0;
+ trim();
+ m_capacity = old_max;
+ }
+
+ /// resize
+ ///
+ /// Resizes the cache. Can be used to either expand or contract the cache.
+ /// In the case of a contraction, the oldest entries will be evicted with their respective
+ /// deletors called before completing.
+ void resize(size_type newSize)
+ {
+ m_capacity = newSize;
+ trim();
+ }
+
+ void setCreateCallback(create_callback_type callback) { m_create_callback = callback; }
+ void setDeleteCallback(delete_callback_type callback) { m_delete_callback = callback; }
+
+ // EASTL extensions
+ const allocator_type& get_allocator() const EA_NOEXCEPT { return m_map.get_allocator(); }
+ allocator_type& get_allocator() EA_NOEXCEPT { return m_map.get_allocator(); }
+ void set_allocator(const allocator_type& allocator) { m_map.set_allocator(allocator); m_list.set_allocator(allocator); }
+
+ /// Does not reset the callbacks
+ void reset_lose_memory() EA_NOEXCEPT { m_map.reset_lose_memory(); m_list.reset_lose_memory(); }
+
+ private:
+ inline void map_erase(map_iterator pos)
+ {
+ if (m_delete_callback)
+ m_delete_callback(pos->second.first);
+ m_map.erase(pos);
+ }
+
+ bool trim()
+ {
+ if (size() <= m_capacity)
+ {
+ return false; // No trim necessary
+ }
+
+ // We need to trim
+ do
+ {
+ erase_oldest();
+ } while (m_list.size() > m_capacity);
+
+ return true;
+ }
+
+ void make_space()
+ {
+ if (size() == m_capacity)
+ {
+ erase_oldest();
+ }
+ }
+
+ private:
+ list_type m_list;
+ map_type m_map;
+ size_type m_capacity;
+ create_callback_type m_create_callback;
+ delete_callback_type m_delete_callback;
+ };
+}
+
+
+
+#endif
diff --git a/include/EASTL/bonus/ring_buffer.h b/include/EASTL/bonus/ring_buffer.h
new file mode 100644
index 0000000..fcd8fd2
--- /dev/null
+++ b/include/EASTL/bonus/ring_buffer.h
@@ -0,0 +1,1581 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// A ring buffer is a FIFO (first-in, first-out) container which acts
+// much like a queue. The difference is that a ring buffer is implemented
+// via chasing pointers around a given container rather than, as a queue
+// adapter does, writing to the end of the container and reading from the beginning.
+// The benefit of a ring buffer is that memory allocations don't occur
+// and new elements are neither added nor removed from the container.
+// Elements in the container are simply assigned values in circles around
+// the container.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_RING_BUFFER_H
+#define EASTL_RING_BUFFER_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/vector.h>
+#include <EASTL/initializer_list.h>
+#include <stddef.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// EASTL_RING_BUFFER_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_RING_BUFFER_DEFAULT_NAME
+ #define EASTL_RING_BUFFER_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " ring_buffer" // Unless the user overrides something, this is "EASTL ring_buffer".
+ #endif
+
+ /// EASTL_RING_BUFFER_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_RING_BUFFER_DEFAULT_ALLOCATOR
+ #define EASTL_RING_BUFFER_DEFAULT_ALLOCATOR allocator_type(EASTL_RING_BUFFER_DEFAULT_NAME)
+ #endif
+
+
+ /// ring_buffer_iterator
+ ///
+ /// We force this iterator to act like a random access iterator even if
+ /// the underlying container doesn't support random access iteration.
+	/// Any BidirectionalIterator can be made to act like a RandomAccessIterator; it just
+ /// might be inefficient in some cases.
+ ///
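+	/// For example (an illustrative sketch), a list-backed ring_buffer still supports
+	/// random access iterator arithmetic, though it falls back to repeated increments:
+	///     ring_buffer< int, eastl::list<int> > rb(8);
+	///     rb.push_back(1); rb.push_back(2); rb.push_back(3); rb.push_back(4);
+	///     ring_buffer< int, eastl::list<int> >::iterator it = rb.begin();
+	///     it += 3;   // Now refers to the 4th element; O(n) for a list, O(1) for a vector.
+	///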
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ struct ring_buffer_iterator
+ {
+ public:
+ typedef ring_buffer_iterator<T, Pointer, Reference, Container> this_type;
+ typedef T value_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef typename Container::size_type size_type;
+ typedef typename Container::difference_type difference_type;
+ typedef typename Container::iterator container_iterator;
+ typedef typename Container::const_iterator container_const_iterator;
+ typedef ring_buffer_iterator<T, T*, T&, Container> iterator;
+ typedef ring_buffer_iterator<T, const T*, const T&, Container> const_iterator;
+ typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+
+ public:
+ Container* mpContainer;
+ container_iterator mContainerIterator;
+
+ public:
+ ring_buffer_iterator();
+ ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator);
+ ring_buffer_iterator(const iterator& x);
+
+ ring_buffer_iterator& operator=(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ this_type& operator++();
+ this_type operator++(int);
+
+ this_type& operator--();
+ this_type operator--(int);
+
+ this_type& operator+=(difference_type n);
+ this_type& operator-=(difference_type n);
+
+ this_type operator+(difference_type n) const;
+ this_type operator-(difference_type n) const;
+
+ protected:
+ void increment(difference_type n, EASTL_ITC_NS::input_iterator_tag);
+ void increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag);
+
+ }; // struct ring_buffer_iterator
+
+
+
+ /// ring_buffer
+ ///
+ /// Implements a ring buffer via a given container type, which would
+ /// typically be a vector or array, though any container which supports
+ /// bidirectional iteration would work.
+ ///
+ /// A ring buffer is a FIFO (first-in, first-out) container which acts
+ /// much like a queue. The difference is that a ring buffer is implemented
+ /// via chasing pointers around a container and moving the read and write
+ /// positions forward (and possibly wrapping around) as the container is
+ /// read and written via pop_front and push_back.
+ ///
+ /// The benefit of a ring buffer is that memory allocations don't occur
+ /// and new elements are neither added nor removed from the container.
+ /// Elements in the container are simply assigned values in circles around
+ /// the container.
+ ///
+ /// ring_buffer is different from other containers -- including adapter
+ /// containers -- in how iteration is done. Iteration of a ring buffer
+ /// starts at the current begin position, proceeds to the end of the underlying
+ /// container, and continues at the begin of the underlying container until
+ /// the ring buffer's current end position. Thus a ring_buffer does
+ /// indeed have a begin and an end, though the values of begin and end
+ /// chase each other around the container. An empty ring_buffer is one
+ /// in which end == begin, and a full ring_buffer is one in which
+ /// end + 1 == begin.
+ ///
+ /// Example of a ring buffer layout, where + indicates queued items:
+ /// ++++++++++--------------------------------+++++++++
+ /// ^ ^
+ /// end begin
+ ///
+ /// Empty ring buffer:
+ /// ---------------------------------------------------
+ /// ^
+ /// begin / end
+ ///
+ /// Full ring buffer. Note that one item is necessarily unused; it is
+	/// analogous to a '\0' at the end of a C string:
+ /// +++++++++++++++++++++++++++++++++++++++++-+++++++++
+ /// ^^
+ /// end begin
+ ///
+ /// A push_back operation on a ring buffer assigns the new value to end.
+ /// If there is no more space in the buffer, this will result in begin
+	/// being overwritten and the begin position being moved forward one position.
+ /// The user can use the full() function to detect this condition.
+ /// Note that elements in a ring buffer are not created or destroyed as
+	/// they are added and removed; they are merely assigned. Only on
+ /// container construction and destruction are any elements created and
+ /// destroyed.
+ ///
+ /// The ring buffer can be used in either direction. By this we mean that
+ /// you can use push_back to add items and pop_front to remove them; or you can
+ /// use push_front to add items and pop_back to remove them. You aren't
+ /// limited to these operations; you can push or pop from either side
+ /// arbitrarily and you can insert or erase anywhere in the container.
+ ///
+ /// The ring buffer requires the user to specify a Container type, which
+ /// by default is vector. However, any container with bidirectional iterators
+ /// will work, such as list, deque, string or any of the fixed_* versions
+ /// of these containers, such as fixed_string. Since ring buffer works via copying
+ /// elements instead of allocating and freeing nodes, inserting in the middle
+ /// of a ring buffer based on list (instead of vector) is no more efficient.
+ ///
+ /// To use the ring buffer, its container must be resized to the desired
+	/// ring buffer size. Changing the size of a ring buffer may invalidate
+	/// existing ring buffer iterators.
+ ///
+ /// An alternative to using a ring buffer is to use a list with a user-created
+ /// node pool and custom allocator. There are various tradeoffs that result from this.
+ ///
+ /// Example usage:
+ /// ring_buffer< int, list<int> > rb(100);
+ /// rb.push_back(1);
+ ///
+ /// Example usage:
+ /// // Example of creating an on-screen debug log that shows 16
+ /// // strings at a time and scrolls older strings away.
+ ///
+ /// // Create ring buffer of 16 strings.
+ /// ring_buffer< string, vector<string> > debugLogText(16);
+ ///
+ /// // Reserve 128 chars for each line. This can make it so that no
+ /// // runtime memory allocations occur.
+ /// for(vector<string>::iterator it = debugLogText.get_container().begin(),
+ /// itEnd = debugLogText.get_container().end(); it != itEnd; ++it)
+ /// {
+ /// (*it).reserve(128);
+ /// }
+ ///
+ /// // Add a new string, using push_front() and front() instead of
+ /// // push_front(str) in order to avoid creating a temporary str.
+ /// debugLogText.push_front();
+ /// debugLogText.front() = "Player fired weapon";
+ ///
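+	/// Example usage (an illustrative sketch, not part of the original documentation,
+	/// showing the overwrite behavior that occurs once the buffer is full):
+	///     ring_buffer<int> rb(3);   // Capacity of 3; size starts at 0.
+	///     rb.push_back(10);
+	///     rb.push_back(20);
+	///     rb.push_back(30);         // rb is now full: {10, 20, 30}
+	///     rb.push_back(40);         // Overwrites the oldest element: rb is now {20, 30, 40}
+	///     // rb.front() == 20, rb.back() == 40, rb.size() == 3, rb.full() == true
+	///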
+ template <typename T, typename Container = eastl::vector<T>, typename Allocator = typename Container::allocator_type>
+ class ring_buffer
+ {
+ public:
+ typedef ring_buffer<T, Container, Allocator> this_type;
+ typedef Container container_type;
+ typedef Allocator allocator_type;
+
+ typedef typename Container::value_type value_type;
+ typedef typename Container::reference reference;
+ typedef typename Container::const_reference const_reference;
+ typedef typename Container::size_type size_type;
+ typedef typename Container::difference_type difference_type;
+ typedef typename Container::iterator container_iterator;
+ typedef typename Container::const_iterator container_const_iterator;
+ typedef ring_buffer_iterator<T, T*, T&, Container> iterator;
+ typedef ring_buffer_iterator<T, const T*, const T&, Container> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ public: // We declare public so that global comparison operators can be implemented without adding an inline level and without tripping up GCC 2.x friend declaration failures. GCC (through at least v4.0) is poor at inlining and performance wins over correctness.
+ Container c; // We follow the naming convention established for stack, queue, priority_queue and name this 'c'. This variable must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element.
+
+ protected:
+ container_iterator mBegin; // We keep track of where our begin and end are by using Container iterators.
+ container_iterator mEnd;
+ size_type mSize;
+
+ public:
+ // There currently isn't a ring_buffer constructor that specifies an initial size, unlike other containers.
+ explicit ring_buffer(size_type cap = 0); // Construct with an initial capacity (but size of 0).
+ explicit ring_buffer(size_type cap, const allocator_type& allocator);
+ explicit ring_buffer(const Container& x);
+ explicit ring_buffer(const allocator_type& allocator);
+ ring_buffer(const this_type& x);
+ ring_buffer(this_type&& x);
+ ring_buffer(this_type&& x, const allocator_type& allocator);
+ ring_buffer(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_RING_BUFFER_DEFAULT_ALLOCATOR); // This function sets the capacity to be equal to the size of the initializer list.
+
+ // No destructor necessary. Default will do.
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ template <typename InputIterator>
+ void assign(InputIterator first, InputIterator last);
+
+ void swap(this_type& x);
+
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ bool empty() const EA_NOEXCEPT;
+ bool full() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+ size_type capacity() const EA_NOEXCEPT;
+
+ void resize(size_type n);
+ void set_capacity(size_type n); // Sets the capacity to the given value, including values less than the current capacity. Adjusts the size downward if n < size, by throwing out the oldest elements in the buffer.
+ void reserve(size_type n); // Reserve a given capacity. Doesn't decrease the capacity; it only increases it (for compatibility with other containers' behavior).
+
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ void push_back(const value_type& value);
+ reference push_back();
+
+ void push_front(const value_type& value);
+ reference push_front();
+
+ void pop_back();
+ void pop_front();
+
+ reference operator[](size_type n);
+ const_reference operator[](size_type n) const;
+
+ // To consider:
+ // size_type read(value_type* pDestination, size_type nCount);
+ // size_type read(iterator** pPosition1, iterator** pPosition2, size_type& nCount1, size_type& nCount2);
+
+ /* To do:
+ template <class... Args>
+ void emplace_front(Args&&... args);
+
+ template <class... Args>
+ void emplace_back(Args&&... args);
+
+ template <class... Args>
+ iterator emplace(const_iterator position, Args&&... args);
+ */
+
+ iterator insert(const_iterator position, const value_type& value);
+ void insert(const_iterator position, size_type n, const value_type& value);
+ void insert(const_iterator position, std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void insert(const_iterator position, InputIterator first, InputIterator last);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ void clear();
+
+ container_type& get_container();
+ const container_type& get_container() const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ //size_type DoGetSize(EASTL_ITC_NS::input_iterator_tag) const;
+ //size_type DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const;
+
+ }; // class ring_buffer
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ring_buffer_iterator
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator()
+ : mpContainer(NULL), mContainerIterator()
+ {
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator)
+ : mpContainer(pContainer), mContainerIterator(containerIterator)
+ {
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator(const iterator& x)
+ : mpContainer(x.mpContainer), mContainerIterator(x.mContainerIterator)
+ {
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ ring_buffer_iterator<T, Pointer, Reference, Container>&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator=(const iterator& x)
+ {
+ mpContainer = x.mpContainer;
+ mContainerIterator = x.mContainerIterator;
+ return *this;
+ }
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::reference
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator*() const
+ {
+ return *mContainerIterator;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::pointer
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator->() const
+ {
+ return &*mContainerIterator;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator++()
+ {
+ if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end()))
+ mContainerIterator = mpContainer->begin();
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator++(int)
+ {
+ const this_type temp(*this);
+ if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end()))
+ mContainerIterator = mpContainer->begin();
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator--()
+ {
+ if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin()))
+ mContainerIterator = mpContainer->end();
+ --mContainerIterator;
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator--(int)
+ {
+ const this_type temp(*this);
+ if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin()))
+ mContainerIterator = mpContainer->end();
+ --mContainerIterator;
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator+=(difference_type n)
+ {
+ typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
+ increment(n, IC());
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator-=(difference_type n)
+ {
+ typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
+ increment(-n, IC());
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator+(difference_type n) const
+ {
+ return this_type(*this).operator+=(n);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
+ ring_buffer_iterator<T, Pointer, Reference, Container>::operator-(difference_type n) const
+ {
+ return this_type(*this).operator+=(-n);
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ void ring_buffer_iterator<T, Pointer, Reference, Container>::increment(difference_type n, EASTL_ITC_NS::input_iterator_tag)
+ {
+ // n cannot be negative, as input iterators don't support reverse iteration.
+ while(n-- > 0)
+ operator++();
+ }
+
+
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ void ring_buffer_iterator<T, Pointer, Reference, Container>::increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ // We make the assumption here that the user is incrementing from a valid
+ // starting position to a valid ending position. Thus *this + n yields a
+ // valid iterator, including if n happens to be a negative value.
+
+ if(n >= 0)
+ {
+ const difference_type d = mpContainer->end() - mContainerIterator;
+
+ if(n < d)
+ mContainerIterator += n;
+ else
+ mContainerIterator = mpContainer->begin() + (n - d);
+ }
+ else
+ {
+ // Recall that n and d here will be negative and so the logic here works as intended.
+ const difference_type d = mpContainer->begin() - mContainerIterator;
+
+ if(n >= d)
+ mContainerIterator += n;
+ else
+ mContainerIterator = mpContainer->end() + (n - d);
+ }
+ }
+
+
+ // Random access iterators must support operator + and operator -.
+ // You can only add an integer to an iterator, and you cannot add two iterators.
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ inline ring_buffer_iterator<T, Pointer, Reference, Container>
+ operator+(ptrdiff_t n, const ring_buffer_iterator<T, Pointer, Reference, Container>& x)
+ {
+ return x + n; // Implement (n + x) in terms of (x + n).
+ }
+
+
+ // You can only add an integer to an iterator, but you can subtract two iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, typename Container>
+ inline typename ring_buffer_iterator<T, PointerA, ReferenceA, Container>::difference_type
+ operator-(const ring_buffer_iterator<T, PointerA, ReferenceA, Container>& a,
+ const ring_buffer_iterator<T, PointerB, ReferenceB, Container>& b)
+ {
+ typedef typename ring_buffer_iterator<T, PointerA, ReferenceA, Container>::difference_type difference_type;
+
+ // To do: If container_iterator is a random access iterator, then do a simple calculation.
+ // Otherwise, we have little choice but to iterate from a to b and count as we go.
+ // See the ring_buffer::size function for an implementation of this.
+
+ // Iteration implementation:
+ difference_type d = 0;
+
+ for(ring_buffer_iterator<T, PointerA, ReferenceA, Container> temp(b); temp != a; ++temp)
+ ++d;
+
+ return d;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+	// Thus we provide additional template parameters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename ContainerA,
+ typename PointerB, typename ReferenceB, typename ContainerB>
+ inline bool operator==(const ring_buffer_iterator<T, PointerA, ReferenceA, ContainerA>& a,
+ const ring_buffer_iterator<T, PointerB, ReferenceB, ContainerB>& b)
+ {
+ // Perhaps we should compare the container pointer as well.
+ // However, for valid iterators this shouldn't be necessary.
+ return a.mContainerIterator == b.mContainerIterator;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename ContainerA,
+ typename PointerB, typename ReferenceB, typename ContainerB>
+ inline bool operator!=(const ring_buffer_iterator<T, PointerA, ReferenceA, ContainerA>& a,
+ const ring_buffer_iterator<T, PointerB, ReferenceB, ContainerB>& b)
+ {
+ // Perhaps we should compare the container pointer as well.
+ // However, for valid iterators this shouldn't be necessary.
+ return !(a.mContainerIterator == b.mContainerIterator);
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference, typename Container>
+ inline bool operator!=(const ring_buffer_iterator<T, Pointer, Reference, Container>& a,
+ const ring_buffer_iterator<T, Pointer, Reference, Container>& b)
+ {
+ return !(a.mContainerIterator == b.mContainerIterator);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ring_buffer
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(size_type cap)
+ : c() // Default construction with default allocator for the container.
+ {
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+
+ // We add one because the element at mEnd is necessarily unused.
+ c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(size_type cap, const allocator_type& allocator)
+ : c(allocator)
+ {
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+
+ // We add one because the element at mEnd is necessarily unused.
+ c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(const Container& x)
+ : c(x) // This copies elements from x, but unless the user is doing some tricks, the only thing that matters is that c.size() == x.size().
+ {
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+ if(c.empty())
+ c.resize(1);
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(const allocator_type& allocator)
+ : c(allocator)
+ {
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+
+ // We add one because the element at mEnd is necessarily unused.
+ c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(const this_type& x)
+ : c(x.c)
+ {
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = x.mSize;
+
+ eastl::advance(mBegin, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound.
+ eastl::advance(mEnd, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mEnd));
+ }
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(this_type&& x)
+ : c() // Default construction with default allocator for the container.
+ {
+ c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+
+		swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and will only be destructible.
+ }
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(this_type&& x, const allocator_type& allocator)
+ : c(allocator)
+ {
+ c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+
+ if(c.get_allocator() == x.c.get_allocator())
+			swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and will only be destructible.
+ else
+ operator=(x);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ ring_buffer<T, Container, Allocator>::ring_buffer(std::initializer_list<value_type> ilist, const allocator_type& allocator)
+ : c(allocator)
+ {
+ c.resize((eastl_size_t)ilist.size() + 1);
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = 0;
+
+ assign(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::this_type&
+ ring_buffer<T, Container, Allocator>::operator=(const this_type& x)
+ {
+ if(&x != this)
+ {
+ c = x.c;
+
+ mBegin = c.begin();
+ mEnd = mBegin;
+ mSize = x.mSize;
+
+ eastl::advance(mBegin, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound.
+ eastl::advance(mEnd, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mEnd));
+ }
+
+ return *this;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::this_type&
+ ring_buffer<T, Container, Allocator>::operator=(this_type&& x)
+ {
+ swap(x);
+ return *this;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::this_type&
+ ring_buffer<T, Container, Allocator>::operator=(std::initializer_list<value_type> ilist)
+ {
+ assign(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ template <typename InputIterator>
+ void ring_buffer<T, Container, Allocator>::assign(InputIterator first, InputIterator last)
+ {
+ // To consider: We can make specializations of this for pointer-based
+ // iterators to PODs and turn the action into a memcpy.
+ clear();
+
+ for(; first != last; ++first)
+ push_back(*first);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::swap(this_type& x)
+ {
+ if(&x != this)
+ {
+ const difference_type dBegin = eastl::distance(c.begin(), mBegin); // We can do a simple distance algorithm here, as there will be no wraparound.
+ const difference_type dEnd = eastl::distance(c.begin(), mEnd);
+
+ const difference_type dxBegin = eastl::distance(x.c.begin(), x.mBegin);
+ const difference_type dxEnd = eastl::distance(x.c.begin(), x.mEnd);
+
+ eastl::swap(c, x.c);
+ eastl::swap(mSize, x.mSize);
+
+ mBegin = c.begin();
+ eastl::advance(mBegin, dxBegin); // We can do a simple advance algorithm here, as there will be no wraparound.
+
+ mEnd = c.begin();
+ eastl::advance(mEnd, dxEnd);
+
+ x.mBegin = x.c.begin();
+ eastl::advance(x.mBegin, dBegin);
+
+ x.mEnd = x.c.begin();
+ eastl::advance(x.mEnd, dEnd);
+ }
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::begin() EA_NOEXCEPT
+ {
+ return iterator(&c, mBegin);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_iterator
+ ring_buffer<T, Container, Allocator>::begin() const EA_NOEXCEPT
+ {
+ return const_iterator(const_cast<Container*>(&c), mBegin); // We trust that the const_iterator will respect const-ness.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_iterator
+ ring_buffer<T, Container, Allocator>::cbegin() const EA_NOEXCEPT
+ {
+ return const_iterator(const_cast<Container*>(&c), mBegin); // We trust that the const_iterator will respect const-ness.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::end() EA_NOEXCEPT
+ {
+ return iterator(&c, mEnd);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_iterator
+ ring_buffer<T, Container, Allocator>::end() const EA_NOEXCEPT
+ {
+ return const_iterator(const_cast<Container*>(&c), mEnd); // We trust that the const_iterator will respect const-ness.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_iterator
+ ring_buffer<T, Container, Allocator>::cend() const EA_NOEXCEPT
+ {
+ return const_iterator(const_cast<Container*>(&c), mEnd); // We trust that the const_iterator will respect const-ness.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reverse_iterator
+ ring_buffer<T, Container, Allocator>::rbegin() EA_NOEXCEPT
+ {
+ return reverse_iterator(iterator(&c, mEnd));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
+ ring_buffer<T, Container, Allocator>::rbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mEnd));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
+ ring_buffer<T, Container, Allocator>::crbegin() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mEnd));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reverse_iterator
+ ring_buffer<T, Container, Allocator>::rend() EA_NOEXCEPT
+ {
+ return reverse_iterator(iterator(&c, mBegin));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
+ ring_buffer<T, Container, Allocator>::rend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mBegin));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
+ ring_buffer<T, Container, Allocator>::crend() const EA_NOEXCEPT
+ {
+ return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mBegin));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ bool ring_buffer<T, Container, Allocator>::empty() const EA_NOEXCEPT
+ {
+ return mBegin == mEnd;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ bool ring_buffer<T, Container, Allocator>::full() const EA_NOEXCEPT
+ {
+ // Implementation that relies on c.size() being a fast operation:
+ // return mSize == (c.size() - 1); // (c.size() - 1) == capacity(); we are attempting to reduce function calls.
+
+ // Version that has constant speed guarantees, but is still pretty fast.
+ const_iterator afterEnd(end());
+ ++afterEnd;
+ return afterEnd.mContainerIterator == mBegin;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::size_type
+ ring_buffer<T, Container, Allocator>::size() const EA_NOEXCEPT
+ {
+ return mSize;
+
+ // Alternatives:
+ // return eastl::distance(begin(), end());
+ // return end() - begin(); // This is more direct than using distance().
+ //typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
+ //return DoGetSize(IC()); // This is more direct than using iterator math.
+ }
+
+
+ /*
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::size_type
+ ring_buffer<T, Container, Allocator>::DoGetSize(EASTL_ITC_NS::input_iterator_tag) const
+ {
+ // We could alternatively just use eastl::distance() here, but we happen to
+ // know that such code would boil down to what we have here, and we might
+ // as well remove function calls where possible.
+ difference_type d = 0;
+
+ for(const_iterator temp(begin()), tempEnd(end()); temp != tempEnd; ++temp)
+ ++d;
+
+ return (size_type)d;
+ }
+ */
+
+ /*
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::size_type
+ ring_buffer<T, Container, Allocator>::DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const
+ {
+	// A simpler but less efficient implementation of this function would be:
+ // return eastl::distance(mBegin, mEnd);
+ //
+ // The calculation of distance here takes advantage of the fact that random
+ // access iterators' distances can be calculated by simple pointer calculation.
+ // Thus the code below boils down to a few subtractions when using a vector,
+ // string, or array as the Container type.
+ //
+ const difference_type dBegin = eastl::distance(const_cast<Container&>(c).begin(), mBegin); // const_cast here solves a little compiler
+ const difference_type dEnd = eastl::distance(const_cast<Container&>(c).begin(), mEnd); // argument matching problem.
+
+ if(dEnd >= dBegin)
+ return dEnd - dBegin;
+
+ return c.size() - (dBegin - dEnd);
+ }
+ */
+
+
+ namespace Internal
+ {
+ ///////////////////////////////////////////////////////////////
+ // has_overflow_allocator
+ //
+ // returns true_type when the specified container type is an
+ // eastl::fixed_* container and therefore has an overflow
+ // allocator type.
+ //
+ template <typename T, typename = void>
+ struct has_overflow_allocator : false_type {};
+
+ template <typename T>
+ struct has_overflow_allocator<T, void_t<decltype(declval<T>().get_overflow_allocator())>> : true_type {};
+
+
+ ///////////////////////////////////////////////////////////////
+ // GetFixedContainerCtorAllocator
+ //
+ // eastl::fixed_* containers are only constructible via their
+		// overflow allocator type. This helper selects the appropriate
+ // allocator from the specified container.
+ //
+ template <typename Container, bool UseOverflowAllocator = has_overflow_allocator<Container>()()>
+ struct GetFixedContainerCtorAllocator
+ {
+ auto& operator()(Container& c) { return c.get_overflow_allocator(); }
+ };
+
+ template <typename Container>
+ struct GetFixedContainerCtorAllocator<Container, false>
+ {
+ auto& operator()(Container& c) { return c.get_allocator(); }
+ };
+ } // namespace Internal
+
+
+ ///////////////////////////////////////////////////////////////
+ // ContainerTemporary
+ //
+	// Helper type which avoids using excessive stack space when creating
+	// temporaries while swapping/copying the underlying ring_buffer
+	// container type.
+ //
+ template <typename Container, bool UseHeapTemporary = (sizeof(Container) >= EASTL_MAX_STACK_USAGE)>
+ struct ContainerTemporary
+ {
+ Container mContainer;
+
+ ContainerTemporary(Container& parentContainer)
+ : mContainer(Internal::GetFixedContainerCtorAllocator<Container>{}(parentContainer))
+ {
+ }
+
+ Container& get() { return mContainer; }
+ };
+
+ template <typename Container>
+ struct ContainerTemporary<Container, true>
+ {
+ typename Container::allocator_type* mAllocator;
+ Container* mContainer;
+
+ ContainerTemporary(Container& parentContainer)
+ : mAllocator(&parentContainer.get_allocator())
+ , mContainer(new (mAllocator->allocate(sizeof(Container))) Container)
+ {
+ }
+
+ ~ContainerTemporary()
+ {
+ mContainer->~Container();
+ mAllocator->deallocate(mContainer, sizeof(Container));
+ }
+
+ Container& get() { return *mContainer; }
+ };
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::resize(size_type n)
+ {
+ // Note that if n > size(), we just move the end position out to
+		// the begin + n, with the data between the old end and the new end
+		// being stale values from the past. This is by design, as the concept
+ // of arbitrarily resizing a ring buffer like this is currently deemed
+ // to be vague in what it intends to do. We can only assume that the
+ // user knows what he is doing and will deal with the stale values.
+ EASTL_ASSERT(c.size() >= 1);
+ const size_type cap = (c.size() - 1);
+
+ mSize = n;
+
+ if(n > cap) // If we need to grow in capacity...
+ {
+ // Given that a growing operation will always result in memory allocation,
+ // we currently implement this function via the usage of a temp container.
+ // This makes for a simple implementation, but in some cases it is less
+ // efficient. In particular, if the container is a node-based container like
+ // a (linked) list, this function would be faster if we simply added nodes
+ // to ourself. We would do this by inserting the nodes to be after end()
+ // and adjusting the begin() position if it was after end().
+
+ // To do: This code needs to be amended to deal with possible exceptions
+ // that could occur during the resize call below.
+
+ ContainerTemporary<Container> cTemp(c);
+ cTemp.get().resize(n + 1);
+ eastl::copy(begin(), end(), cTemp.get().begin());
+ eastl::swap(c, cTemp.get());
+
+ mBegin = c.begin();
+ mEnd = mBegin;
+ eastl::advance(mEnd, n); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
+ }
+ else // We could do a check here for n != size(), but that would be costly and people don't usually resize things to their same size.
+ {
+ mEnd = mBegin;
+
+ // eastl::advance(mEnd, n); // We *cannot* use this because there may be wraparound involved.
+
+ // To consider: Possibly we should implement some more detailed logic to optimize the code here.
+			// We'd need to do different behaviour depending on whether the container iterator type is a
+ // random access iterator or otherwise.
+
+ while(n--)
+ {
+ if(EASTL_UNLIKELY(++mEnd == c.end()))
+ mEnd = c.begin();
+ }
+ }
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::size_type
+ ring_buffer<T, Container, Allocator>::capacity() const EA_NOEXCEPT
+ {
+ EASTL_ASSERT(c.size() >= 1); // This is required because even an empty ring_buffer has one unused termination element, somewhat like a \0 at the end of a C string.
+
+ return (c.size() - 1); // Need to subtract one because the position at mEnd is unused.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::set_capacity(size_type n)
+ {
+ const size_type capacity = (c.size() - 1);
+
+ if(n != capacity) // If we need to change capacity...
+ {
+ ContainerTemporary<Container> cTemp(c);
+ cTemp.get().resize(n + 1);
+
+ iterator itCopyBegin = begin();
+
+ if(n < mSize) // If we are shrinking the capacity, to less than our size...
+ {
+ eastl::advance(itCopyBegin, mSize - n);
+ mSize = n;
+ }
+
+ eastl::copy(itCopyBegin, end(), cTemp.get().begin()); // The begin-end range may in fact be larger than n, in which case values will be overwritten.
+ eastl::swap(c, cTemp.get());
+
+ mBegin = c.begin();
+ mEnd = mBegin;
+ eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
+ }
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::reserve(size_type n)
+ {
+ // We follow the pattern of vector and only do something if n > capacity.
+ EASTL_ASSERT(c.size() >= 1);
+
+ if(n > (c.size() - 1)) // If we need to grow in capacity... // (c.size() - 1) == capacity(); we are attempting to reduce function calls.
+ {
+ ContainerTemporary<Container> cTemp(c);
+ cTemp.get().resize(n + 1);
+ eastl::copy(begin(), end(), cTemp.get().begin());
+ eastl::swap(c, cTemp.get());
+
+ mBegin = c.begin();
+ mEnd = mBegin;
+ eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
+ }
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::front()
+ {
+ return *mBegin;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reference
+ ring_buffer<T, Container, Allocator>::front() const
+ {
+ return *mBegin;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::back()
+ {
+ // return *(end() - 1); // Can't use this because not all iterators support operator-.
+
+ iterator temp(end()); // To do: Find a way to construct this temporary in the return statement.
+ return *(--temp); // We can do it by making all our containers' iterators support operator-.
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reference
+ ring_buffer<T, Container, Allocator>::back() const
+ {
+ // return *(end() - 1); // Can't use this because not all iterators support operator-.
+
+ const_iterator temp(end()); // To do: Find a way to construct this temporary in the return statement.
+ return *(--temp); // We can do it by making all our containers' iterators support operator-.
+ }
+
+
+ /// A push_back operation on a ring buffer assigns the new value to end.
+ /// If there is no more space in the buffer, this will result in begin
+	/// being overwritten and the begin position being moved forward one position.
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::push_back(const value_type& value)
+ {
+ *mEnd = value;
+
+ if(++mEnd == c.end())
+ mEnd = c.begin();
+
+ if(mEnd == mBegin)
+ {
+ if(++mBegin == c.end())
+ mBegin = c.begin();
+ }
+ else
+ ++mSize;
+ }
+
+
+ /// A push_back operation on a ring buffer assigns the new value to end.
+ /// If there is no more space in the buffer, this will result in begin
+	/// being overwritten and the begin position being moved forward one position.
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::push_back()
+ {
+ // We don't do the following assignment, as the value at mEnd is already constructed;
+ // it is merely possibly not default-constructed. However, the spirit of push_back
+ // is that the user intends to do an assignment or data modification after the
+		// push_back call. The user can always execute back() = value_type() if he wants.
+ //*mEnd = value_type();
+
+ if(++mEnd == c.end())
+ mEnd = c.begin();
+
+ if(mEnd == mBegin)
+ {
+ if(++mBegin == c.end())
+ mBegin = c.begin();
+ }
+ else
+ ++mSize;
+
+ return back();
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::pop_back()
+ {
+ EASTL_ASSERT(mEnd != mBegin); // We assume that size() > 0 and thus that there is something to pop.
+
+ if(EASTL_UNLIKELY(mEnd == c.begin()))
+ mEnd = c.end();
+ --mEnd;
+ --mSize;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::push_front(const value_type& value)
+ {
+ if(EASTL_UNLIKELY(mBegin == c.begin()))
+ mBegin = c.end();
+
+ if(--mBegin == mEnd)
+ {
+ if(EASTL_UNLIKELY(mEnd == c.begin()))
+ mEnd = c.end();
+ --mEnd;
+ }
+ else
+ ++mSize;
+
+ *mBegin = value;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::push_front()
+ {
+ if(EASTL_UNLIKELY(mBegin == c.begin()))
+ mBegin = c.end();
+
+ if(--mBegin == mEnd)
+ {
+ if(EASTL_UNLIKELY(mEnd == c.begin()))
+ mEnd = c.end();
+ --mEnd;
+ }
+ else
+ ++mSize;
+
+ // See comments above in push_back for why we don't execute this:
+ // *mBegin = value_type();
+
+ return *mBegin; // Same as return front();
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::pop_front()
+ {
+		EASTL_ASSERT(mBegin != mEnd); // We assume that size() > 0 and thus that there is something to pop.
+
+ if(++mBegin == c.end())
+ mBegin = c.begin();
+ --mSize;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reference
+ ring_buffer<T, Container, Allocator>::operator[](size_type n)
+ {
+ // return *(begin() + n); // Can't use this because not all iterators support operator+.
+
+ // This should compile to code that is nearly as efficient as that above.
+ // The primary difference is the possible generation of a temporary in this case.
+ iterator temp(begin());
+ eastl::advance(temp, n);
+ return *(temp.mContainerIterator);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::const_reference
+ ring_buffer<T, Container, Allocator>::operator[](size_type n) const
+ {
+ // return *(begin() + n); // Can't use this because not all iterators support operator+.
+
+ // This should compile to code that is nearly as efficient as that above.
+ // The primary difference is the possible generation of a temporary in this case.
+ const_iterator temp(begin());
+ eastl::advance(temp, n);
+ return *(temp.mContainerIterator);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::insert(const_iterator position, const value_type& value)
+ {
+ // To consider: It would be faster if we could tell that position was in the first
+ // half of the container and instead of moving things after the position back,
+ // we could move things before the position forward.
+
+ iterator afterEnd(end());
+ iterator beforeEnd(afterEnd);
+
+ ++afterEnd;
+
+ if(afterEnd.mContainerIterator == mBegin) // If we are at full capacity...
+ --beforeEnd;
+ else
+ push_back();
+
+ iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator.
+ eastl::copy_backward(itPosition, beforeEnd, end());
+ *itPosition = value;
+
+ return itPosition;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::insert(const_iterator position, size_type n, const value_type& value)
+ {
+ // To do: This can be improved with a smarter version. However,
+ // this is a little tricky because we need to deal with the case
+ // whereby n is greater than the size of the container itself.
+ while(n--)
+ insert(position, value);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::insert(const_iterator position, std::initializer_list<value_type> ilist)
+ {
+ insert(position, ilist.begin(), ilist.end());
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ template <typename InputIterator>
+ void ring_buffer<T, Container, Allocator>::insert(const_iterator position, InputIterator first, InputIterator last)
+ {
+ // To do: This can possibly be improved with a smarter version.
+ // However, this can be tricky if distance(first, last) is greater
+ // than the size of the container itself.
+ for(; first != last; ++first, ++position)
+ insert(position, *first);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::erase(const_iterator position)
+ {
+ iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator.
+ iterator iNext(itPosition);
+
+ eastl::copy(++iNext, end(), itPosition);
+ pop_back();
+
+ return itPosition;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::iterator
+ ring_buffer<T, Container, Allocator>::erase(const_iterator first, const_iterator last)
+ {
+ iterator itFirst(first.mpContainer, first.mContainerIterator); // We merely copy from const_iterator to iterator.
+ iterator itLast(last.mpContainer, last.mContainerIterator);
+
+ typename iterator::difference_type d = eastl::distance(itFirst, itLast);
+
+ eastl::copy(itLast, end(), itFirst);
+
+ while(d--) // To do: improve this implementation.
+ pop_back();
+
+ return itFirst;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reverse_iterator
+ ring_buffer<T, Container, Allocator>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::reverse_iterator
+ ring_buffer<T, Container, Allocator>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ void ring_buffer<T, Container, Allocator>::clear()
+ {
+ // Don't clear the container; we use its valid data for our elements.
+ mBegin = c.begin();
+ mEnd = c.begin();
+ mSize = 0;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ typename ring_buffer<T, Container, Allocator>::container_type&
+ ring_buffer<T, Container, Allocator>::get_container()
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ const typename ring_buffer<T, Container, Allocator>::container_type&
+ ring_buffer<T, Container, Allocator>::get_container() const
+ {
+ return c;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool ring_buffer<T, Container, Allocator>::validate() const
+ {
+ if(!c.validate()) // This requires that the container implement the validate function. That pretty much
+ return false; // means that the container is an EASTL container and not a std STL container.
+
+ if(c.empty()) // c must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element.
+ return false;
+
+ if(size() > capacity())
+ return false;
+
+ if((validate_iterator(begin()) & (isf_valid | isf_current)) != (isf_valid | isf_current))
+ return false;
+
+ if((validate_iterator(end()) & (isf_valid | isf_current)) != (isf_valid | isf_current))
+ return false;
+
+ // Verify that the size calculation is consistent.
+ size_type n = 0;
+ for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i)
+ ++n;
+ if(n != mSize)
+ return false;
+
+ return true;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline int ring_buffer<T, Container, Allocator>::validate_iterator(const_iterator i) const
+ {
+ // To do: Replace this with a more efficient implementation if possible.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator==(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return (a.size() == b.size()) && (a.c == b.c);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator<(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ const typename ring_buffer<T, Container, Allocator>::size_type sizeA = a.size();
+ const typename ring_buffer<T, Container, Allocator>::size_type sizeB = b.size();
+
+ if(sizeA == sizeB)
+ return (a.c < b.c);
+ return sizeA < sizeB;
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator!=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator>(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return (b < a);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator<=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline bool operator>=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename T, typename Container, typename Allocator>
+ inline void swap(ring_buffer<T, Container, Allocator>& a, ring_buffer<T, Container, Allocator>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/bonus/sort_extra.h b/include/EASTL/bonus/sort_extra.h
new file mode 100644
index 0000000..5f9a0c4
--- /dev/null
+++ b/include/EASTL/bonus/sort_extra.h
@@ -0,0 +1,204 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements additional sort algorithms beyond the basic set.
+// Included here are:
+// selection_sort -- Unstable.
+// shaker_sort -- Stable.
+// bucket_sort -- Stable.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SORT_EXTRA_H
+#define EASTL_SORT_EXTRA_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/functional.h>
+#include <EASTL/heap.h>
+#include <EASTL/sort.h> // For backwards compatibility due to sorts moved from here to sort.h.
+#include <EASTL/allocator.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+ /// selection_sort
+ ///
+ /// Implements the SelectionSort algorithm.
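+	///
+	/// Example usage (an illustrative sketch):
+	///     int intArray[5] = { 4, 1, 5, 3, 2 };
+	///     selection_sort(intArray, intArray + 5); // ascending, using eastl::less by default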
+ ///
+ template <typename ForwardIterator, typename StrictWeakOrdering>
+ void selection_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+ {
+ ForwardIterator iCurrent, iMin;
+
+ for(; first != last; ++first)
+ {
+ iCurrent = first;
+ iMin = iCurrent;
+
+ for(++iCurrent; iCurrent != last; ++iCurrent)
+ {
+ if(compare(*iCurrent, *iMin))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iMin, *iCurrent)); // Validate that the compare function is sane.
+ iMin = iCurrent;
+ }
+ }
+
+ if(first != iMin)
+ eastl::iter_swap(first, iMin);
+ }
+ } // selection_sort
+
+ template <typename ForwardIterator>
+ inline void selection_sort(ForwardIterator first, ForwardIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<ForwardIterator>::value_type> Less;
+
+ eastl::selection_sort<ForwardIterator, Less>(first, last, Less());
+ }
+
+
+
+ /// shaker_sort
+ ///
+	/// Implements the ShakerSort algorithm (also known as cocktail sort), a variant
+	/// of bubble_sort that sweeps both from left to right and from right to left,
+	/// resulting in fewer iterations.
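+	///
+	/// Example usage (an illustrative sketch):
+	///     eastl::vector<int> intArray = { 4, 1, 5, 3, 2 };
+	///     shaker_sort(intArray.begin(), intArray.end(), eastl::greater<int>()); // descending order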
+ ///
+ template <typename BidirectionalIterator, typename StrictWeakOrdering>
+ void shaker_sort(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare)
+ {
+ if(first != last)
+ {
+ BidirectionalIterator iCurrent, iNext, iLastModified;
+
+ --last;
+
+ while(first != last)
+ {
+ iLastModified = first;
+
+ for(iCurrent = first; iCurrent != last; iCurrent = iNext)
+ {
+ iNext = iCurrent;
+ ++iNext;
+
+ if(compare(*iNext, *iCurrent))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+ iLastModified = iCurrent;
+ eastl::iter_swap(iCurrent, iNext);
+ }
+ }
+
+ last = iLastModified;
+
+ if(first != last)
+ {
+ for(iCurrent = last; iCurrent != first; iCurrent = iNext)
+ {
+ iNext = iCurrent;
+ --iNext;
+
+ if(compare(*iCurrent, *iNext))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*iNext, *iCurrent)); // Validate that the compare function is sane.
+ iLastModified = iCurrent;
+ eastl::iter_swap(iNext, iCurrent);
+ }
+ }
+ first = iLastModified;
+ }
+ }
+ }
+ } // shaker_sort
+
+ template <typename BidirectionalIterator>
+ inline void shaker_sort(BidirectionalIterator first, BidirectionalIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<BidirectionalIterator>::value_type> Less;
+
+ eastl::shaker_sort<BidirectionalIterator, Less>(first, last, Less());
+ }
+
+
+
+ /// bucket_sort
+ ///
+ /// Implements the BucketSort algorithm.
+ ///
+ /// Example usage:
+ /// const size_t kElementRange = 32;
+ /// vector<int> intArray(1000);
+ ///
+ /// for(int i = 0; i < 1000; i++)
+ /// intArray[i] = rand() % kElementRange;
+ ///
+ /// vector< vector<int> > bucketArray(kElementRange);
+ /// bucket_sort(intArray.begin(), intArray.end(), bucketArray, eastl::hash_use_self<int>());
+ ///
+ template <typename T>
+ struct hash_use_self
+ {
+ T operator()(const T& x) const
+ { return x; }
+ };
+
+	// Requires bucketArray to be an array of arrays with a size equal to the range of values
+	// returned by the hash function. The hash function is required to return a unique value
+	// for each distinct element value. Usually the way this is done is that the elements are
+	// integers of a limited range (e.g. 0-64) and the hash function returns the element value
+	// itself. If you had a case where all elements were always even numbers (e.g. 0-128),
+	// you could use a custom hash function that returns (element value / 2).
+ //
+ // The user is required to provide an empty bucketArray to this function. This function returns
+ // with the bucketArray non-empty. This function doesn't clear the bucketArray because that takes
+ // time and the user might not need it to be cleared, at least at that time.
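+	//
+	// For the even-numbers case described above, a custom hash could look like this
+	// (an illustrative sketch; evenArray is assumed to hold even ints in the range 0-128):
+	//
+	//     struct hash_even_halver
+	//     {
+	//         int operator()(int x) const { return x / 2; } // maps even values 0-128 onto buckets 0-64
+	//     };
+	//
+	//     vector< vector<int> > bucketArray(65);
+	//     bucket_sort(evenArray.begin(), evenArray.end(), bucketArray, hash_even_halver());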
+ //
+ template <typename ForwardIterator, typename ContainerArray, typename HashFunction>
+ void bucket_sort(ForwardIterator first, ForwardIterator last, ContainerArray& bucketArray, HashFunction hash /*= hash_use_self*/)
+ {
+ for(ForwardIterator iInput = first; iInput != last; ++iInput)
+ bucketArray[hash(*iInput)].push_back(*iInput);
+
+ for(typename ContainerArray::const_iterator iBucket = bucketArray.begin(); iBucket != bucketArray.end(); ++iBucket)
+ first = eastl::copy((*iBucket).begin(), (*iBucket).end(), first);
+ }
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/bonus/tuple_vector.h b/include/EASTL/bonus/tuple_vector.h
new file mode 100644
index 0000000..7123c57
--- /dev/null
+++ b/include/EASTL/bonus/tuple_vector.h
@@ -0,0 +1,1592 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// tuple_vector is a data container that is designed to abstract and simplify
+// the handling of a "structure of arrays" layout of data in memory. In
+// particular, it mimics the interface of vector, including functionality to do
+// inserts, erases, push_backs, and random-access. It also provides a
+// RandomAccessIterator and corresponding functionality, making it compatible
+// with most STL (and STL-esque) algorithms such as ranged-for loops, find_if,
+// remove_if, or sort.
+
+// When used or applied properly, this container can improve performance of
+// some algorithms through cache-coherent data accesses or allowing for
+// sensible SIMD programming, while keeping the structure of a single
+// container, to permit a developer to continue to use existing algorithms in
+// STL and the like.
+//
+// Consult doc/Bonus/tuple_vector_readme.md for more information.
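+//
+// Example usage (an illustrative sketch; assumes unique element types so that get<T>() is usable):
+//
+//     eastl::tuple_vector<int, float> tv;
+//     tv.push_back(1, 2.0f);            // appends one logical "row"; the ints and floats live in separate arrays
+//     int*   pInts   = tv.get<0>();     // contiguous array of the first member
+//     float* pFloats = tv.get<float>(); // access by type, usable because the element types are unique
+//     for (auto row : tv) { }           // each row behaves like a tuple<int&, float&>
+//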
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_TUPLEVECTOR_H
+#define EASTL_TUPLEVECTOR_H
+
+#include <EASTL/bonus/compressed_pair.h>
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/memory.h>
+#include <EASTL/tuple.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+EA_DISABLE_VC_WARNING(4244) // warning C4244: 'conversion from '___' to '___', possible loss of data
+EA_DISABLE_VC_WARNING(4623) // warning C4623: default constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4625) // warning C4625: copy constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4510) // warning C4510: default constructor could not be generated
+
+namespace eastl
+{
+ /// EASTL_TUPLE_VECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
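+	/// A project can override it before this header is included, for example (illustrative):
+	///     #define EASTL_TUPLE_VECTOR_DEFAULT_NAME "MyGame tuple-vector"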
+ ///
+ #ifndef EASTL_TUPLE_VECTOR_DEFAULT_NAME
+ #define EASTL_TUPLE_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " tuple-vector" // Unless the user overrides something, this is "EASTL tuple-vector".
+ #endif
+
+
+ /// EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR
+ #define EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_TUPLE_VECTOR_DEFAULT_NAME)
+ #endif
+
+namespace TupleVecInternal
+{
+
+// forward declarations
+template <eastl_size_t I, typename... Ts>
+struct tuplevec_element;
+
+template <eastl_size_t I, typename... Ts>
+using tuplevec_element_t = typename tuplevec_element<I, Ts...>::type;
+
+template <typename... Ts>
+struct TupleTypes {};
+
+template <typename Allocator, typename Indices, typename... Ts>
+class TupleVecImpl;
+
+template <typename... Ts>
+struct TupleRecurser;
+
+template <eastl_size_t I, typename... Ts>
+struct TupleIndexRecurser;
+
+template <eastl_size_t I, typename T>
+struct TupleVecLeaf;
+
+template <typename Indices, typename... Ts>
+struct TupleVecIter;
+
+// tuplevec_element helper to be able to isolate a type given an index
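+// (e.g. tuplevec_element_t<1, int, float, bool> is float)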
+template <eastl_size_t I>
+struct tuplevec_element<I>
+{
+ static_assert(I != I, "tuplevec_element index out of range");
+};
+
+template <typename T, typename... Ts>
+struct tuplevec_element<0, T, Ts...>
+{
+ tuplevec_element() = delete; // tuplevec_element should only be used for compile-time assistance, and never be instantiated
+ typedef T type;
+};
+
+template <eastl_size_t I, typename T, typename... Ts>
+struct tuplevec_element<I, T, Ts...>
+{
+ typedef tuplevec_element_t<I - 1, Ts...> type;
+};
+
+// attempt to isolate index given a type
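+// (e.g. tuplevec_index<float, TupleTypes<int, float, bool>>::index is 1)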
+template <typename T, typename TupleVector>
+struct tuplevec_index
+{
+};
+
+template <typename T>
+struct tuplevec_index<T, TupleTypes<>>
+{
+ typedef void DuplicateTypeCheck;
+ tuplevec_index() = delete; // tuplevec_index should only be used for compile-time assistance, and never be instantiated
+ static const eastl_size_t index = 0;
+};
+
+template <typename T, typename... TsRest>
+struct tuplevec_index<T, TupleTypes<T, TsRest...>>
+{
+ typedef int DuplicateTypeCheck;
+ static_assert(is_void<typename tuplevec_index<T, TupleTypes<TsRest...>>::DuplicateTypeCheck>::value, "duplicate type T in tuple_vector::get<T>(); unique types must be provided in declaration, or only use get<eastl_size_t>()");
+
+ static const eastl_size_t index = 0;
+};
+
+template <typename T, typename Ts, typename... TsRest>
+struct tuplevec_index<T, TupleTypes<Ts, TsRest...>>
+{
+ typedef typename tuplevec_index<T, TupleTypes<TsRest...>>::DuplicateTypeCheck DuplicateTypeCheck;
+ static const eastl_size_t index = tuplevec_index<T, TupleTypes<TsRest...>>::index + 1;
+};
+
+template <typename Allocator, typename T, typename Indices, typename... Ts>
+struct tuplevec_index<T, TupleVecImpl<Allocator, Indices, Ts...>> : public tuplevec_index<T, TupleTypes<Ts...>>
+{
+};
+
+
+// helper to calculate the layout of the allocations for the tuple of types (esp. to take alignment into account)
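+//
+// For example (illustrative; assumes sizeof(int) == 4 and alignof(double) == 8), TupleRecurser<int, double>
+// with a capacity of 10 lays out a single allocation as:
+//     int data    at offset  0 (10 * 4 = 40 bytes)
+//     double data at offset 40 (40 is already a multiple of alignof(double); 10 * 8 = 80 bytes)
+// giving GetTotalAllocationSize(10, 0) == 120 and GetTotalAlignment() == 8.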
+template <>
+struct TupleRecurser<>
+{
+ typedef eastl_size_t size_type;
+
+	// This class should never be instantiated. It is just a helper for working with static functions
+	// where anonymous functions don't work, and for providing some other utilities.
+ TupleRecurser() = delete;
+
+ static EA_CONSTEXPR size_type GetTotalAlignment()
+ {
+ return 0;
+ }
+
+ static EA_CONSTEXPR size_type GetTotalAllocationSize(size_type capacity, size_type offset)
+ {
+ EA_UNUSED(capacity);
+ return offset;
+ }
+
+ template<typename Allocator, size_type I, typename Indices, typename... VecTypes>
+ static pair<void*, size_type> DoAllocate(TupleVecImpl<Allocator, Indices, VecTypes...> &vec, void** ppNewLeaf, size_type capacity, size_type offset)
+ {
+ EA_UNUSED(ppNewLeaf);
+
+		// If capacity is zero, then we allocate no memory and just return NULL.
+ // This is fine, as our default ctor initializes with NULL pointers.
+ size_type alignment = TupleRecurser<VecTypes...>::GetTotalAlignment();
+ void* ptr = capacity ? allocate_memory(vec.get_allocator(), offset, alignment, 0) : nullptr;
+
+ #if EASTL_ASSERT_ENABLED
+			if (EASTL_UNLIKELY(((size_t)ptr & (alignment - 1)) != 0))
+ {
+				EASTL_FAIL_MSG("tuple_vector::DoAllocate -- memory not aligned at requested alignment");
+ }
+ #endif
+
+ return make_pair(ptr, offset);
+ }
+
+ template<typename TupleVecImplType, size_type I>
+ static void SetNewData(TupleVecImplType &vec, void* pData, size_type capacity, size_type offset)
+ {
+ EA_UNUSED(vec);
+ EA_UNUSED(pData);
+ EA_UNUSED(capacity);
+ EA_UNUSED(offset);
+ }
+};
+
+template <typename T, typename... Ts>
+struct TupleRecurser<T, Ts...> : TupleRecurser<Ts...>
+{
+ typedef eastl_size_t size_type;
+
+ static EA_CONSTEXPR size_type GetTotalAlignment()
+ {
+ return max(static_cast<size_type>(alignof(T)), TupleRecurser<Ts...>::GetTotalAlignment());
+ }
+
+ static EA_CONSTEXPR size_type GetTotalAllocationSize(size_type capacity, size_type offset)
+ {
+ return TupleRecurser<Ts...>::GetTotalAllocationSize(capacity, CalculateAllocationSize(offset, capacity));
+ }
+
+ template<typename Allocator, size_type I, typename Indices, typename... VecTypes>
+ static pair<void*, size_type> DoAllocate(TupleVecImpl<Allocator, Indices, VecTypes...> &vec, void** ppNewLeaf, size_type capacity, size_type offset)
+ {
+ size_type allocationOffset = CalculatAllocationOffset(offset);
+ size_type allocationSize = CalculateAllocationSize(offset, capacity);
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<Allocator, I + 1, Indices, VecTypes...>(
+ vec, ppNewLeaf, capacity, allocationSize);
+ ppNewLeaf[I] = (void*)((uintptr_t)(allocation.first) + allocationOffset);
+ return allocation;
+ }
+
+ template<typename TupleVecImplType, size_type I>
+ static void SetNewData(TupleVecImplType &vec, void* pData, size_type capacity, size_type offset)
+ {
+ size_type allocationOffset = CalculatAllocationOffset(offset);
+ size_type allocationSize = CalculateAllocationSize(offset, capacity);
+ vec.TupleVecLeaf<I, T>::mpData = (T*)((uintptr_t)pData + allocationOffset);
+ TupleRecurser<Ts...>::template SetNewData<TupleVecImplType, I + 1>(vec, pData, capacity, allocationSize);
+ }
+
+private:
+ static EA_CONSTEXPR size_type CalculateAllocationSize(size_type offset, size_type capacity)
+ {
+ return CalculatAllocationOffset(offset) + sizeof(T) * capacity;
+ }
+
+ static EA_CONSTEXPR size_type CalculatAllocationOffset(size_type offset) { return (offset + alignof(T) - 1) & (~alignof(T) + 1); }
+};
+
+template <eastl_size_t I, typename T>
+struct TupleVecLeaf
+{
+ typedef eastl_size_t size_type;
+
+ void DoUninitializedMoveAndDestruct(const size_type begin, const size_type end, T* pDest)
+ {
+ T* pBegin = mpData + begin;
+ T* pEnd = mpData + end;
+ eastl::uninitialized_move_ptr_if_noexcept(pBegin, pEnd, pDest);
+ eastl::destruct(pBegin, pEnd);
+ }
+
+ void DoInsertAndFill(size_type pos, size_type n, size_type numElements, const T& arg)
+ {
+ T* pDest = mpData + pos;
+ T* pDataEnd = mpData + numElements;
+ const T temp = arg;
+ const size_type nExtra = (numElements - pos);
+		if (n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before the end of the constructed elements)...
+ {
+ eastl::uninitialized_move_ptr(pDataEnd - n, pDataEnd, pDataEnd);
+ eastl::move_backward(pDest, pDataEnd - n, pDataEnd); // We need move_backward because of potential overlap issues.
+ eastl::fill(pDest, pDest + n, temp);
+ }
+ else
+ {
+ eastl::uninitialized_fill_n_ptr(pDataEnd, n - nExtra, temp);
+ eastl::uninitialized_move_ptr(pDest, pDataEnd, pDataEnd + n - nExtra);
+ eastl::fill(pDest, pDataEnd, temp);
+ }
+ }
+
+ void DoInsertRange(T* pSrcBegin, T* pSrcEnd, T* pDestBegin, size_type numDataElements)
+ {
+ size_type pos = pDestBegin - mpData;
+ size_type n = pSrcEnd - pSrcBegin;
+ T* pDataEnd = mpData + numDataElements;
+ const size_type nExtra = numDataElements - pos;
+		if (n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before the end of the constructed elements)...
+ {
+ eastl::uninitialized_move_ptr(pDataEnd - n, pDataEnd, pDataEnd);
+ eastl::move_backward(pDestBegin, pDataEnd - n, pDataEnd); // We need move_backward because of potential overlap issues.
+ eastl::copy(pSrcBegin, pSrcEnd, pDestBegin);
+ }
+ else
+ {
+ eastl::uninitialized_copy(pSrcEnd - (n - nExtra), pSrcEnd, pDataEnd);
+ eastl::uninitialized_move_ptr(pDestBegin, pDataEnd, pDataEnd + n - nExtra);
+ eastl::copy(pSrcBegin, pSrcEnd - (n - nExtra), pDestBegin);
+ }
+ }
+
+ void DoInsertValue(size_type pos, size_type numElements, T&& arg)
+ {
+ T* pDest = mpData + pos;
+ T* pDataEnd = mpData + numElements;
+
+ eastl::uninitialized_move_ptr(pDataEnd - 1, pDataEnd, pDataEnd);
+ eastl::move_backward(pDest, pDataEnd - 1, pDataEnd); // We need move_backward because of potential overlap issues.
+ eastl::destruct(pDest);
+ ::new (pDest) T(eastl::forward<T>(arg));
+ }
+
+ T* mpData = nullptr;
+};
+
+// swallow allows for parameter pack expansion of arguments as means of expanding operations performed
+// if a void function is used for operation expansion, it should be wrapped in (..., 0) so that the compiler
+// thinks it has a parameter to pass into the function
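+//
+// For example, an expression of the following form (as used in ~TupleVecImpl and throughout
+// TupleVecImpl below) expands a destruct call once per element type in the pack:
+//     swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + mNumElements), 0)...);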
+template <typename... Ts>
+void swallow(Ts&&...) { }
+
+inline bool variadicAnd(bool cond) { return cond; }
+
+template <typename... Bs>
+inline bool variadicAnd(bool cond, Bs... conds) { return cond && variadicAnd(conds...); }
+
+// Helper struct to check for strict compatibility between two iterators, whilst still allowing for
+// conversion between TupleVecImpl<Ts...>::iterator and TupleVecImpl<Ts...>::const_iterator.
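+// For example, TupleVecIterCompatible<TupleTypes<int, float>, TupleTypes<const int, const float>>::value is true,
+// whereas TupleVecIterCompatible<TupleTypes<int>, TupleTypes<float>>::value is false.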
+template <bool IsSameSize, typename From, typename To>
+struct TupleVecIterCompatibleImpl : public false_type { };
+
+template<>
+struct TupleVecIterCompatibleImpl<true, TupleTypes<>, TupleTypes<>> : public true_type { };
+
+template <typename From, typename... FromRest, typename To, typename... ToRest>
+struct TupleVecIterCompatibleImpl<true, TupleTypes<From, FromRest...>, TupleTypes<To, ToRest...>> : public integral_constant<bool,
+ TupleVecIterCompatibleImpl<true, TupleTypes<FromRest...>, TupleTypes<ToRest...>>::value &&
+ is_same<typename remove_const<From>::type, typename remove_const<To>::type>::value >
+{ };
+
+template <typename From, typename To>
+struct TupleVecIterCompatible;
+
+template<typename... Us, typename... Ts>
+struct TupleVecIterCompatible<TupleTypes<Us...>, TupleTypes<Ts...>> :
+ public TupleVecIterCompatibleImpl<sizeof...(Us) == sizeof...(Ts), TupleTypes<Us...>, TupleTypes<Ts...>>
+{ };
+
+// The Iterator operates by storing a persistent index internally,
+// and resolving the tuple of pointers to the various parts of the original tupleVec when dereferenced.
+// While resolving the tuple is a non-zero operation, it consistently generated better code than the alternative of
+// storing - and harmoniously updating on each modification - a full tuple of pointers to the tupleVec's data
+template <eastl_size_t... Indices, typename... Ts>
+struct TupleVecIter<index_sequence<Indices...>, Ts...>
+ : public iterator<random_access_iterator_tag, tuple<Ts...>, eastl_size_t, tuple<Ts*...>, tuple<Ts&...>>
+{
+private:
+ typedef TupleVecIter<index_sequence<Indices...>, Ts...> this_type;
+ typedef eastl_size_t size_type;
+
+ typedef iterator<random_access_iterator_tag, tuple<Ts...>, eastl_size_t, tuple<Ts*...>, tuple<Ts&...>> iter_type;
+
+ template<typename U, typename... Us>
+ friend struct TupleVecIter;
+
+ template<typename U, typename V, typename... Us>
+ friend class TupleVecImpl;
+
+ template<typename U>
+ friend class move_iterator;
+public:
+ typedef typename iter_type::iterator_category iterator_category;
+ typedef typename iter_type::value_type value_type;
+ typedef typename iter_type::difference_type difference_type;
+ typedef typename iter_type::pointer pointer;
+ typedef typename iter_type::reference reference;
+
+ TupleVecIter() = default;
+
+ template<typename VecImplType>
+ TupleVecIter(VecImplType* tupleVec, size_type index)
+ : mIndex(index)
+ , mpData{(void*)tupleVec->TupleVecLeaf<Indices, Ts>::mpData...}
+ { }
+
+ template <typename OtherIndicesType, typename... Us,
+ typename = typename enable_if<TupleVecIterCompatible<TupleTypes<Us...>, TupleTypes<Ts...>>::value, bool>::type>
+ TupleVecIter(const TupleVecIter<OtherIndicesType, Us...>& other)
+ : mIndex(other.mIndex)
+ , mpData{other.mpData[Indices]...}
+ {
+ }
+
+ bool operator==(const TupleVecIter& other) const { return mIndex == other.mIndex && mpData[0] == other.mpData[0]; }
+ bool operator!=(const TupleVecIter& other) const { return mIndex != other.mIndex || mpData[0] != other.mpData[0]; }
+ reference operator*() const { return MakeReference(); }
+
+ this_type& operator++() { ++mIndex; return *this; }
+ this_type operator++(int)
+ {
+ this_type temp = *this;
+ ++mIndex;
+ return temp;
+ }
+
+ this_type& operator--() { --mIndex; return *this; }
+ this_type operator--(int)
+ {
+ this_type temp = *this;
+ --mIndex;
+ return temp;
+ }
+
+ this_type& operator+=(difference_type n) { mIndex += n; return *this; }
+ this_type operator+(difference_type n) const
+ {
+ this_type temp = *this;
+ return temp += n;
+ }
+ friend this_type operator+(difference_type n, const this_type& rhs)
+ {
+ this_type temp = rhs;
+ return temp += n;
+ }
+
+	typedef iterator_type wrapped_iterator_type; // This is not in the C++ Standard; it's used by us to identify it as
+ this_type operator-(difference_type n) const
+ {
+ this_type temp = *this;
+ return temp -= n;
+ }
+ friend this_type operator-(difference_type n, const this_type& rhs)
+ {
+ this_type temp = rhs;
+ return temp -= n;
+ }
+
+ difference_type operator-(const this_type& rhs) const { return mIndex - rhs.mIndex; }
+ bool operator<(const this_type& rhs) const { return mIndex < rhs.mIndex; }
+ bool operator>(const this_type& rhs) const { return mIndex > rhs.mIndex; }
+ bool operator>=(const this_type& rhs) const { return mIndex >= rhs.mIndex; }
+ bool operator<=(const this_type& rhs) const { return mIndex <= rhs.mIndex; }
+
+ reference operator[](const size_type n) const
+ {
+ return *(*this + n);
+ }
+
+private:
+
+ value_type MakeValue() const
+ {
+ return value_type(((Ts*)mpData[Indices])[mIndex]...);
+ }
+
+ reference MakeReference() const
+ {
+ return reference(((Ts*)mpData[Indices])[mIndex]...);
+ }
+
+ pointer MakePointer() const
+ {
+ return pointer(&((Ts*)mpData[Indices])[mIndex]...);
+ }
+
+ size_type mIndex = 0;
+ const void* mpData[sizeof...(Ts)];
+};
+
+// TupleVecImpl
+template <typename Allocator, eastl_size_t... Indices, typename... Ts>
+class TupleVecImpl<Allocator, index_sequence<Indices...>, Ts...> : public TupleVecLeaf<Indices, Ts>...
+{
+ typedef Allocator allocator_type;
+ typedef index_sequence<Indices...> index_sequence_type;
+ typedef TupleVecImpl<Allocator, index_sequence_type, Ts...> this_type;
+ typedef TupleVecImpl<Allocator, index_sequence_type, const Ts...> const_this_type;
+
+public:
+ typedef TupleVecInternal::TupleVecIter<index_sequence_type, Ts...> iterator;
+ typedef TupleVecInternal::TupleVecIter<index_sequence_type, const Ts...> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef eastl_size_t size_type;
+ typedef eastl::tuple<Ts...> value_tuple;
+ typedef eastl::tuple<Ts&...> reference_tuple;
+ typedef eastl::tuple<const Ts&...> const_reference_tuple;
+ typedef eastl::tuple<Ts*...> ptr_tuple;
+ typedef eastl::tuple<const Ts*...> const_ptr_tuple;
+ typedef eastl::tuple<Ts&&...> rvalue_tuple;
+
+ TupleVecImpl()
+ : mDataSizeAndAllocator(0, EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ {}
+
+ TupleVecImpl(const allocator_type& allocator)
+ : mDataSizeAndAllocator(0, allocator)
+ {}
+
+ TupleVecImpl(this_type&& x)
+ : mDataSizeAndAllocator(0, eastl::move(x.get_allocator()))
+ {
+ swap(x);
+ }
+
+ TupleVecImpl(this_type&& x, const Allocator& allocator)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ if (get_allocator() == x.get_allocator()) // If allocators are equivalent, then we can safely swap member-by-member
+ {
+ swap(x);
+ }
+ else
+ {
+ this_type temp(eastl::move(*this));
+ temp.swap(x);
+ }
+ }
+
+ TupleVecImpl(const this_type& x)
+ : mDataSizeAndAllocator(0, x.get_allocator())
+ {
+ DoInitFromIterator(x.begin(), x.end());
+ }
+
+ template<typename OtherAllocator>
+ TupleVecImpl(const TupleVecImpl<OtherAllocator, index_sequence_type, Ts...>& x, const Allocator& allocator)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFromIterator(x.begin(), x.end());
+ }
+
+ template<typename MoveIterBase>
+ TupleVecImpl(move_iterator<MoveIterBase> begin, move_iterator<MoveIterBase> end, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFromIterator(begin, end);
+ }
+
+ TupleVecImpl(const_iterator begin, const_iterator end, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator )
+ {
+ DoInitFromIterator(begin, end);
+ }
+
+ TupleVecImpl(size_type n, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitDefaultFill(n);
+ }
+
+ TupleVecImpl(size_type n, const Ts&... args)
+ : mDataSizeAndAllocator(0, EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ {
+ DoInitFillArgs(n, args...);
+ }
+
+ TupleVecImpl(size_type n, const Ts&... args, const allocator_type& allocator)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFillArgs(n, args...);
+ }
+
+ TupleVecImpl(size_type n, const_reference_tuple tup, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFillTuple(n, tup);
+ }
+
+ TupleVecImpl(const value_tuple* first, const value_tuple* last, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFromTupleArray(first, last);
+ }
+
+ TupleVecImpl(std::initializer_list<value_tuple> iList, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR)
+ : mDataSizeAndAllocator(0, allocator)
+ {
+ DoInitFromTupleArray(iList.begin(), iList.end());
+ }
+
+protected:
+ // ctor to provide a pre-allocated field of data that the container will own, specifically for fixed_tuple_vector
+ TupleVecImpl(const allocator_type& allocator, void* pData, size_type capacity, size_type dataSize)
+ : mpData(pData), mNumCapacity(capacity), mDataSizeAndAllocator(dataSize, allocator)
+ {
+ TupleRecurser<Ts...>::template SetNewData<this_type, 0>(*this, mpData, mNumCapacity, 0);
+ }
+
+public:
+ ~TupleVecImpl()
+ {
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + mNumElements), 0)...);
+ if (mpData)
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ }
+
+ void assign(size_type n, const Ts&... args)
+ {
+ if (n > mNumCapacity)
+ {
+ this_type temp(n, args..., get_allocator()); // We have little choice but to reallocate with new memory.
+ swap(temp);
+ }
+ else if (n > mNumElements) // If n > mNumElements ...
+ {
+ size_type oldNumElements = mNumElements;
+ swallow((eastl::fill(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + oldNumElements, args), 0)...);
+ swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + n, args), 0)...);
+ mNumElements = n;
+ }
+ else // else 0 <= n <= mNumElements
+ {
+ swallow((eastl::fill(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + n, args), 0)...);
+ erase(begin() + n, end());
+ }
+ }
+
+ void assign(const_iterator first, const_iterator last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(!validate_iterator_pair(first, last)))
+ EASTL_FAIL_MSG("tuple_vector::assign -- invalid iterator pair");
+#endif
+ size_type newNumElements = last - first;
+ if (newNumElements > mNumCapacity)
+ {
+ this_type temp(first, last, get_allocator());
+ swap(temp);
+ }
+ else
+ {
+ const void* ppOtherData[sizeof...(Ts)] = {first.mpData[Indices]...};
+ size_type firstIdx = first.mIndex;
+ size_type lastIdx = last.mIndex;
+ if (newNumElements > mNumElements) // If n > mNumElements ...
+ {
+ size_type oldNumElements = mNumElements;
+ swallow((eastl::copy((Ts*)(ppOtherData[Indices]) + firstIdx,
+ (Ts*)(ppOtherData[Indices]) + firstIdx + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx + oldNumElements,
+ (Ts*)(ppOtherData[Indices]) + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ mNumElements = newNumElements;
+ }
+ else // else 0 <= n <= mNumElements
+ {
+ swallow((eastl::copy((Ts*)(ppOtherData[Indices]) + firstIdx, (Ts*)(ppOtherData[Indices]) + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ erase(begin() + newNumElements, end());
+ }
+ }
+ }
+
+ void assign(const value_tuple* first, const value_tuple* last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr))
+ EASTL_FAIL_MSG("tuple_vector::assign from tuple array -- invalid ptrs");
+#endif
+ size_type newNumElements = last - first;
+ if (newNumElements > mNumCapacity)
+ {
+ this_type temp(first, last, get_allocator());
+ swap(temp);
+ }
+ else
+ {
+ if (newNumElements > mNumElements) // If n > mNumElements ...
+ {
+ size_type oldNumElements = mNumElements;
+
+ DoCopyFromTupleArray(begin(), begin() + oldNumElements, first);
+ DoUninitializedCopyFromTupleArray(begin() + oldNumElements, begin() + newNumElements, first + oldNumElements);
+ mNumElements = newNumElements;
+ }
+ else // else 0 <= n <= mNumElements
+ {
+ DoCopyFromTupleArray(begin(), begin() + newNumElements, first);
+ erase(begin() + newNumElements, end());
+ }
+ }
+ }
+
+ reference_tuple push_back()
+ {
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ DoGrow(oldNumElements, oldNumCapacity, newNumElements);
+ swallow(::new(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements) Ts()...);
+ return back();
+ }
+
+ void push_back(const Ts&... args)
+ {
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ DoGrow(oldNumElements, oldNumCapacity, newNumElements);
+ swallow(::new(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements) Ts(args)...);
+ }
+
+ void push_back_uninitialized()
+ {
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ DoGrow(oldNumElements, oldNumCapacity, newNumElements);
+ }
+
+ reference_tuple emplace_back(Ts&&... args)
+ {
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ DoGrow(oldNumElements, oldNumCapacity, newNumElements);
+ swallow(::new(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements) Ts(eastl::forward<Ts>(args))...);
+ return back();
+ }
+
+ iterator emplace(const_iterator pos, Ts&&... args)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::emplace -- invalid iterator");
+#endif
+ size_type firstIdx = pos - cbegin();
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = mNumElements + 1;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ if (newNumElements > oldNumCapacity || firstIdx != oldNumElements)
+ {
+ if (newNumElements > oldNumCapacity)
+ {
+ const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements);
+
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, newCapacity, 0);
+
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ 0, firstIdx, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ firstIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + firstIdx + 1), 0)...);
+ swallow(::new ((Ts*)ppNewLeaf[Indices] + firstIdx) Ts(eastl::forward<Ts>(args))...);
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = newCapacity;
+ internalDataSize() = allocation.second;
+ }
+ else
+ {
+ swallow((TupleVecLeaf<Indices, Ts>::DoInsertValue(firstIdx, oldNumElements, eastl::forward<Ts>(args)), 0)...);
+ }
+ }
+ else
+ {
+ swallow(::new (TupleVecLeaf<Indices, Ts>::mpData + oldNumElements) Ts(eastl::forward<Ts>(args))...);
+ }
+ return begin() + firstIdx;
+ }
+
+ iterator insert(const_iterator pos, size_type n, const Ts&... args)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator");
+#endif
+ size_type firstIdx = pos - cbegin();
+ size_type lastIdx = firstIdx + n;
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = mNumElements + n;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ if (newNumElements > oldNumCapacity || firstIdx != oldNumElements)
+ {
+ if (newNumElements > oldNumCapacity)
+ {
+ const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements);
+
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, newCapacity, 0);
+
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ 0, firstIdx, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ firstIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + lastIdx), 0)...);
+ swallow((eastl::uninitialized_fill_ptr((Ts*)ppNewLeaf[Indices] + firstIdx, (Ts*)ppNewLeaf[Indices] + lastIdx, args), 0)...);
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = newCapacity;
+ internalDataSize() = allocation.second;
+ }
+ else
+ {
+ swallow((TupleVecLeaf<Indices, Ts>::DoInsertAndFill(firstIdx, n, oldNumElements, args), 0)...);
+ }
+ }
+ else
+ {
+ swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + newNumElements, args), 0)...);
+ }
+ return begin() + firstIdx;
+ }
+
+ iterator insert(const_iterator pos, const_iterator first, const_iterator last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator");
+ if (EASTL_UNLIKELY(!validate_iterator_pair(first, last)))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator pair");
+#endif
+ size_type posIdx = pos - cbegin();
+ size_type firstIdx = first.mIndex;
+ size_type lastIdx = last.mIndex;
+ size_type numToInsert = last - first;
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + numToInsert;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ const void* ppOtherData[sizeof...(Ts)] = {first.mpData[Indices]...};
+ if (newNumElements > oldNumCapacity || posIdx != oldNumElements)
+ {
+ if (newNumElements > oldNumCapacity)
+ {
+ const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements);
+
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, newCapacity, 0);
+
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ 0, posIdx, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ posIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + posIdx + numToInsert), 0)...);
+ swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx,
+ (Ts*)(ppOtherData[Indices]) + lastIdx,
+ (Ts*)ppNewLeaf[Indices] + posIdx), 0)...);
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = newCapacity;
+ internalDataSize() = allocation.second;
+ }
+ else
+ {
+ swallow((TupleVecLeaf<Indices, Ts>::DoInsertRange(
+ (Ts*)(ppOtherData[Indices]) + firstIdx, (Ts*)(ppOtherData[Indices]) + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData + posIdx, oldNumElements), 0)...);
+ }
+ }
+ else
+ {
+ swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx,
+ (Ts*)(ppOtherData[Indices]) + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData + posIdx), 0)...);
+ }
+ return begin() + posIdx;
+ }
+
+ iterator insert(const_iterator pos, const value_tuple* first, const value_tuple* last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator");
+ if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr))
+ EASTL_FAIL_MSG("tuple_vector::insert -- invalid source pointers");
+#endif
+ size_type posIdx = pos - cbegin();
+ size_type numToInsert = last - first;
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements + numToInsert;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = newNumElements;
+ if (newNumElements > oldNumCapacity || posIdx != oldNumElements)
+ {
+ if (newNumElements > oldNumCapacity)
+ {
+ const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements);
+
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, newCapacity, 0);
+
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ 0, posIdx, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(
+ posIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + posIdx + numToInsert), 0)...);
+
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ // Do this after mpData is updated so that we can use new iterators
+ DoUninitializedCopyFromTupleArray(begin() + posIdx, begin() + posIdx + numToInsert, first);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = newCapacity;
+ internalDataSize() = allocation.second;
+ }
+ else
+ {
+ const size_type nExtra = oldNumElements - posIdx;
+ void* ppDataEnd[sizeof...(Ts)] = { (void*)(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements)... };
+ void* ppDataBegin[sizeof...(Ts)] = { (void*)(TupleVecLeaf<Indices, Ts>::mpData + posIdx)... };
+				if (numToInsert < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before the end of the constructed elements)...
+ {
+ swallow((eastl::uninitialized_move_ptr((Ts*)ppDataEnd[Indices] - numToInsert,
+ (Ts*)ppDataEnd[Indices], (Ts*)ppDataEnd[Indices]), 0)...);
+ // We need move_backward because of potential overlap issues.
+ swallow((eastl::move_backward((Ts*)ppDataBegin[Indices],
+ (Ts*)ppDataEnd[Indices] - numToInsert, (Ts*)ppDataEnd[Indices]), 0)...);
+
+ DoCopyFromTupleArray(pos, pos + numToInsert, first);
+ }
+ else
+ {
+ size_type numToInitialize = numToInsert - nExtra;
+ swallow((eastl::uninitialized_move_ptr((Ts*)ppDataBegin[Indices],
+ (Ts*)ppDataEnd[Indices], (Ts*)ppDataEnd[Indices] + numToInitialize), 0)...);
+
+ DoCopyFromTupleArray(pos, begin() + oldNumElements, first);
+ DoUninitializedCopyFromTupleArray(begin() + oldNumElements, pos + numToInsert, first + nExtra);
+ }
+ }
+ }
+ else
+ {
+ DoUninitializedCopyFromTupleArray(pos, pos + numToInsert, first);
+ }
+ return begin() + posIdx;
+ }
+
+ iterator erase(const_iterator first, const_iterator last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(first) == isf_none || validate_iterator(last) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator");
+ if (EASTL_UNLIKELY(!validate_iterator_pair(first, last)))
+ EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator pair");
+#endif
+ if (first != last)
+ {
+ size_type firstIdx = first - cbegin();
+ size_type lastIdx = last - cbegin();
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements - (lastIdx - firstIdx);
+ mNumElements = newNumElements;
+ swallow((eastl::move(TupleVecLeaf<Indices, Ts>::mpData + lastIdx,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + firstIdx), 0)...);
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + newNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+ return begin() + first.mIndex;
+ }
+
+ iterator erase_unsorted(const_iterator pos)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none))
+ EASTL_FAIL_MSG("tuple_vector::erase_unsorted -- invalid iterator");
+#endif
+ size_type oldNumElements = mNumElements;
+ size_type newNumElements = oldNumElements - 1;
+ mNumElements = newNumElements;
+ swallow((eastl::move(TupleVecLeaf<Indices, Ts>::mpData + newNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + (pos - begin())), 0)...);
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + newNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ return begin() + pos.mIndex;
+ }
+
+ void resize(size_type n)
+ {
+ size_type oldNumElements = mNumElements;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = n;
+ if (n > oldNumElements)
+ {
+ if (n > oldNumCapacity)
+ {
+ DoReallocate(oldNumElements, eastl::max<size_type>(GetNewCapacity(oldNumCapacity), n));
+ }
+ swallow((eastl::uninitialized_default_fill_n(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements, n - oldNumElements), 0)...);
+ }
+ else
+ {
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + n,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+ }
+
+ void resize(size_type n, const Ts&... args)
+ {
+ size_type oldNumElements = mNumElements;
+ size_type oldNumCapacity = mNumCapacity;
+ mNumElements = n;
+ if (n > oldNumElements)
+ {
+ if (n > oldNumCapacity)
+ {
+ DoReallocate(oldNumElements, eastl::max<size_type>(GetNewCapacity(oldNumCapacity), n));
+ }
+ swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements,
+ TupleVecLeaf<Indices, Ts>::mpData + n, args), 0)...);
+ }
+ else
+ {
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + n,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+ }
+
+ void reserve(size_type n)
+ {
+ DoConditionalReallocate(mNumElements, mNumCapacity, n);
+ }
+
+ void shrink_to_fit()
+ {
+ this_type temp(move_iterator<iterator>(begin()), move_iterator<iterator>(end()), get_allocator());
+ swap(temp);
+ }
+
+ void clear() EA_NOEXCEPT
+ {
+ size_type oldNumElements = mNumElements;
+ mNumElements = 0;
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+
+ void pop_back()
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements <= 0))
+ EASTL_FAIL_MSG("tuple_vector::pop_back -- container is empty");
+#endif
+ size_type oldNumElements = mNumElements--;
+ swallow((eastl::destruct(TupleVecLeaf<Indices, Ts>::mpData + oldNumElements - 1,
+ TupleVecLeaf<Indices, Ts>::mpData + oldNumElements), 0)...);
+ }
+
+ void swap(this_type& x)
+ {
+ swallow((eastl::swap(TupleVecLeaf<Indices, Ts>::mpData, x.TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ eastl::swap(mpData, x.mpData);
+ eastl::swap(mNumElements, x.mNumElements);
+ eastl::swap(mNumCapacity, x.mNumCapacity);
+ eastl::swap(get_allocator(), x.get_allocator());
+ eastl::swap(internalDataSize(), x.internalDataSize());
+ }
+
+ void assign(size_type n, const_reference_tuple tup) { assign(n, eastl::get<Indices>(tup)...); }
+ void assign(std::initializer_list<value_tuple> iList) { assign(iList.begin(), iList.end()); }
+
+ void push_back(Ts&&... args) { emplace_back(eastl::forward<Ts>(args)...); }
+ void push_back(const_reference_tuple tup) { push_back(eastl::get<Indices>(tup)...); }
+ void push_back(rvalue_tuple tup) { emplace_back(eastl::forward<Ts>(eastl::get<Indices>(tup))...); }
+
+ void emplace_back(rvalue_tuple tup) { emplace_back(eastl::forward<Ts>(eastl::get<Indices>(tup))...); }
+ void emplace(const_iterator pos, rvalue_tuple tup) { emplace(pos, eastl::forward<Ts>(eastl::get<Indices>(tup))...); }
+
+ iterator insert(const_iterator pos, const Ts&... args) { return insert(pos, 1, args...); }
+ iterator insert(const_iterator pos, Ts&&... args) { return emplace(pos, eastl::forward<Ts>(args)...); }
+ iterator insert(const_iterator pos, rvalue_tuple tup) { return emplace(pos, eastl::forward<Ts>(eastl::get<Indices>(tup))...); }
+ iterator insert(const_iterator pos, const_reference_tuple tup) { return insert(pos, eastl::get<Indices>(tup)...); }
+ iterator insert(const_iterator pos, size_type n, const_reference_tuple tup) { return insert(pos, n, eastl::get<Indices>(tup)...); }
+ iterator insert(const_iterator pos, std::initializer_list<value_tuple> iList) { return insert(pos, iList.begin(), iList.end()); }
+
+ iterator erase(const_iterator pos) { return erase(pos, pos + 1); }
+ reverse_iterator erase(const_reverse_iterator pos) { return reverse_iterator(erase((pos + 1).base(), (pos).base())); }
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last) { return reverse_iterator(erase((last).base(), (first).base())); }
+ reverse_iterator erase_unsorted(const_reverse_iterator pos) { return reverse_iterator(erase_unsorted((pos + 1).base())); }
+
+ void resize(size_type n, const_reference_tuple tup) { resize(n, eastl::get<Indices>(tup)...); }
+
+ bool empty() const EA_NOEXCEPT { return mNumElements == 0; }
+ size_type size() const EA_NOEXCEPT { return mNumElements; }
+ size_type capacity() const EA_NOEXCEPT { return mNumCapacity; }
+
+ iterator begin() EA_NOEXCEPT { return iterator(this, 0); }
+ const_iterator begin() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), 0); }
+ const_iterator cbegin() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), 0); }
+
+ iterator end() EA_NOEXCEPT { return iterator(this, size()); }
+ const_iterator end() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), size()); }
+ const_iterator cend() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), size()); }
+
+ reverse_iterator rbegin() EA_NOEXCEPT { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const EA_NOEXCEPT { return const_reverse_iterator(end()); }
+ const_reverse_iterator crbegin() const EA_NOEXCEPT { return const_reverse_iterator(end()); }
+
+ reverse_iterator rend() EA_NOEXCEPT { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const EA_NOEXCEPT { return const_reverse_iterator(begin()); }
+ const_reverse_iterator crend() const EA_NOEXCEPT { return const_reverse_iterator(begin()); }
+
+ ptr_tuple data() EA_NOEXCEPT { return ptr_tuple(TupleVecLeaf<Indices, Ts>::mpData...); }
+ const_ptr_tuple data() const EA_NOEXCEPT { return const_ptr_tuple(TupleVecLeaf<Indices, Ts>::mpData...); }
+
+ reference_tuple at(size_type n)
+ {
+#if EASTL_EXCEPTIONS_ENABLED
+ if (EASTL_UNLIKELY(n >= mNumElements))
+ throw std::out_of_range("tuple_vector::at -- out of range");
+#elif EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(n >= mNumElements))
+ EASTL_FAIL_MSG("tuple_vector::at -- out of range");
+#endif
+ return reference_tuple(*(TupleVecLeaf<Indices, Ts>::mpData + n)...);
+ }
+
+ const_reference_tuple at(size_type n) const
+ {
+#if EASTL_EXCEPTIONS_ENABLED
+ if (EASTL_UNLIKELY(n >= mNumElements))
+ throw std::out_of_range("tuple_vector::at -- out of range");
+#elif EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(n >= mNumElements))
+ EASTL_FAIL_MSG("tuple_vector::at -- out of range");
+#endif
+ return const_reference_tuple(*(TupleVecLeaf<Indices, Ts>::mpData + n)...);
+ }
+
+ reference_tuple operator[](size_type n) { return at(n); }
+ const_reference_tuple operator[](size_type n) const { return at(n); }
+
+ reference_tuple front()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("tuple_vector::front -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return at(0);
+ }
+
+ const_reference_tuple front() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("tuple_vector::front -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return at(0);
+ }
+
+ reference_tuple back()
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("tuple_vector::back -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return at(size() - 1);
+ }
+
+ const_reference_tuple back() const
+ {
+ #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container.
+ EASTL_FAIL_MSG("tuple_vector::back -- empty vector");
+ #else
+ // We allow the user to reference an empty container.
+ #endif
+
+ return at(size() - 1);
+ }
+
+ template <size_type I>
+ tuplevec_element_t<I, Ts...>* get()
+ {
+ typedef tuplevec_element_t<I, Ts...> Element;
+ return TupleVecLeaf<I, Element>::mpData;
+ }
+ template <size_type I>
+ const tuplevec_element_t<I, Ts...>* get() const
+ {
+ typedef tuplevec_element_t<I, Ts...> Element;
+ return TupleVecLeaf<I, Element>::mpData;
+ }
+
+ template <typename T>
+ T* get()
+ {
+ typedef tuplevec_index<T, TupleTypes<Ts...>> Index;
+ return TupleVecLeaf<Index::index, T>::mpData;
+ }
+ template <typename T>
+ const T* get() const
+ {
+ typedef tuplevec_index<T, TupleTypes<Ts...>> Index;
+ return TupleVecLeaf<Index::index, T>::mpData;
+ }
+
+ this_type& operator=(const this_type& other)
+ {
+ if (this != &other)
+ {
+ clear();
+ assign(other.begin(), other.end());
+ }
+ return *this;
+ }
+
+ this_type& operator=(this_type&& other)
+ {
+ if (this != &other)
+ {
+ swap(other);
+ }
+ return *this;
+ }
+
+ this_type& operator=(std::initializer_list<value_tuple> iList)
+ {
+ assign(iList.begin(), iList.end());
+ return *this;
+ }
+
+ bool validate() const EA_NOEXCEPT
+ {
+ if (mNumElements > mNumCapacity)
+ return false;
+ if (!(variadicAnd(mpData <= TupleVecLeaf<Indices, Ts>::mpData...)))
+ return false;
+ void* pDataEnd = (void*)((uintptr_t)mpData + internalDataSize());
+ if (!(variadicAnd(pDataEnd >= TupleVecLeaf<Indices, Ts>::mpData...)))
+ return false;
+ return true;
+ }
+
+ int validate_iterator(const_iterator iter) const EA_NOEXCEPT
+ {
+ if (!(variadicAnd(iter.mpData[Indices] == TupleVecLeaf<Indices, Ts>::mpData...)))
+ return isf_none;
+ if (iter.mIndex < mNumElements)
+ return (isf_valid | isf_current | isf_can_dereference);
+ if (iter.mIndex <= mNumElements)
+ return (isf_valid | isf_current);
+ return isf_none;
+ }
+
+ static bool validate_iterator_pair(const_iterator first, const_iterator last) EA_NOEXCEPT
+ {
+ return (first.mIndex <= last.mIndex) && variadicAnd(first.mpData[Indices] == last.mpData[Indices]...);
+ }
+
+ template <typename Iterator, typename = typename enable_if<is_iterator_wrapper<Iterator>::value, bool>::type>
+ int validate_iterator(Iterator iter) const EA_NOEXCEPT { return validate_iterator(unwrap_iterator(iter)); }
+
+ template <typename Iterator, typename = typename enable_if<is_iterator_wrapper<Iterator>::value, bool>::type>
+ static bool validate_iterator_pair(Iterator first, Iterator last) EA_NOEXCEPT { return validate_iterator_pair(unwrap_iterator(first), unwrap_iterator(last)); }
+
+ allocator_type& get_allocator() EA_NOEXCEPT { return mDataSizeAndAllocator.second(); }
+ const allocator_type& get_allocator() const EA_NOEXCEPT { return mDataSizeAndAllocator.second(); }
+
+ void set_allocator(const allocator_type& alloc) { mDataSizeAndAllocator.second() = alloc; }
+
+protected:
+
+ void* mpData = nullptr;
+ size_type mNumElements = 0;
+ size_type mNumCapacity = 0;
+
+ compressed_pair<size_type, allocator_type> mDataSizeAndAllocator;
+
+ size_type& internalDataSize() EA_NOEXCEPT { return mDataSizeAndAllocator.first(); }
+ size_type const& internalDataSize() const EA_NOEXCEPT { return mDataSizeAndAllocator.first(); }
+
+ friend struct TupleRecurser<>;
+ template<typename... Us>
+ friend struct TupleRecurser;
+
+ template <typename MoveIterBase>
+ void DoInitFromIterator(move_iterator<MoveIterBase> begin, move_iterator<MoveIterBase> end)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(!validate_iterator_pair(begin, end)))
+			EASTL_FAIL_MSG("tuple_vector::DoInitFromIterator -- invalid iterator pair");
+#endif
+ size_type newNumElements = (size_type)(end - begin);
+ const void* ppOtherData[sizeof...(Ts)] = { begin.base().mpData[Indices]... };
+ size_type beginIdx = begin.base().mIndex;
+ size_type endIdx = end.base().mIndex;
+ DoConditionalReallocate(0, mNumCapacity, newNumElements);
+ mNumElements = newNumElements;
+ swallow((eastl::uninitialized_move_ptr(eastl::move_iterator<Ts*>((Ts*)(ppOtherData[Indices]) + beginIdx),
+ eastl::move_iterator<Ts*>((Ts*)(ppOtherData[Indices]) + endIdx),
+ TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ }
+
+ void DoInitFromIterator(const_iterator begin, const_iterator end)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(!validate_iterator_pair(begin, end)))
+			EASTL_FAIL_MSG("tuple_vector::DoInitFromIterator -- invalid iterator pair");
+#endif
+ size_type newNumElements = (size_type)(end - begin);
+ const void* ppOtherData[sizeof...(Ts)] = { begin.mpData[Indices]... };
+ size_type beginIdx = begin.mIndex;
+ size_type endIdx = end.mIndex;
+ DoConditionalReallocate(0, mNumCapacity, newNumElements);
+ mNumElements = newNumElements;
+ swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + beginIdx,
+ (Ts*)(ppOtherData[Indices]) + endIdx,
+ TupleVecLeaf<Indices, Ts>::mpData), 0)...);
+ }
+
+ void DoInitFillTuple(size_type n, const_reference_tuple tup) { DoInitFillArgs(n, eastl::get<Indices>(tup)...); }
+
+ void DoInitFillArgs(size_type n, const Ts&... args)
+ {
+ DoConditionalReallocate(0, mNumCapacity, n);
+ mNumElements = n;
+ swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf<Indices, Ts>::mpData, TupleVecLeaf<Indices, Ts>::mpData + n, args), 0)...);
+ }
+
+ void DoInitDefaultFill(size_type n)
+ {
+ DoConditionalReallocate(0, mNumCapacity, n);
+ mNumElements = n;
+ swallow((eastl::uninitialized_default_fill_n(TupleVecLeaf<Indices, Ts>::mpData, n), 0)...);
+ }
+
+ void DoInitFromTupleArray(const value_tuple* first, const value_tuple* last)
+ {
+#if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr))
+ EASTL_FAIL_MSG("tuple_vector::ctor from tuple array -- invalid ptrs");
+#endif
+ size_type newNumElements = last - first;
+ DoConditionalReallocate(0, mNumCapacity, newNumElements);
+ mNumElements = newNumElements;
+ DoUninitializedCopyFromTupleArray(begin(), end(), first);
+ }
+
+ void DoCopyFromTupleArray(iterator destPos, iterator destEnd, const value_tuple* srcTuple)
+ {
+ // assign to constructed region
+ while (destPos < destEnd)
+ {
+ *destPos = *srcTuple;
+ ++destPos;
+ ++srcTuple;
+ }
+ }
+
+ void DoUninitializedCopyFromTupleArray(iterator destPos, iterator destEnd, const value_tuple* srcTuple)
+ {
+ // placement-new/copy-ctor to unconstructed regions
+ while (destPos < destEnd)
+ {
+ swallow(::new(eastl::get<Indices>(destPos.MakePointer())) Ts(eastl::get<Indices>(*srcTuple))...);
+ ++destPos;
+ ++srcTuple;
+ }
+ }
+
+ // Try to grow the size of the container "naturally" given the number of elements being used
+ void DoGrow(size_type oldNumElements, size_type oldNumCapacity, size_type requiredCapacity)
+ {
+ if (requiredCapacity > oldNumCapacity)
+ DoReallocate(oldNumElements, GetNewCapacity(requiredCapacity));
+ }
+
+ // Reallocate to the newCapacity (IFF it's actually larger, though)
+ void DoConditionalReallocate(size_type oldNumElements, size_type oldNumCapacity, size_type requiredCapacity)
+ {
+ if (requiredCapacity > oldNumCapacity)
+ DoReallocate(oldNumElements, requiredCapacity);
+ }
+
+ void DoReallocate(size_type oldNumElements, size_type requiredCapacity)
+ {
+ void* ppNewLeaf[sizeof...(Ts)];
+ pair<void*, size_type> allocation = TupleRecurser<Ts...>::template DoAllocate<allocator_type, 0, index_sequence_type, Ts...>(
+ *this, ppNewLeaf, requiredCapacity, 0);
+ swallow((TupleVecLeaf<Indices, Ts>::DoUninitializedMoveAndDestruct(0, oldNumElements, (Ts*)ppNewLeaf[Indices]), 0)...);
+ swallow(TupleVecLeaf<Indices, Ts>::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+ EASTLFree(get_allocator(), mpData, internalDataSize());
+ mpData = allocation.first;
+ mNumCapacity = requiredCapacity;
+ internalDataSize() = allocation.second;
+ }
+
+ size_type GetNewCapacity(size_type oldNumCapacity)
+ {
+ return (oldNumCapacity > 0) ? (2 * oldNumCapacity) : 1;
+ }
+};
+
+} // namespace TupleVecInternal
+
+// Move_iterator specialization for TupleVecIter.
+// The reference type of a move_iterator would normally be "tuple<Ts...>&&", whereas
+// what we actually want is "tuple<Ts&&...>". This specialization gives us that.
+template <eastl_size_t... Indices, typename... Ts>
+class move_iterator<TupleVecInternal::TupleVecIter<index_sequence<Indices...>, Ts...>>
+{
+public:
+ typedef TupleVecInternal::TupleVecIter<index_sequence<Indices...>, Ts...> iterator_type;
+	typedef iterator_type wrapped_iterator_type; // This is not in the C++ Standard; it's used by us to identify it as
+ // a wrapping iterator type.
+ typedef iterator_traits<iterator_type> traits_type;
+ typedef typename traits_type::iterator_category iterator_category;
+ typedef typename traits_type::value_type value_type;
+ typedef typename traits_type::difference_type difference_type;
+ typedef typename traits_type::pointer pointer;
+ typedef tuple<Ts&&...> reference;
+ typedef move_iterator<iterator_type> this_type;
+
+protected:
+ iterator_type mIterator;
+
+public:
+ move_iterator() : mIterator() {}
+ explicit move_iterator(iterator_type mi) : mIterator(mi) {}
+
+ template <typename U>
+ move_iterator(const move_iterator<U>& mi) : mIterator(mi.base()) {}
+
+ iterator_type base() const { return mIterator; }
+ reference operator*() const { return eastl::move(MakeReference()); }
+ pointer operator->() const { return mIterator; }
+
+ this_type& operator++() { ++mIterator; return *this; }
+ this_type operator++(int) {
+ this_type tempMoveIterator = *this;
+ ++mIterator;
+ return tempMoveIterator;
+ }
+
+ this_type& operator--() { --mIterator; return *this; }
+ this_type operator--(int)
+ {
+ this_type tempMoveIterator = *this;
+ --mIterator;
+ return tempMoveIterator;
+ }
+
+ this_type operator+(difference_type n) const { return move_iterator(mIterator + n); }
+ this_type& operator+=(difference_type n)
+ {
+ mIterator += n;
+ return *this;
+ }
+
+ this_type operator-(difference_type n) const { return move_iterator(mIterator - n); }
+ this_type& operator-=(difference_type n)
+ {
+ mIterator -= n;
+ return *this;
+ }
+
+ difference_type operator-(const this_type& rhs) const { return mIterator - rhs.mIterator; }
+ bool operator<(const this_type& rhs) const { return mIterator < rhs.mIterator; }
+ bool operator>(const this_type& rhs) const { return mIterator > rhs.mIterator; }
+ bool operator>=(const this_type& rhs) const { return mIterator >= rhs.mIterator; }
+ bool operator<=(const this_type& rhs) const { return mIterator <= rhs.mIterator; }
+
+ reference operator[](difference_type n) const { return *(*this + n); }
+
+private:
+ reference MakeReference() const
+ {
+ return reference(eastl::move(((Ts*)mIterator.mpData[Indices])[mIterator.mIndex])...);
+ }
+};
+
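+// For illustration only (a hedged sketch, not part of the library interface): wrapping
+// a tuple_vector iterator in a move_iterator makes operator* yield a tuple of rvalue
+// references, so individual elements can be moved out column-wise. The element types
+// and values below are assumptions made up for the example:
+//
+//     eastl::tuple_vector<eastl::string, int> src;
+//     src.push_back(eastl::string("hello"), 42);
+//     // get<0> of the dereferenced move_iterator is an eastl::string&&, so this moves:
+//     eastl::string s(eastl::get<0>(*eastl::make_move_iterator(src.begin())));
+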
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator==(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin()));
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator!=(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return ((a.size() != b.size()) || !eastl::equal(a.begin(), a.end(), b.begin()));
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator<(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator>(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return b < a;
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator<=(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return !(b < a);
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline bool operator>=(const TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ const TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ return !(a < b);
+}
+
+template <typename AllocatorA, typename AllocatorB, typename Indices, typename... Ts>
+inline void swap(TupleVecInternal::TupleVecImpl<AllocatorA, Indices, Ts...>& a,
+ TupleVecInternal::TupleVecImpl<AllocatorB, Indices, Ts...>& b)
+{
+ a.swap(b);
+}
+
+// A customization of swap is provided for rvalues of tuples-of-references.
+// Normally, swapping rvalues doesn't make sense, but in this case we do want to
+// swap the contents of what the tuples-of-references are referring to.
+//
+// This is required because TupleVecIter returns a value type (a tuple of references)
+// when dereferenced, as opposed to an actual reference.
+template<typename... Ts>
+inline
+typename enable_if<conjunction<is_swappable<Ts>...>::value>::type
+swap(tuple<Ts&...>&& a, tuple<Ts&...>&& b)
+{
+ a.swap(b);
+}
+
+template<typename... Ts>
+inline
+typename enable_if<!conjunction<is_swappable<Ts>...>::value>::type
+swap(tuple<Ts&...>&& a, tuple<Ts&...>&& b) = delete;
+
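+// For illustration only (a hedged sketch, not part of the library interface): this
+// overload is what allows generic code - e.g. eastl::sort, or a manual eastl::swap of
+// two dereferenced iterators - to exchange tuple_vector elements even though operator*
+// returns a tuple of references by value. The values below are assumptions:
+//
+//     eastl::tuple_vector<int, float> tv;
+//     tv.push_back(3, 3.0f);
+//     tv.push_back(1, 1.0f);
+//     eastl::swap(*tv.begin(), *(tv.begin() + 1)); // swaps the underlying ints and floats in place
+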
+
+// External interface of tuple_vector
+template <typename... Ts>
+class tuple_vector : public TupleVecInternal::TupleVecImpl<EASTLAllocatorType, make_index_sequence<sizeof...(Ts)>, Ts...>
+{
+ typedef tuple_vector<Ts...> this_type;
+ typedef TupleVecInternal::TupleVecImpl<EASTLAllocatorType, make_index_sequence<sizeof...(Ts)>, Ts...> base_type;
+ using base_type::base_type;
+
+public:
+ this_type& operator=(std::initializer_list<typename base_type::value_tuple> iList)
+ {
+ base_type::operator=(iList);
+ return *this;
+ }
+};
+
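+// Basic usage, for illustration only (a hedged sketch; the names and values below are
+// assumptions, not taken from the library):
+//
+//     eastl::tuple_vector<bool, int, float> entities;
+//     entities.push_back(true, 7, 1.5f);   // one logical "row", stored column-wise (SoA)
+//     bool* actives = entities.get<0>();   // contiguous array of just the bool column
+//     for (auto row : entities)            // row is a tuple of references into the columns
+//         eastl::get<2>(row) += 1.0f;
+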
+// Variant of tuple_vector that allows a user-defined allocator type (can't mix default template params with variadics)
+template <typename AllocatorType, typename... Ts>
+class tuple_vector_alloc
+ : public TupleVecInternal::TupleVecImpl<AllocatorType, make_index_sequence<sizeof...(Ts)>, Ts...>
+{
+ typedef tuple_vector_alloc<AllocatorType, Ts...> this_type;
+ typedef TupleVecInternal::TupleVecImpl<AllocatorType, make_index_sequence<sizeof...(Ts)>, Ts...> base_type;
+ using base_type::base_type;
+
+public:
+
+ this_type& operator=(std::initializer_list<typename base_type::value_tuple> iList)
+ {
+ base_type::operator=(iList);
+ return *this;
+ }
+};
+
+} // namespace eastl
+
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+
+#endif // EASTL_TUPLEVECTOR_H