aboutsummaryrefslogtreecommitdiff
path: root/test/benchmark
diff options
context:
space:
mode:
Diffstat (limited to 'test/benchmark')
-rw-r--r--test/benchmark/README.md68
-rwxr-xr-xtest/benchmark/benchall.sh16
-rw-r--r--test/benchmark/benchflatc/benchflatc.cpp70
-rw-r--r--test/benchmark/benchflatc/flatbench_generated.h166
-rw-r--r--test/benchmark/benchflatc/flatbuffers/flatbuffers.h1189
-rwxr-xr-xtest/benchmark/benchflatc/run.sh23
-rw-r--r--test/benchmark/benchflatcc/benchflatcc.c98
-rwxr-xr-xtest/benchmark/benchflatcc/run.sh24
-rw-r--r--test/benchmark/benchflatccjson/benchflatccjson.c182
-rwxr-xr-xtest/benchmark/benchflatccjson/run.sh23
-rw-r--r--test/benchmark/benchmain/benchmain.h66
-rw-r--r--test/benchmark/benchout-osx.txt169
-rw-r--r--test/benchmark/benchout-ubuntu.txt169
-rw-r--r--test/benchmark/benchraw/benchraw.c117
-rwxr-xr-xtest/benchmark/benchraw/run.sh21
-rw-r--r--test/benchmark/schema/flatbench.fbs37
16 files changed, 2438 insertions, 0 deletions
diff --git a/test/benchmark/README.md b/test/benchmark/README.md
new file mode 100644
index 0000000..c98dbbb
--- /dev/null
+++ b/test/benchmark/README.md
@@ -0,0 +1,68 @@
+# FlatBench
+
+This is based on the Google FlatBuffer benchmark schema, but the
+benchmark itself is independent and not directly comparable, although
+roughly the same operations are being executed.
+
+The `benchflatc` folder contains C++ headers and code generated by Google's
+`flatc` compiler while `benchflatcc` contains material from this project.
+
+The `benchraw` folder contains structs similar to those used in Google's
+benchmark, but again the benchmark isn't directly comparable.
+
+It should be noted that allocation strategies differ. The C++ builder
+is constructed on each build iteration whereas the C version resets
+instead. The benchmark is designed such that the C++ version could do
+the same if the builder supports it.
+
+## Execution
+
+Build and run each benchmark individually:
+
+ benchmark/benchflatc/run.sh
+ benchmark/benchflatcc/run.sh
+ benchmark/benchraw/run.sh
+ benchmark/benchflatccjson/run.sh
+
+Note that each benchmark runs in both debug and optimized versions!
+
+
+# Environment
+
+The benchmarks are designed for a `*nix` environment.
+
+- A C compiler named `cc` supporting -std=c11 is required for flatcc.
+- A C++ compiler named `c++` supporting -std=c++11 is required for
+ flatc.
+- A C compiler named `cc` supporting <stdint.h> is required for raw benchmark.
+- Test is driven by a shell script.
+
+The time measurements in `elapsed.h` ought to work with Windows, but it
+has not been tested. The tests could be compiled for Windows with a
+separate set of `.bat` files that adapt to the relevant compiler settings
+(not provided).
+
+
+## Output
+
+The source and generated files and compiled binaries are placed in a
+dedicated folder under:
+
+ build/tmp/test/benchmark/
+
+Only flatcc includes files from the containing project - other
+benchmarks copy any relevant files into place.
+
+The optimized flatc C++ benchmark is 24K vs flatcc for C using 35K.
+
+
+## JSON numeric conversion
+
+The Json printer benchmark is significantly impacted by floating point
+conversion performance. By using the grisu3 algorithm instead of the
+printing speed more than doubles compared to sprintf "%.17g" method with
+clang glibc. The parsing, on the other hand, parsing slows down
+slightly because floats are always printed as double which increases the
+json text from 700 to 722 bytes. For comparison, RapidJSON also only
+supports double precision because the JSON spec does not specifically
+mention precision.
diff --git a/test/benchmark/benchall.sh b/test/benchmark/benchall.sh
new file mode 100755
index 0000000..87d6983
--- /dev/null
+++ b/test/benchmark/benchall.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd `dirname $0`
+
+echo "running all benchmarks (raw, flatc C++, flatcc C)"
+
+echo "building and benchmarking raw structs"
+benchraw/run.sh
+echo "building and benchmarking flatc generated C++"
+benchflatc/run.sh
+echo "building and benchmarking flatcc generated C"
+benchflatcc/run.sh
+echo "building and benchmarking flatcc json generated C"
+benchflatccjson/run.sh
diff --git a/test/benchmark/benchflatc/benchflatc.cpp b/test/benchmark/benchflatc/benchflatc.cpp
new file mode 100644
index 0000000..ae24abd
--- /dev/null
+++ b/test/benchmark/benchflatc/benchflatc.cpp
@@ -0,0 +1,70 @@
+#define BENCH_TITLE "flatc for C++"
+
+#define BENCHMARK_BUFSIZ 1000
+#define DECLARE_BENCHMARK(BM)\
+ void *BM = 0
+#define CLEAR_BENCHMARK(BM)
+
+#include <string.h>
+#include "flatbench_generated.h"
+
+using namespace flatbuffers;
+using namespace benchfb;
+
+/* The builder is created each time - perhaps fbb can be reused somehow? */
+int encode(void *bench, void *buffer, size_t *size)
+{
+ const int veclen = 3;
+ Offset<FooBar> vec[veclen];
+ FlatBufferBuilder fbb;
+
+ (void)bench;
+
+ for (int i = 0; i < veclen; i++) {
+ // We add + i to not make these identical copies for a more realistic
+ // compression test.
+ auto const &foo = Foo(0xABADCAFEABADCAFE + i, 10000 + i, '@' + i, 1000000 + i);
+ auto const &bar = Bar(foo, 123456 + i, 3.14159f + i, 10000 + i);
+ auto name = fbb.CreateString("Hello, World!");
+ auto foobar = CreateFooBar(fbb, &bar, name, 3.1415432432445543543 + i, '!' + i);
+ vec[i] = foobar;
+ }
+ auto location = fbb.CreateString("https://www.example.com/myurl/");
+ auto foobarvec = fbb.CreateVector(vec, veclen);
+ auto foobarcontainer = CreateFooBarContainer(fbb, foobarvec, true, Enum_Bananas, location);
+ fbb.Finish(foobarcontainer);
+ if (*size < fbb.GetSize()) {
+ return -1;
+ }
+ *size = fbb.GetSize();
+ memcpy(buffer, fbb.GetBufferPointer(), *size);
+ return 0;
+}
+
+int64_t decode(void *bench, void *buffer, size_t size, int64_t sum)
+{
+ auto foobarcontainer = GetFooBarContainer(buffer);
+
+ (void)bench;
+ sum += foobarcontainer->initialized();
+ sum += foobarcontainer->location()->Length();
+ sum += foobarcontainer->fruit();
+ for (unsigned int i = 0; i < foobarcontainer->list()->Length(); i++) {
+ auto foobar = foobarcontainer->list()->Get(i);
+ sum += foobar->name()->Length();
+ sum += foobar->postfix();
+ sum += static_cast<int64_t>(foobar->rating());
+ auto bar = foobar->sibling();
+ sum += static_cast<int64_t>(bar->ratio());
+ sum += bar->size();
+ sum += bar->time();
+ auto &foo = bar->parent();
+ sum += foo.count();
+ sum += foo.id();
+ sum += foo.length();
+ sum += foo.prefix();
+ }
+ return sum + 2 * sum;
+}
+
+#include "benchmain.h"
diff --git a/test/benchmark/benchflatc/flatbench_generated.h b/test/benchmark/benchflatc/flatbench_generated.h
new file mode 100644
index 0000000..0b2abc5
--- /dev/null
+++ b/test/benchmark/benchflatc/flatbench_generated.h
@@ -0,0 +1,166 @@
+// automatically generated by the FlatBuffers compiler, do not modify
+
+#ifndef FLATBUFFERS_GENERATED_FLATBENCH_BENCHFB_H_
+#define FLATBUFFERS_GENERATED_FLATBENCH_BENCHFB_H_
+
+#include "flatbuffers/flatbuffers.h"
+
+
+namespace benchfb {
+
+struct Foo;
+struct Bar;
+struct FooBar;
+struct FooBarContainer;
+
+enum Enum {
+ Enum_Apples = 0,
+ Enum_Pears = 1,
+ Enum_Bananas = 2
+};
+
+inline const char **EnumNamesEnum() {
+ static const char *names[] = { "Apples", "Pears", "Bananas", nullptr };
+ return names;
+}
+
+inline const char *EnumNameEnum(Enum e) { return EnumNamesEnum()[static_cast<int>(e)]; }
+
+MANUALLY_ALIGNED_STRUCT(8) Foo FLATBUFFERS_FINAL_CLASS {
+ private:
+ uint64_t id_;
+ int16_t count_;
+ int8_t prefix_;
+ int8_t __padding0;
+ uint32_t length_;
+
+ public:
+ Foo(uint64_t id, int16_t count, int8_t prefix, uint32_t length)
+ : id_(flatbuffers::EndianScalar(id)), count_(flatbuffers::EndianScalar(count)), prefix_(flatbuffers::EndianScalar(prefix)), __padding0(0), length_(flatbuffers::EndianScalar(length)) { (void)__padding0; }
+
+ uint64_t id() const { return flatbuffers::EndianScalar(id_); }
+ int16_t count() const { return flatbuffers::EndianScalar(count_); }
+ int8_t prefix() const { return flatbuffers::EndianScalar(prefix_); }
+ uint32_t length() const { return flatbuffers::EndianScalar(length_); }
+};
+STRUCT_END(Foo, 16);
+
+MANUALLY_ALIGNED_STRUCT(8) Bar FLATBUFFERS_FINAL_CLASS {
+ private:
+ Foo parent_;
+ int32_t time_;
+ float ratio_;
+ uint16_t size_;
+ int16_t __padding0;
+ int32_t __padding1;
+
+ public:
+ Bar(const Foo &parent, int32_t time, float ratio, uint16_t size)
+ : parent_(parent), time_(flatbuffers::EndianScalar(time)), ratio_(flatbuffers::EndianScalar(ratio)), size_(flatbuffers::EndianScalar(size)), __padding0(0), __padding1(0) { (void)__padding0; (void)__padding1; }
+
+ const Foo &parent() const { return parent_; }
+ int32_t time() const { return flatbuffers::EndianScalar(time_); }
+ float ratio() const { return flatbuffers::EndianScalar(ratio_); }
+ uint16_t size() const { return flatbuffers::EndianScalar(size_); }
+};
+STRUCT_END(Bar, 32);
+
+struct FooBar FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ const Bar *sibling() const { return GetStruct<const Bar *>(4); }
+ const flatbuffers::String *name() const { return GetPointer<const flatbuffers::String *>(6); }
+ double rating() const { return GetField<double>(8, 0); }
+ uint8_t postfix() const { return GetField<uint8_t>(10, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<Bar>(verifier, 4 /* sibling */) &&
+ VerifyField<flatbuffers::uoffset_t>(verifier, 6 /* name */) &&
+ verifier.Verify(name()) &&
+ VerifyField<double>(verifier, 8 /* rating */) &&
+ VerifyField<uint8_t>(verifier, 10 /* postfix */) &&
+ verifier.EndTable();
+ }
+};
+
+struct FooBarBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_sibling(const Bar *sibling) { fbb_.AddStruct(4, sibling); }
+ void add_name(flatbuffers::Offset<flatbuffers::String> name) { fbb_.AddOffset(6, name); }
+ void add_rating(double rating) { fbb_.AddElement<double>(8, rating, 0); }
+ void add_postfix(uint8_t postfix) { fbb_.AddElement<uint8_t>(10, postfix, 0); }
+ FooBarBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
+ FooBarBuilder &operator=(const FooBarBuilder &);
+ flatbuffers::Offset<FooBar> Finish() {
+ auto o = flatbuffers::Offset<FooBar>(fbb_.EndTable(start_, 4));
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FooBar> CreateFooBar(flatbuffers::FlatBufferBuilder &_fbb,
+ const Bar *sibling = 0,
+ flatbuffers::Offset<flatbuffers::String> name = 0,
+ double rating = 0,
+ uint8_t postfix = 0) {
+ FooBarBuilder builder_(_fbb);
+ builder_.add_rating(rating);
+ builder_.add_name(name);
+ builder_.add_sibling(sibling);
+ builder_.add_postfix(postfix);
+ return builder_.Finish();
+}
+
+struct FooBarContainer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ const flatbuffers::Vector<flatbuffers::Offset<FooBar>> *list() const { return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<FooBar>> *>(4); }
+ uint8_t initialized() const { return GetField<uint8_t>(6, 0); }
+ Enum fruit() const { return static_cast<Enum>(GetField<int16_t>(8, 0)); }
+ const flatbuffers::String *location() const { return GetPointer<const flatbuffers::String *>(10); }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<flatbuffers::uoffset_t>(verifier, 4 /* list */) &&
+ verifier.Verify(list()) &&
+ verifier.VerifyVectorOfTables(list()) &&
+ VerifyField<uint8_t>(verifier, 6 /* initialized */) &&
+ VerifyField<int16_t>(verifier, 8 /* fruit */) &&
+ VerifyField<flatbuffers::uoffset_t>(verifier, 10 /* location */) &&
+ verifier.Verify(location()) &&
+ verifier.EndTable();
+ }
+};
+
+struct FooBarContainerBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_list(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FooBar>>> list) { fbb_.AddOffset(4, list); }
+ void add_initialized(uint8_t initialized) { fbb_.AddElement<uint8_t>(6, initialized, 0); }
+ void add_fruit(Enum fruit) { fbb_.AddElement<int16_t>(8, static_cast<int16_t>(fruit), 0); }
+ void add_location(flatbuffers::Offset<flatbuffers::String> location) { fbb_.AddOffset(10, location); }
+ FooBarContainerBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
+ FooBarContainerBuilder &operator=(const FooBarContainerBuilder &);
+ flatbuffers::Offset<FooBarContainer> Finish() {
+ auto o = flatbuffers::Offset<FooBarContainer>(fbb_.EndTable(start_, 4));
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FooBarContainer> CreateFooBarContainer(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FooBar>>> list = 0,
+ uint8_t initialized = 0,
+ Enum fruit = Enum_Apples,
+ flatbuffers::Offset<flatbuffers::String> location = 0) {
+ FooBarContainerBuilder builder_(_fbb);
+ builder_.add_location(location);
+ builder_.add_list(list);
+ builder_.add_fruit(fruit);
+ builder_.add_initialized(initialized);
+ return builder_.Finish();
+}
+
+inline const benchfb::FooBarContainer *GetFooBarContainer(const void *buf) { return flatbuffers::GetRoot<benchfb::FooBarContainer>(buf); }
+
+inline bool VerifyFooBarContainerBuffer(flatbuffers::Verifier &verifier) { return verifier.VerifyBuffer<benchfb::FooBarContainer>(); }
+
+inline void FinishFooBarContainerBuffer(flatbuffers::FlatBufferBuilder &fbb, flatbuffers::Offset<benchfb::FooBarContainer> root) { fbb.Finish(root); }
+
+} // namespace benchfb
+
+#endif // FLATBUFFERS_GENERATED_FLATBENCH_BENCHFB_H_
diff --git a/test/benchmark/benchflatc/flatbuffers/flatbuffers.h b/test/benchmark/benchflatc/flatbuffers/flatbuffers.h
new file mode 100644
index 0000000..3482cbe
--- /dev/null
+++ b/test/benchmark/benchflatc/flatbuffers/flatbuffers.h
@@ -0,0 +1,1189 @@
+/*
+ * Copyright 2014 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_H_
+#define FLATBUFFERS_H_
+
+#include <assert.h>
+
+#include <cstdint>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <type_traits>
+#include <vector>
+#include <algorithm>
+#include <functional>
+#include <memory>
+
+#if __cplusplus <= 199711L && \
+ (!defined(_MSC_VER) || _MSC_VER < 1600) && \
+ (!defined(__GNUC__) || \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40603))
+ #error A C++11 compatible compiler is required for FlatBuffers.
+ #error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
+#endif
+
+// The wire format uses a little endian encoding (since that's efficient for
+// the common platforms).
+#if !defined(FLATBUFFERS_LITTLEENDIAN)
+ #if defined(__GNUC__) || defined(__clang__)
+ #ifdef __BIG_ENDIAN__
+ #define FLATBUFFERS_LITTLEENDIAN 0
+ #else
+ #define FLATBUFFERS_LITTLEENDIAN 1
+ #endif // __BIG_ENDIAN__
+ #elif defined(_MSC_VER)
+ #if defined(_M_PPC)
+ #define FLATBUFFERS_LITTLEENDIAN 0
+ #else
+ #define FLATBUFFERS_LITTLEENDIAN 1
+ #endif
+ #else
+ #error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN.
+ #endif
+#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
+
+#define FLATBUFFERS_VERSION_MAJOR 1
+#define FLATBUFFERS_VERSION_MINOR 0
+#define FLATBUFFERS_VERSION_REVISION 0
+#define FLATBUFFERS_STRING_EXPAND(X) #X
+#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
+
+#if (!defined(_MSC_VER) || _MSC_VER > 1600) && \
+ (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 407))
+ #define FLATBUFFERS_FINAL_CLASS final
+#else
+ #define FLATBUFFERS_FINAL_CLASS
+#endif
+
+namespace flatbuffers {
+
+// Our default offset / size type, 32bit on purpose on 64bit systems.
+// Also, using a consistent offset type maintains compatibility of serialized
+// offset values between 32bit and 64bit systems.
+typedef uint32_t uoffset_t;
+
+// Signed offsets for references that can go in both directions.
+typedef int32_t soffset_t;
+
+// Offset/index used in v-tables, can be changed to uint8_t in
+// format forks to save a bit of space if desired.
+typedef uint16_t voffset_t;
+
+typedef uintmax_t largest_scalar_t;
+
+// Pointer to relinquished memory.
+typedef std::unique_ptr<uint8_t, std::function<void(uint8_t * /* unused */)>>
+ unique_ptr_t;
+
+// Wrapper for uoffset_t to allow safe template specialization.
+template<typename T> struct Offset {
+ uoffset_t o;
+ Offset() : o(0) {}
+ Offset(uoffset_t _o) : o(_o) {}
+ Offset<void> Union() const { return Offset<void>(o); }
+};
+
+inline void EndianCheck() {
+ int endiantest = 1;
+ // If this fails, see FLATBUFFERS_LITTLEENDIAN above.
+ assert(*reinterpret_cast<char *>(&endiantest) == FLATBUFFERS_LITTLEENDIAN);
+ (void)endiantest;
+}
+
+template<typename T> T EndianScalar(T t) {
+ #if FLATBUFFERS_LITTLEENDIAN
+ return t;
+ #else
+ #if defined(_MSC_VER)
+ #pragma push_macro("__builtin_bswap16")
+ #pragma push_macro("__builtin_bswap32")
+ #pragma push_macro("__builtin_bswap64")
+ #define __builtin_bswap16 _byteswap_ushort
+ #define __builtin_bswap32 _byteswap_ulong
+ #define __builtin_bswap64 _byteswap_uint64
+ #endif
+ // If you're on the few remaining big endian platforms, we make the bold
+ // assumption you're also on gcc/clang, and thus have bswap intrinsics:
+ if (sizeof(T) == 1) { // Compile-time if-then's.
+ return t;
+ } else if (sizeof(T) == 2) {
+ auto r = __builtin_bswap16(*reinterpret_cast<uint16_t *>(&t));
+ return *reinterpret_cast<T *>(&r);
+ } else if (sizeof(T) == 4) {
+ auto r = __builtin_bswap32(*reinterpret_cast<uint32_t *>(&t));
+ return *reinterpret_cast<T *>(&r);
+ } else if (sizeof(T) == 8) {
+ auto r = __builtin_bswap64(*reinterpret_cast<uint64_t *>(&t));
+ return *reinterpret_cast<T *>(&r);
+ } else {
+ assert(0);
+ }
+ #if defined(_MSC_VER)
+ #pragma pop_macro("__builtin_bswap16")
+ #pragma pop_macro("__builtin_bswap32")
+ #pragma pop_macro("__builtin_bswap64")
+ #endif
+ #endif
+}
+
+template<typename T> T ReadScalar(const void *p) {
+ return EndianScalar(*reinterpret_cast<const T *>(p));
+}
+
+template<typename T> void WriteScalar(void *p, T t) {
+ *reinterpret_cast<T *>(p) = EndianScalar(t);
+}
+
+template<typename T> size_t AlignOf() {
+ #ifdef _MSC_VER
+ return __alignof(T);
+ #else
+ return alignof(T);
+ #endif
+}
+
+// When we read serialized data from memory, in the case of most scalars,
+// we want to just read T, but in the case of Offset, we want to actually
+// perform the indirection and return a pointer.
+// The template specialization below does just that.
+// It is wrapped in a struct since function templates can't overload on the
+// return type like this.
+// The typedef is for the convenience of callers of this function
+// (avoiding the need for a trailing return decltype)
+template<typename T> struct IndirectHelper {
+ typedef T return_type;
+ static const size_t element_stride = sizeof(T);
+ static return_type Read(const uint8_t *p, uoffset_t i) {
+ return EndianScalar((reinterpret_cast<const T *>(p))[i]);
+ }
+};
+template<typename T> struct IndirectHelper<Offset<T>> {
+ typedef const T *return_type;
+ static const size_t element_stride = sizeof(uoffset_t);
+ static return_type Read(const uint8_t *p, uoffset_t i) {
+ p += i * sizeof(uoffset_t);
+ return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
+ }
+};
+template<typename T> struct IndirectHelper<const T *> {
+ typedef const T *return_type;
+ static const size_t element_stride = sizeof(T);
+ static return_type Read(const uint8_t *p, uoffset_t i) {
+ return reinterpret_cast<const T *>(p + i * sizeof(T));
+ }
+};
+
+// An STL compatible iterator implementation for Vector below, effectively
+// calling Get() for every element.
+template<typename T, bool bConst>
+struct VectorIterator : public
+ std::iterator < std::input_iterator_tag,
+ typename std::conditional < bConst,
+ const typename IndirectHelper<T>::return_type,
+ typename IndirectHelper<T>::return_type > ::type, uoffset_t > {
+
+ typedef std::iterator<std::input_iterator_tag,
+ typename std::conditional<bConst,
+ const typename IndirectHelper<T>::return_type,
+ typename IndirectHelper<T>::return_type>::type, uoffset_t> super_type;
+
+public:
+ VectorIterator(const uint8_t *data, uoffset_t i) :
+ data_(data + IndirectHelper<T>::element_stride * i) {};
+ VectorIterator(const VectorIterator &other) : data_(other.data_) {}
+ VectorIterator(VectorIterator &&other) : data_(std::move(other.data_)) {}
+
+ VectorIterator &operator=(const VectorIterator &other) {
+ data_ = other.data_;
+ return *this;
+ }
+
+ VectorIterator &operator=(VectorIterator &&other) {
+ data_ = other.data_;
+ return *this;
+ }
+
+ bool operator==(const VectorIterator& other) const {
+ return data_ == other.data_;
+ }
+
+ bool operator!=(const VectorIterator& other) const {
+ return data_ != other.data_;
+ }
+
+ ptrdiff_t operator-(const VectorIterator& other) const {
+ return (data_ - other.data_) / IndirectHelper<T>::element_stride;
+ }
+
+ typename super_type::value_type operator *() const {
+ return IndirectHelper<T>::Read(data_, 0);
+ }
+
+ typename super_type::value_type operator->() const {
+ return IndirectHelper<T>::Read(data_, 0);
+ }
+
+ VectorIterator &operator++() {
+ data_ += IndirectHelper<T>::element_stride;
+ return *this;
+ }
+
+ VectorIterator operator++(int) {
+ VectorIterator temp(data_);
+ data_ += IndirectHelper<T>::element_stride;
+ return temp;
+ }
+
+private:
+ const uint8_t *data_;
+};
+
+// This is used as a helper type for accessing vectors.
+// Vector::data() assumes the vector elements start after the length field.
+template<typename T> class Vector {
+public:
+ typedef VectorIterator<T, false> iterator;
+ typedef VectorIterator<T, true> const_iterator;
+
+ uoffset_t size() const { return EndianScalar(length_); }
+
+ // Deprecated: use size(). Here for backwards compatibility.
+ uoffset_t Length() const { return size(); }
+
+ typedef typename IndirectHelper<T>::return_type return_type;
+
+ return_type Get(uoffset_t i) const {
+ assert(i < size());
+ return IndirectHelper<T>::Read(Data(), i);
+ }
+
+ return_type operator[](uoffset_t i) const { return Get(i); }
+
+ // If this is a Vector of enums, T will be its storage type, not the enum
+ // type. This function makes it convenient to retrieve value with enum
+ // type E.
+ template<typename E> E GetEnum(uoffset_t i) const {
+ return static_cast<E>(Get(i));
+ }
+
+ const void *GetStructFromOffset(size_t o) const {
+ return reinterpret_cast<const void *>(Data() + o);
+ }
+
+ iterator begin() { return iterator(Data(), 0); }
+ const_iterator begin() const { return const_iterator(Data(), 0); }
+
+ iterator end() { return iterator(Data(), size()); }
+ const_iterator end() const { return const_iterator(Data(), size()); }
+
+ // Change elements if you have a non-const pointer to this object.
+ // Scalars only. See reflection_reader.h, and the documentation.
+ void Mutate(uoffset_t i, T val) {
+ assert(i < size());
+ WriteScalar(data() + i, val);
+ }
+
+ // Change an element of a vector of tables (or strings).
+ // "val" points to the new table/string, as you can obtain from
+ // e.g. reflection::AddFlatBuffer().
+ void MutateOffset(uoffset_t i, const uint8_t *val) {
+ assert(i < size());
+ assert(sizeof(T) == sizeof(uoffset_t));
+ WriteScalar(data() + i, val - (Data() + i * sizeof(uoffset_t)));
+ }
+
+ // The raw data in little endian format. Use with care.
+ const uint8_t *Data() const {
+ return reinterpret_cast<const uint8_t *>(&length_ + 1);
+ }
+
+ uint8_t *Data() {
+ return reinterpret_cast<uint8_t *>(&length_ + 1);
+ }
+
+ // Similarly, but typed, much like std::vector::data
+ const T *data() const { return reinterpret_cast<const T *>(Data()); }
+ T *data() { return reinterpret_cast<T *>(Data()); }
+
+ template<typename K> return_type LookupByKey(K key) const {
+ void *search_result = std::bsearch(&key, Data(), size(),
+ IndirectHelper<T>::element_stride, KeyCompare<K>);
+
+ if (!search_result) {
+ return nullptr; // Key not found.
+ }
+
+ const uint8_t *data = reinterpret_cast<const uint8_t *>(search_result);
+
+ return IndirectHelper<T>::Read(data, 0);
+ }
+
+protected:
+ // This class is only used to access pre-existing data. Don't ever
+ // try to construct these manually.
+ Vector();
+
+ uoffset_t length_;
+
+private:
+ template<typename K> static int KeyCompare(const void *ap, const void *bp) {
+ const K *key = reinterpret_cast<const K *>(ap);
+ const uint8_t *data = reinterpret_cast<const uint8_t *>(bp);
+ auto table = IndirectHelper<T>::Read(data, 0);
+
+ // std::bsearch compares with the operands transposed, so we negate the
+ // result here.
+ return -table->KeyCompareWithValue(*key);
+ }
+};
+
+// Represent a vector much like the template above, but in this case we
+// don't know what the element types are (used with reflection.h).
+class VectorOfAny {
+public:
+ uoffset_t size() const { return EndianScalar(length_); }
+
+ const uint8_t *Data() const {
+ return reinterpret_cast<const uint8_t *>(&length_ + 1);
+ }
+ uint8_t *Data() {
+ return reinterpret_cast<uint8_t *>(&length_ + 1);
+ }
+protected:
+ VectorOfAny();
+
+ uoffset_t length_;
+};
+
+// Convenient helper function to get the length of any vector, regardless
+// of wether it is null or not (the field is not set).
+template<typename T> static inline size_t VectorLength(const Vector<T> *v) {
+ return v ? v->Length() : 0;
+}
+
+struct String : public Vector<char> {
+ const char *c_str() const { return reinterpret_cast<const char *>(Data()); }
+ std::string str() const { return c_str(); }
+
+ bool operator <(const String &o) const {
+ return strcmp(c_str(), o.c_str()) < 0;
+ }
+};
+
+// Simple indirection for buffer allocation, to allow this to be overridden
+// with custom allocation (see the FlatBufferBuilder constructor).
+class simple_allocator {
+ public:
+ virtual ~simple_allocator() {}
+ virtual uint8_t *allocate(size_t size) const { return new uint8_t[size]; }
+ virtual void deallocate(uint8_t *p) const { delete[] p; }
+};
+
+// This is a minimal replication of std::vector<uint8_t> functionality,
+// except growing from higher to lower addresses. i.e push_back() inserts data
+// in the lowest address in the vector.
+class vector_downward {
+ public:
+ explicit vector_downward(size_t initial_size,
+ const simple_allocator &allocator)
+ : reserved_(initial_size),
+ buf_(allocator.allocate(reserved_)),
+ cur_(buf_ + reserved_),
+ allocator_(allocator) {
+ assert((initial_size & (sizeof(largest_scalar_t) - 1)) == 0);
+ }
+
+ ~vector_downward() {
+ if (buf_)
+ allocator_.deallocate(buf_);
+ }
+
+ void clear() {
+ if (buf_ == nullptr)
+ buf_ = allocator_.allocate(reserved_);
+
+ cur_ = buf_ + reserved_;
+ }
+
+ // Relinquish the pointer to the caller.
+ unique_ptr_t release() {
+ // Actually deallocate from the start of the allocated memory.
+ std::function<void(uint8_t *)> deleter(
+ std::bind(&simple_allocator::deallocate, allocator_, buf_));
+
+ // Point to the desired offset.
+ unique_ptr_t retval(data(), deleter);
+
+ // Don't deallocate when this instance is destroyed.
+ buf_ = nullptr;
+ cur_ = nullptr;
+
+ return retval;
+ }
+
+ size_t growth_policy(size_t bytes) {
+ return (bytes / 2) & ~(sizeof(largest_scalar_t) - 1);
+ }
+
+ uint8_t *make_space(size_t len) {
+ if (len > static_cast<size_t>(cur_ - buf_)) {
+ auto old_size = size();
+ auto largest_align = AlignOf<largest_scalar_t>();
+ reserved_ += std::max(len, growth_policy(reserved_));
+ // Round up to avoid undefined behavior from unaligned loads and stores.
+ reserved_ = (reserved_ + (largest_align - 1)) & ~(largest_align - 1);
+ auto new_buf = allocator_.allocate(reserved_);
+ auto new_cur = new_buf + reserved_ - old_size;
+ memcpy(new_cur, cur_, old_size);
+ cur_ = new_cur;
+ allocator_.deallocate(buf_);
+ buf_ = new_buf;
+ }
+ cur_ -= len;
+ // Beyond this, signed offsets may not have enough range:
+ // (FlatBuffers > 2GB not supported).
+ assert(size() < (1UL << (sizeof(soffset_t) * 8 - 1)) - 1);
+ return cur_;
+ }
+
+ uoffset_t size() const {
+ assert(cur_ != nullptr && buf_ != nullptr);
+ return static_cast<uoffset_t>(reserved_ - (cur_ - buf_));
+ }
+
+ uint8_t *data() const {
+ assert(cur_ != nullptr);
+ return cur_;
+ }
+
+ uint8_t *data_at(size_t offset) { return buf_ + reserved_ - offset; }
+
+ // push() & fill() are most frequently called with small byte counts (<= 4),
+ // which is why we're using loops rather than calling memcpy/memset.
+ void push(const uint8_t *bytes, size_t num) {
+ auto dest = make_space(num);
+ for (size_t i = 0; i < num; i++) dest[i] = bytes[i];
+ }
+
+ void fill(size_t zero_pad_bytes) {
+ auto dest = make_space(zero_pad_bytes);
+ for (size_t i = 0; i < zero_pad_bytes; i++) dest[i] = 0;
+ }
+
+ void pop(size_t bytes_to_remove) { cur_ += bytes_to_remove; }
+
+ private:
+ // You shouldn't really be copying instances of this class.
+ vector_downward(const vector_downward &);
+ vector_downward &operator=(const vector_downward &);
+
+ size_t reserved_;
+ uint8_t *buf_;
+ uint8_t *cur_; // Points at location between empty (below) and used (above).
+ const simple_allocator &allocator_;
+};
+
+// Converts a Field ID to a virtual table offset.
+inline voffset_t FieldIndexToOffset(voffset_t field_id) {
+ // Should correspond to what EndTable() below builds up.
+ const int fixed_fields = 2; // Vtable size and Object Size.
+ return (field_id + fixed_fields) * sizeof(voffset_t);
+}
+
+// Computes how many bytes you'd have to pad to be able to write an
+// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
+// memory).
+inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
+ return ((~buf_size) + 1) & (scalar_size - 1);
+}
+
+// Helper class to hold data needed in creation of a flat buffer.
+// To serialize data, you typically call one of the Create*() functions in
+// the generated code, which in turn call a sequence of StartTable/PushElement/
+// AddElement/EndTable, or the builtin CreateString/CreateVector functions.
+// Do this is depth-first order to build up a tree to the root.
+// Finish() wraps up the buffer ready for transport.
+class FlatBufferBuilder FLATBUFFERS_FINAL_CLASS {
+ public:
+ explicit FlatBufferBuilder(uoffset_t initial_size = 1024,
+ const simple_allocator *allocator = nullptr)
+ : buf_(initial_size, allocator ? *allocator : default_allocator),
+ minalign_(1), force_defaults_(false) {
+ offsetbuf_.reserve(16); // Avoid first few reallocs.
+ vtables_.reserve(16);
+ EndianCheck();
+ }
+
+ // Reset all the state in this FlatBufferBuilder so it can be reused
+ // to construct another buffer.
+ void Clear() {
+ buf_.clear();
+ offsetbuf_.clear();
+ vtables_.clear();
+ minalign_ = 1;
+ }
+
+ // The current size of the serialized buffer, counting from the end.
+ uoffset_t GetSize() const { return buf_.size(); }
+
+ // Get the serialized buffer (after you call Finish()).
+ uint8_t *GetBufferPointer() const { return buf_.data(); }
+
+ // Get the released pointer to the serialized buffer.
+ // Don't attempt to use this FlatBufferBuilder afterwards!
+ // The unique_ptr returned has a special allocator that knows how to
+ // deallocate this pointer (since it points to the middle of an allocation).
+ // Thus, do not mix this pointer with other unique_ptr's, or call release() /
+ // reset() on it.
+ unique_ptr_t ReleaseBufferPointer() { return buf_.release(); }
+
+ void ForceDefaults(bool fd) { force_defaults_ = fd; }
+
+ void Pad(size_t num_bytes) { buf_.fill(num_bytes); }
+
+ void Align(size_t elem_size) {
+ if (elem_size > minalign_) minalign_ = elem_size;
+ buf_.fill(PaddingBytes(buf_.size(), elem_size));
+ }
+
+ void PushBytes(const uint8_t *bytes, size_t size) {
+ buf_.push(bytes, size);
+ }
+
+ void PopBytes(size_t amount) { buf_.pop(amount); }
+
+ template<typename T> void AssertScalarT() {
+ // The code assumes power of 2 sizes and endian-swap-ability.
+ static_assert(std::is_scalar<T>::value
+ // The Offset<T> type is essentially a scalar but fails is_scalar.
+ || sizeof(T) == sizeof(Offset<void>),
+ "T must be a scalar type");
+ }
+
+ // Write a single aligned scalar to the buffer
+ template<typename T> uoffset_t PushElement(T element) {
+ AssertScalarT<T>();
+ T litle_endian_element = EndianScalar(element);
+ Align(sizeof(T));
+ PushBytes(reinterpret_cast<uint8_t *>(&litle_endian_element), sizeof(T));
+ return GetSize();
+ }
+
+ template<typename T> uoffset_t PushElement(Offset<T> off) {
+ // Special case for offsets: see ReferTo below.
+ return PushElement(ReferTo(off.o));
+ }
+
+ // When writing fields, we track where they are, so we can create correct
+ // vtables later.
+ void TrackField(voffset_t field, uoffset_t off) {
+ FieldLoc fl = { off, field };
+ offsetbuf_.push_back(fl);
+ }
+
+ // Like PushElement, but additionally tracks the field this represents.
+ template<typename T> void AddElement(voffset_t field, T e, T def) {
+ // We don't serialize values equal to the default.
+ if (e == def && !force_defaults_) return;
+ auto off = PushElement(e);
+ TrackField(field, off);
+ }
+
+ template<typename T> void AddOffset(voffset_t field, Offset<T> off) {
+ if (!off.o) return; // An offset of 0 means NULL, don't store.
+ AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
+ }
+
+ template<typename T> void AddStruct(voffset_t field, const T *structptr) {
+ if (!structptr) return; // Default, don't store.
+ Align(AlignOf<T>());
+ PushBytes(reinterpret_cast<const uint8_t *>(structptr), sizeof(T));
+ TrackField(field, GetSize());
+ }
+
+ void AddStructOffset(voffset_t field, uoffset_t off) {
+ TrackField(field, off);
+ }
+
+ // Offsets initially are relative to the end of the buffer (downwards).
+ // This function converts them to be relative to the current location
+ // in the buffer (when stored here), pointing upwards.
+ uoffset_t ReferTo(uoffset_t off) {
+ Align(sizeof(uoffset_t)); // To ensure GetSize() below is correct.
+ assert(off <= GetSize()); // Must refer to something already in buffer.
+ return GetSize() - off + sizeof(uoffset_t);
+ }
+
+ void NotNested() {
+ // If you hit this, you're trying to construct an object when another
+ // hasn't finished yet.
+ assert(!offsetbuf_.size());
+ }
+
+ // From generated code (or from the parser), we call StartTable/EndTable
+ // with a sequence of AddElement calls in between.
+ uoffset_t StartTable() {
+ NotNested();
+ return GetSize();
+ }
+
+ // This finishes one serialized object by generating the vtable if it's a
+ // table, comparing it against existing vtables, and writing the
+ // resulting vtable offset.
+ uoffset_t EndTable(uoffset_t start, voffset_t numfields) {
+ // Write the vtable offset, which is the start of any Table.
+ // We fill in its value later.
+ auto vtableoffsetloc = PushElement<soffset_t>(0);
+ // Write a vtable, which consists entirely of voffset_t elements.
+ // It starts with the number of offsets, followed by a type id, followed
+ // by the offsets themselves. In reverse:
+ buf_.fill(numfields * sizeof(voffset_t));
+ auto table_object_size = vtableoffsetloc - start;
+ assert(table_object_size < 0x10000); // Vtable use 16bit offsets.
+ PushElement<voffset_t>(static_cast<voffset_t>(table_object_size));
+ PushElement<voffset_t>(FieldIndexToOffset(numfields));
+ // Write the offsets into the table
+ for (auto field_location = offsetbuf_.begin();
+ field_location != offsetbuf_.end();
+ ++field_location) {
+ auto pos = static_cast<voffset_t>(vtableoffsetloc - field_location->off);
+ // If this asserts, it means you've set a field twice.
+ assert(!ReadScalar<voffset_t>(buf_.data() + field_location->id));
+ WriteScalar<voffset_t>(buf_.data() + field_location->id, pos);
+ }
+ offsetbuf_.clear();
+ auto vt1 = reinterpret_cast<voffset_t *>(buf_.data());
+ auto vt1_size = ReadScalar<voffset_t>(vt1);
+ auto vt_use = GetSize();
+ // See if we already have generated a vtable with this exact same
+ // layout before. If so, make it point to the old one, remove this one.
+ for (auto it = vtables_.begin(); it != vtables_.end(); ++it) {
+ auto vt2 = reinterpret_cast<voffset_t *>(buf_.data_at(*it));
+ auto vt2_size = *vt2;
+ if (vt1_size != vt2_size || memcmp(vt2, vt1, vt1_size)) continue;
+ vt_use = *it;
+ buf_.pop(GetSize() - vtableoffsetloc);
+ break;
+ }
+ // If this is a new vtable, remember it.
+ if (vt_use == GetSize()) {
+ vtables_.push_back(vt_use);
+ }
+ // Fill the vtable offset we created above.
+ // The offset points from the beginning of the object to where the
+ // vtable is stored.
+ // Offsets default direction is downward in memory for future format
+ // flexibility (storing all vtables at the start of the file).
+ WriteScalar(buf_.data_at(vtableoffsetloc),
+ static_cast<soffset_t>(vt_use) -
+ static_cast<soffset_t>(vtableoffsetloc));
+ return vtableoffsetloc;
+ }
+
+ // This checks a required field has been set in a given table that has
+ // just been constructed.
+ template<typename T> void Required(Offset<T> table, voffset_t field) {
+ auto table_ptr = buf_.data_at(table.o);
+ auto vtable_ptr = table_ptr - ReadScalar<soffset_t>(table_ptr);
+ bool ok = ReadScalar<voffset_t>(vtable_ptr + field) != 0;
+ // If this fails, the caller will show what field needs to be set.
+ assert(ok);
+ (void)ok;
+ }
+
+ uoffset_t StartStruct(size_t alignment) {
+ Align(alignment);
+ return GetSize();
+ }
+
+ uoffset_t EndStruct() { return GetSize(); }
+
+ void ClearOffsets() { offsetbuf_.clear(); }
+
+ // Aligns such that when "len" bytes are written, an object can be written
+ // after it with "alignment" without padding.
+ void PreAlign(size_t len, size_t alignment) {
+ buf_.fill(PaddingBytes(GetSize() + len, alignment));
+ }
+ template<typename T> void PreAlign(size_t len) {
+ AssertScalarT<T>();
+ PreAlign(len, sizeof(T));
+ }
+
+ // Functions to store strings, which are allowed to contain any binary data.
+ Offset<String> CreateString(const char *str, size_t len) {
+ NotNested();
+ PreAlign<uoffset_t>(len + 1); // Always 0-terminated.
+ buf_.fill(1);
+ PushBytes(reinterpret_cast<const uint8_t *>(str), len);
+ PushElement(static_cast<uoffset_t>(len));
+ return Offset<String>(GetSize());
+ }
+
+ Offset<String> CreateString(const char *str) {
+ return CreateString(str, strlen(str));
+ }
+
+ Offset<String> CreateString(const std::string &str) {
+ return CreateString(str.c_str(), str.length());
+ }
+
+ Offset<String> CreateString(const String *str) {
+ return CreateString(str->c_str(), str->Length());
+ }
+
+ uoffset_t EndVector(size_t len) {
+ return PushElement(static_cast<uoffset_t>(len));
+ }
+
+ void StartVector(size_t len, size_t elemsize) {
+ PreAlign<uoffset_t>(len * elemsize);
+ PreAlign(len * elemsize, elemsize); // Just in case elemsize > uoffset_t.
+ }
+
+ uint8_t *ReserveElements(size_t len, size_t elemsize) {
+ return buf_.make_space(len * elemsize);
+ }
+
+ template<typename T> Offset<Vector<T>> CreateVector(const T *v, size_t len) {
+ NotNested();
+ StartVector(len, sizeof(T));
+ for (auto i = len; i > 0; ) {
+ PushElement(v[--i]);
+ }
+ return Offset<Vector<T>>(EndVector(len));
+ }
+
+ template<typename T> Offset<Vector<T>> CreateVector(const std::vector<T> &v) {
+ return CreateVector(v.data(), v.size());
+ }
+
+ template<typename T> Offset<Vector<const T *>> CreateVectorOfStructs(
+ const T *v, size_t len) {
+ NotNested();
+ StartVector(len * sizeof(T) / AlignOf<T>(), AlignOf<T>());
+ PushBytes(reinterpret_cast<const uint8_t *>(v), sizeof(T) * len);
+ return Offset<Vector<const T *>>(EndVector(len));
+ }
+
+ template<typename T> Offset<Vector<const T *>> CreateVectorOfStructs(
+ const std::vector<T> &v) {
+ return CreateVectorOfStructs(v.data(), v.size());
+ }
+
+ template<typename T> Offset<Vector<Offset<T>>> CreateVectorOfSortedTables(
+ Offset<T> *v, size_t len) {
+ std::sort(v, v + len,
+ [this](const Offset<T> &a, const Offset<T> &b) -> bool {
+ auto table_a = reinterpret_cast<T *>(buf_.data_at(a.o));
+ auto table_b = reinterpret_cast<T *>(buf_.data_at(b.o));
+ return table_a->KeyCompareLessThan(table_b);
+ }
+ );
+ return CreateVector(v, len);
+ }
+
+ template<typename T> Offset<Vector<Offset<T>>> CreateVectorOfSortedTables(
+ std::vector<Offset<T>> *v) {
+ return CreateVectorOfSortedTables(v->data(), v->size());
+ }
+
+ // Specialized version for non-copying use cases. Write the data any time
+ // later to the returned buffer pointer `buf`.
+ uoffset_t CreateUninitializedVector(size_t len, size_t elemsize,
+ uint8_t **buf) {
+ NotNested();
+ StartVector(len, elemsize);
+ *buf = buf_.make_space(len * elemsize);
+ return EndVector(len);
+ }
+
+ template<typename T> Offset<Vector<T>> CreateUninitializedVector(
+ size_t len, T **buf) {
+ return CreateUninitializedVector(len, sizeof(T),
+ reinterpret_cast<uint8_t **>(buf));
+ }
+
+ static const size_t kFileIdentifierLength = 4;
+
+ // Finish serializing a buffer by writing the root offset.
+ // If a file_identifier is given, the buffer will be prefixed with a standard
+ // FlatBuffers file header.
+ template<typename T> void Finish(Offset<T> root,
+ const char *file_identifier = nullptr) {
+ // This will cause the whole buffer to be aligned.
+ PreAlign(sizeof(uoffset_t) + (file_identifier ? kFileIdentifierLength : 0),
+ minalign_);
+ if (file_identifier) {
+ assert(strlen(file_identifier) == kFileIdentifierLength);
+ buf_.push(reinterpret_cast<const uint8_t *>(file_identifier),
+ kFileIdentifierLength);
+ }
+ PushElement(ReferTo(root.o)); // Location of root.
+ }
+
+ private:
+ // You shouldn't really be copying instances of this class.
+ FlatBufferBuilder(const FlatBufferBuilder &);
+ FlatBufferBuilder &operator=(const FlatBufferBuilder &);
+
+ struct FieldLoc {
+ uoffset_t off;
+ voffset_t id;
+ };
+
+ simple_allocator default_allocator;
+
+ vector_downward buf_;
+
+ // Accumulating offsets of table members while it is being built.
+ std::vector<FieldLoc> offsetbuf_;
+
+ std::vector<uoffset_t> vtables_; // todo: Could make this into a map?
+
+ size_t minalign_;
+
+ bool force_defaults_; // Serialize values equal to their defaults anyway.
+};
+
+// Helpers to get a typed pointer to the root object contained in the buffer.
+template<typename T> T *GetMutableRoot(void *buf) {
+ EndianCheck();
+ return reinterpret_cast<T *>(reinterpret_cast<uint8_t *>(buf) +
+ EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
+}
+
+template<typename T> const T *GetRoot(const void *buf) {
+ return GetMutableRoot<T>(const_cast<void *>(buf));
+}
+
+// Helper to see if the identifier in a buffer has the expected value.
+inline bool BufferHasIdentifier(const void *buf, const char *identifier) {
+ return strncmp(reinterpret_cast<const char *>(buf) + sizeof(uoffset_t),
+ identifier, FlatBufferBuilder::kFileIdentifierLength) == 0;
+}
+
+// Helper class to verify the integrity of a FlatBuffer
+class Verifier FLATBUFFERS_FINAL_CLASS {
+ public:
+ Verifier(const uint8_t *buf, size_t buf_len, size_t _max_depth = 64,
+ size_t _max_tables = 1000000)
+ : buf_(buf), end_(buf + buf_len), depth_(0), max_depth_(_max_depth),
+ num_tables_(0), max_tables_(_max_tables)
+ {}
+
+ // Central location where any verification failures register.
+ bool Check(bool ok) const {
+ #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
+ assert(ok);
+ #endif
+ return ok;
+ }
+
+ // Verify any range within the buffer.
+ bool Verify(const void *elem, size_t elem_len) const {
+ return Check(elem_len <= (size_t) (end_ - buf_) && elem >= buf_ && elem <= end_ - elem_len);
+ }
+
+ // Verify a range indicated by sizeof(T).
+ template<typename T> bool Verify(const void *elem) const {
+ return Verify(elem, sizeof(T));
+ }
+
+ // Verify a pointer (may be NULL) of a table type.
+ template<typename T> bool VerifyTable(const T *table) {
+ return !table || table->Verify(*this);
+ }
+
+ // Verify a pointer (may be NULL) of any vector type.
+ template<typename T> bool Verify(const Vector<T> *vec) const {
+ const uint8_t *end;
+ return !vec ||
+ VerifyVector(reinterpret_cast<const uint8_t *>(vec), sizeof(T),
+ &end);
+ }
+
+ // Verify a pointer (may be NULL) to string.
+ bool Verify(const String *str) const {
+ const uint8_t *end;
+ return !str ||
+ (VerifyVector(reinterpret_cast<const uint8_t *>(str), 1, &end) &&
+ Verify(end, 1) && // Must have terminator
+ Check(*end == '\0')); // Terminating byte must be 0.
+ }
+
+ // Common code between vectors and strings.
+ bool VerifyVector(const uint8_t *vec, size_t elem_size,
+ const uint8_t **end) const {
+ // Check we can read the size field.
+ if (!Verify<uoffset_t>(vec)) return false;
+ // Check the whole array. If this is a string, the byte past the array
+ // must be 0.
+ auto size = ReadScalar<uoffset_t>(vec);
+ auto byte_size = sizeof(size) + elem_size * size;
+ *end = vec + byte_size;
+ return Verify(vec, byte_size);
+ }
+
+ // Special case for string contents, after the above has been called.
+ bool VerifyVectorOfStrings(const Vector<Offset<String>> *vec) const {
+ if (vec) {
+ for (uoffset_t i = 0; i < vec->size(); i++) {
+ if (!Verify(vec->Get(i))) return false;
+ }
+ }
+ return true;
+ }
+
+ // Special case for table contents, after the above has been called.
+ template<typename T> bool VerifyVectorOfTables(const Vector<Offset<T>> *vec) {
+ if (vec) {
+ for (uoffset_t i = 0; i < vec->size(); i++) {
+ if (!vec->Get(i)->Verify(*this)) return false;
+ }
+ }
+ return true;
+ }
+
+ // Verify this whole buffer, starting with root type T.
+ template<typename T> bool VerifyBuffer() {
+ // Call T::Verify, which must be in the generated code for this type.
+ return Verify<uoffset_t>(buf_) &&
+ reinterpret_cast<const T *>(buf_ + ReadScalar<uoffset_t>(buf_))->
+ Verify(*this);
+ }
+
+ // Called at the start of a table to increase counters measuring data
+ // structure depth and amount, and possibly bails out with false if
+ // limits set by the constructor have been hit. Needs to be balanced
+ // with EndTable().
+ bool VerifyComplexity() {
+ depth_++;
+ num_tables_++;
+ return Check(depth_ <= max_depth_ && num_tables_ <= max_tables_);
+ }
+
+ // Called at the end of a table to pop the depth count.
+ bool EndTable() {
+ depth_--;
+ return true;
+ }
+
+ private:
+ const uint8_t *buf_;
+ const uint8_t *end_;
+ size_t depth_;
+ size_t max_depth_;
+ size_t num_tables_;
+ size_t max_tables_;
+};
+
+// "structs" are flat structures that do not have an offset table, thus
+// always have all members present and do not support forwards/backwards
+// compatible extensions.
+
+class Struct FLATBUFFERS_FINAL_CLASS {
+ public:
+ template<typename T> T GetField(uoffset_t o) const {
+ return ReadScalar<T>(&data_[o]);
+ }
+
+ template<typename T> T GetPointer(uoffset_t o) const {
+ auto p = &data_[o];
+ return reinterpret_cast<T>(p + ReadScalar<uoffset_t>(p));
+ }
+
+ template<typename T> T GetStruct(uoffset_t o) const {
+ return reinterpret_cast<T>(&data_[o]);
+ }
+
+ const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; }
+ uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; }
+
+ private:
+ uint8_t data_[1];
+};
+
+// "tables" use an offset table (possibly shared) that allows fields to be
+// omitted and added at will, but uses an extra indirection to read.
+class Table {
+ public:
+ // This gets the field offset for any of the functions below it, or 0
+ // if the field was not present.
+ voffset_t GetOptionalFieldOffset(voffset_t field) const {
+ // The vtable offset is always at the start.
+ auto vtable = data_ - ReadScalar<soffset_t>(data_);
+ // The first element is the size of the vtable (fields + type id + itself).
+ auto vtsize = ReadScalar<voffset_t>(vtable);
+ // If the field we're accessing is outside the vtable, we're reading older
+ // data, so it's the same as if the offset was 0 (not present).
+ return field < vtsize ? ReadScalar<voffset_t>(vtable + field) : 0;
+ }
+
+ template<typename T> T GetField(voffset_t field, T defaultval) const {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
+ }
+
+ template<typename P> P GetPointer(voffset_t field) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = data_ + field_offset;
+ return field_offset
+ ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
+ : nullptr;
+ }
+ template<typename P> P GetPointer(voffset_t field) const {
+ return const_cast<Table *>(this)->GetPointer<P>(field);
+ }
+
+ template<typename P> P GetStruct(voffset_t field) const {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = const_cast<uint8_t *>(data_ + field_offset);
+ return field_offset ? reinterpret_cast<P>(p) : nullptr;
+ }
+
+ template<typename T> bool SetField(voffset_t field, T val) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ if (!field_offset) return false;
+ WriteScalar(data_ + field_offset, val);
+ return true;
+ }
+
+ bool SetPointer(voffset_t field, const uint8_t *val) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ if (!field_offset) return false;
+ WriteScalar(data_ + field_offset, val - (data_ + field_offset));
+ return true;
+ }
+
+ uint8_t *GetAddressOf(voffset_t field) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return field_offset ? data_ + field_offset : nullptr;
+ }
+ const uint8_t *GetAddressOf(voffset_t field) const {
+ return const_cast<Table *>(this)->GetAddressOf(field);
+ }
+
+ uint8_t *GetVTable() { return data_ - ReadScalar<soffset_t>(data_); }
+
+ bool CheckField(voffset_t field) const {
+ return GetOptionalFieldOffset(field) != 0;
+ }
+
+ // Verify the vtable of this table.
+ // Call this once per table, followed by VerifyField once per field.
+ bool VerifyTableStart(Verifier &verifier) const {
+ // Check the vtable offset.
+ if (!verifier.Verify<soffset_t>(data_)) return false;
+ auto vtable = data_ - ReadScalar<soffset_t>(data_);
+ // Check the vtable size field, then check vtable fits in its entirety.
+ return verifier.VerifyComplexity() &&
+ verifier.Verify<voffset_t>(vtable) &&
+ verifier.Verify(vtable, ReadScalar<voffset_t>(vtable));
+ }
+
+ // Verify a particular field.
+ template<typename T> bool VerifyField(const Verifier &verifier,
+ voffset_t field) const {
+ // Calling GetOptionalFieldOffset should be safe now thanks to
+ // VerifyTable().
+ auto field_offset = GetOptionalFieldOffset(field);
+ // Check the actual field.
+ return !field_offset || verifier.Verify<T>(data_ + field_offset);
+ }
+
+ // VerifyField for required fields.
+ template<typename T> bool VerifyFieldRequired(const Verifier &verifier,
+ voffset_t field) const {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return verifier.Check(field_offset != 0) &&
+ verifier.Verify<T>(data_ + field_offset);
+ }
+
+ private:
+ // private constructor & copy constructor: you obtain instances of this
+ // class by pointing to existing data only
+ Table();
+ Table(const Table &other);
+
+ uint8_t data_[1];
+};
+
+// Utility function for reverse lookups on the EnumNames*() functions
+// (in the generated C++ code)
+// names must be NULL terminated.
+inline int LookupEnum(const char **names, const char *name) {
+ for (const char **p = names; *p; p++)
+ if (!strcmp(*p, name))
+ return static_cast<int>(p - names);
+ return -1;
+}
+
+// These macros allow us to layout a struct with a guarantee that they'll end
+// up looking the same on different compilers and platforms.
+// It does this by disallowing the compiler to do any padding, and then
+// does padding itself by inserting extra padding fields that make every
+// element aligned to its own size.
+// Additionally, it manually sets the alignment of the struct as a whole,
+// which is typically its largest element, or a custom size set in the schema
+// by the force_align attribute.
+// These are used in the generated code only.
+
+#if defined(_MSC_VER)
+ #define MANUALLY_ALIGNED_STRUCT(alignment) \
+ __pragma(pack(1)); \
+ struct __declspec(align(alignment))
+ #define STRUCT_END(name, size) \
+ __pragma(pack()); \
+ static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#elif defined(__GNUC__) || defined(__clang__)
+ #define MANUALLY_ALIGNED_STRUCT(alignment) \
+ _Pragma("pack(1)") \
+ struct __attribute__((aligned(alignment)))
+ #define STRUCT_END(name, size) \
+ _Pragma("pack()") \
+ static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#else
+ #error Unknown compiler, please define structure alignment macros
+#endif
+
+// String which identifies the current version of FlatBuffers.
+// flatbuffer_version_string is used by Google developers to identify which
+// applications uploaded to Google Play are using this library. This allows
+// the development team at Google to determine the popularity of the library.
+// How it works: Applications that are uploaded to the Google Play Store are
+// scanned for this version string. We track which applications are using it
+// to measure popularity. You are free to remove it (of course) but we would
+// appreciate if you left it in.
+
+// Weak linkage is culled by VS & doesn't work on cygwin.
+#if !defined(_WIN32) && !defined(__CYGWIN__)
+
+extern volatile __attribute__((weak)) const char *flatbuffer_version_string;
+volatile __attribute__((weak)) const char *flatbuffer_version_string =
+ "FlatBuffers "
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
+
+#endif // !defined(_WIN32) && !defined(__CYGWIN__)
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_H_
diff --git a/test/benchmark/benchflatc/run.sh b/test/benchmark/benchflatc/run.sh
new file mode 100755
index 0000000..4aff0b8
--- /dev/null
+++ b/test/benchmark/benchflatc/run.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../../..
+ROOT=`pwd`
+TMP=build/tmp/test/benchmark/benchflatc
+INC=$ROOT/include
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+
+CXX=${CXX:-c++}
+cp -r test/benchmark/benchmain/* ${TMP}
+cp -r test/benchmark/benchflatc/* ${TMP}
+# don't add the root include dir to the include path as it may conflict
+cp -r ${ROOT}/include/flatcc/support ${TMP}
+
+cd ${TMP}
+$CXX -g -std=c++11 benchflatc.cpp -o benchflatc_d -I $INC
+$CXX -O3 -DNDEBUG -std=c++11 benchflatc.cpp -o benchflatc -I $INC
+echo "running flatbench flatc for C++ (debug)"
+./benchflatc_d
+echo "running flatbench flatc for C++ (optimized)"
+./benchflatc
diff --git a/test/benchmark/benchflatcc/benchflatcc.c b/test/benchmark/benchflatcc/benchflatcc.c
new file mode 100644
index 0000000..682418a
--- /dev/null
+++ b/test/benchmark/benchflatcc/benchflatcc.c
@@ -0,0 +1,98 @@
+#define BENCH_TITLE "flatcc for C"
+
+#define BENCHMARK_BUFSIZ 1000
+#define DECLARE_BENCHMARK(BM)\
+ flatcc_builder_t builder, *BM;\
+ BM = &builder;\
+ flatcc_builder_init(BM);
+
+#define CLEAR_BENCHMARK(BM) flatcc_builder_clear(BM);
+
+
+#include "flatbench_builder.h"
+
+#define C(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_FooBarContainer, x)
+#define FooBar(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_FooBar, x)
+#define Bar(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Bar, x)
+#define Foo(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Foo, x)
+#define Enum(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Enum, x)
+#define True flatbuffers_true
+#define False flatbuffers_false
+#define StringLen flatbuffers_string_len
+
+int encode(flatcc_builder_t *B, void *buffer, size_t *size)
+{
+ int i, veclen = 3;
+ void *buffer_ok;
+
+ flatcc_builder_reset(B);
+
+ C(start_as_root(B));
+ C(list_start(B));
+ for (i = 0; i < veclen; ++i) {
+ /*
+ * By using push_start instead of push_create we can construct
+ * the sibling field (of Bar type) in-place on the stack,
+ * otherwise we would need to create a temporary Bar struct.
+ */
+ C(list_push_start(B));
+ FooBar(sibling_create(B,
+ 0xABADCAFEABADCAFE + i, 10000 + i, '@' + i, 1000000 + i,
+ 123456 + i, 3.14159f + i, 10000 + i));
+ FooBar(name_create_str(B, "Hello, World!"));
+ FooBar(rating_add(B, 3.1415432432445543543 + i));
+ FooBar(postfix_add(B, '!' + i));
+ C(list_push_end(B));
+ }
+ C(list_end(B));
+ C(location_create_str(B, "https://www.example.com/myurl/"));
+ C(fruit_add(B, Enum(Bananas)));
+ C(initialized_add(B, True));
+ C(end_as_root(B));
+
+ /*
+ * This only works with the default emitter and only if the buffer
+ * is large enough. Otherwise use whatever custom operation the
+ * emitter provides.
+ */
+ buffer_ok = flatcc_builder_copy_buffer(B, buffer, *size);
+ *size = flatcc_builder_get_buffer_size(B);
+ return !buffer_ok;
+}
+
+int64_t decode(flatcc_builder_t *B, void *buffer, size_t size, int64_t sum)
+{
+ unsigned int i;
+ C(table_t) foobarcontainer;
+ FooBar(vec_t) list;
+ FooBar(table_t) foobar;
+ Bar(struct_t) bar;
+ Foo(struct_t) foo;
+
+ (void)B;
+
+ foobarcontainer = C(as_root(buffer));
+ sum += C(initialized(foobarcontainer));
+ sum += StringLen(C(location(foobarcontainer)));
+ sum += C(fruit(foobarcontainer));
+ list = C(list(foobarcontainer));
+ for (i = 0; i < FooBar(vec_len(list)); ++i) {
+ foobar = FooBar(vec_at(list, i));
+ sum += StringLen(FooBar(name(foobar)));
+ sum += FooBar(postfix(foobar));
+ sum += (int64_t)FooBar(rating(foobar));
+ bar = FooBar(sibling(foobar));
+ sum += (int64_t)Bar(ratio(bar));
+ sum += Bar(size(bar));
+ sum += Bar(time(bar));
+ foo = Bar(parent(bar));
+ sum += Foo(count(foo));
+ sum += Foo(id(foo));
+ sum += Foo(length(foo));
+ sum += Foo(prefix(foo));
+ }
+ return sum + 2 * sum;
+}
+
+/* Copy to same folder before compilation or use include directive. */
+#include "benchmain.h"
diff --git a/test/benchmark/benchflatcc/run.sh b/test/benchmark/benchflatcc/run.sh
new file mode 100755
index 0000000..2d63dae
--- /dev/null
+++ b/test/benchmark/benchflatcc/run.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../../..
+ROOT=`pwd`
+TMP=build/tmp/test/benchmark/benchflatcc
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+#bin/flatcc -a -o ${TMP} test/benchmark/schema/flatbench.fbs
+bin/flatcc --json-printer -a -o ${TMP} test/benchmark/schema/flatbench.fbs
+
+CC=${CC:-cc}
+cp -r test/benchmark/benchmain/* ${TMP}
+cp -r test/benchmark/benchflatcc/* ${TMP}
+cd ${TMP}
+$CC -g -std=c11 -I ${ROOT}/include benchflatcc.c \
+ ${ROOT}/lib/libflatccrt_d.a -o benchflatcc_d
+$CC -O3 -DNDEBUG -std=c11 -I ${ROOT}/include benchflatcc.c \
+ ${ROOT}/lib/libflatccrt.a -o benchflatcc
+echo "running flatbench flatcc for C (debug)"
+./benchflatcc_d
+echo "running flatbench flatcc for C (optimized)"
+./benchflatcc
diff --git a/test/benchmark/benchflatccjson/benchflatccjson.c b/test/benchmark/benchflatccjson/benchflatccjson.c
new file mode 100644
index 0000000..26ee291
--- /dev/null
+++ b/test/benchmark/benchflatccjson/benchflatccjson.c
@@ -0,0 +1,182 @@
+#define BENCH_TITLE "flatcc json parser and printer for C"
+
+/*
+ * NOTE:
+ *
+ * Using dtoa_grisu3.c over sprintf("%.17g") more than doubles the
+ * encoding performance of this benchmark from 3.3 us/op to 1.3 us/op.
+ */
+
+#include <stdlib.h>
+
+/*
+ * Builder is only needed so we can create the initial buffer to encode
+ * json from, but it also includes the reader which is needed to calculate
+ * the decoded checksum after parsing.
+ */
+#include "flatbench_builder.h"
+
+#include "flatbench_json_parser.h"
+#include "flatbench_json_printer.h"
+
+#define C(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_FooBarContainer, x)
+#define FooBar(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_FooBar, x)
+#define Bar(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Bar, x)
+#define Foo(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Foo, x)
+#define Enum(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Enum, x)
+#define True flatbuffers_true
+#define False flatbuffers_false
+#define StringLen flatbuffers_string_len
+
+typedef struct flatcc_jsonbench {
+ flatcc_builder_t builder;
+ flatcc_json_parser_t parser;
+ flatcc_json_printer_t printer;
+
+ /* Holds the source data to print (encode) from. */
+ char bin[1000];
+ size_t bin_size;
+ /* Extra buffer for extracting the parse (decoded) into. */
+ char decode_buffer[1000];
+ /*
+ * The target encode / source decode buffer is provided by the
+ * benchmark framework.
+ */
+} flatcc_jsonbench_t;
+
+int flatcc_jsonbench_init(flatcc_jsonbench_t *bench)
+{
+ int i, veclen = 3;
+ void *buffer_ok;
+ flatcc_builder_t *B = &bench->builder;
+
+ flatcc_builder_init(B);
+
+ /* Generate the data needed to print from, just once. */
+ C(start_as_root(B));
+ C(list_start(B));
+ for (i = 0; i < veclen; ++i) {
+ /*
+ * By using push_start instead of push_create we can construct
+ * the sibling field (of Bar type) in-place on the stack,
+ * otherwise we would need to create a temporary Bar struct.
+ */
+ C(list_push_start(B));
+ FooBar(sibling_create(B,
+ 0xABADCAFEABADCAFE + i, 10000 + i, '@' + i, 1000000 + i,
+ 123456 + i, 3.14159f + i, 10000 + i));
+ FooBar(name_create_str(B, "Hello, World!"));
+ FooBar(rating_add(B, 3.1415432432445543543 + i));
+ FooBar(postfix_add(B, '!' + i));
+ C(list_push_end(B));
+ }
+ C(list_end(B));
+ C(location_create_str(B, "https://www.example.com/myurl/"));
+ C(fruit_add(B, Enum(Bananas)));
+ C(initialized_add(B, True));
+ C(end_as_root(B));
+
+ buffer_ok = flatcc_builder_copy_buffer(B, bench->bin, sizeof(bench->bin));
+ bench->bin_size = flatcc_builder_get_buffer_size(B);
+
+ flatcc_builder_reset(&bench->builder);
+ return !buffer_ok;
+}
+
+void flatcc_jsonbench_clear(flatcc_jsonbench_t *bench)
+{
+ flatcc_json_printer_clear(&bench->printer);
+ flatcc_builder_clear(&bench->builder);
+ // parser does not need to be cleared.
+}
+
+/*
+ * For a buffer large enough to hold encoded representation.
+ *
+ * 1000 is enough for compact json, but for pretty printing we must increase it.
+ */
+#define BENCHMARK_BUFSIZ 10000
+
+/* Interface to main benchmark logic. */
+#define DECLARE_BENCHMARK(BM) \
+ flatcc_jsonbench_t flatcc_jsonbench, *BM; \
+ BM = &flatcc_jsonbench; \
+ flatcc_jsonbench_init(BM);
+
+#define CLEAR_BENCHMARK(BM) flatcc_jsonbench_clear(BM);
+
+int encode(flatcc_jsonbench_t *bench, void *buffer, size_t *size)
+{
+ int ret;
+
+ flatcc_json_printer_init_buffer(&bench->printer, buffer, *size);
+ /*
+ * Normally avoid setting indentation - this yields compact
+ * spaceless json which is what you want in resource critical
+ * parsing and printing. But - it doesn't get that much slower,
+ * so interesting to benchmark. Improve by enabling SSE4_2, but
+ * generally not worth the trouble.
+ */
+ //flatcc_json_printer_set_indent(&bench->printer, 8);
+
+ /*
+ * Unquoted makes it slightly slower, noenum hardly makes a
+ * difference - for this particular data set.
+ */
+ // flatcc_json_printer_set_noenum(&bench->printer, 1);
+ // flatcc_json_printer_set_unquoted(&bench->printer, 1);
+ ret = flatbench_print_json(&bench->printer, bench->bin, bench->bin_size);
+ *size = flatcc_json_printer_flush(&bench->printer);
+
+ return ret < 0 ? ret : 0;
+}
+
+int64_t decode(flatcc_jsonbench_t *bench, void *buffer, size_t size, int64_t sum)
+{
+ unsigned int i;
+ int ret;
+ flatcc_builder_t *B = &bench->builder;
+
+ C(table_t) foobarcontainer;
+ FooBar(vec_t) list;
+ FooBar(table_t) foobar;
+ Bar(struct_t) bar;
+ Foo(struct_t) foo;
+
+ flatcc_builder_reset(B);
+ ret = flatbench_parse_json(B, &bench->parser, buffer, size, 0);
+ if (ret) {
+ return 0;
+ }
+ if (!flatcc_builder_copy_buffer(B,
+ bench->decode_buffer, sizeof(bench->decode_buffer))) {
+ return 0;
+ }
+
+ /* Traverse parsed result to calculate checksum. */
+
+ foobarcontainer = C(as_root(bench->decode_buffer));
+ sum += C(initialized(foobarcontainer));
+ sum += StringLen(C(location(foobarcontainer)));
+ sum += C(fruit(foobarcontainer));
+ list = C(list(foobarcontainer));
+ for (i = 0; i < FooBar(vec_len(list)); ++i) {
+ foobar = FooBar(vec_at(list, i));
+ sum += StringLen(FooBar(name(foobar)));
+ sum += FooBar(postfix(foobar));
+ sum += (int64_t)FooBar(rating(foobar));
+ bar = FooBar(sibling(foobar));
+ sum += (int64_t)Bar(ratio(bar));
+ sum += Bar(size(bar));
+ sum += Bar(time(bar));
+ foo = Bar(parent(bar));
+ sum += Foo(count(foo));
+ sum += Foo(id(foo));
+ sum += Foo(length(foo));
+ sum += Foo(prefix(foo));
+ }
+ return sum + 2 * sum;
+}
+
+/* Copy to same folder before compilation or use include directive. */
+#include "benchmain.h"
diff --git a/test/benchmark/benchflatccjson/run.sh b/test/benchmark/benchflatccjson/run.sh
new file mode 100755
index 0000000..c24e02e
--- /dev/null
+++ b/test/benchmark/benchflatccjson/run.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../../..
+ROOT=`pwd`
+TMP=build/tmp/test/benchmark/benchflatccjson
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+bin/flatcc --json -crw -o ${TMP} test/benchmark/schema/flatbench.fbs
+
+CC=${CC:-cc}
+cp -r test/benchmark/benchmain/* ${TMP}
+cp -r test/benchmark/benchflatccjson/* ${TMP}
+cd ${TMP}
+$CC -g -std=c11 -I ${ROOT}/include benchflatccjson.c \
+ ${ROOT}/lib/libflatccrt_d.a -o benchflatccjson_d
+$CC -O3 -DNDEBUG -std=c11 -I ${ROOT}/include benchflatccjson.c \
+ ${ROOT}/lib/libflatccrt.a -o benchflatccjson
+echo "running flatbench flatcc json parse and print for C (debug)"
+./benchflatccjson_d
+echo "running flatbench flatcc json parse and print for C (optimized)"
+./benchflatccjson
diff --git a/test/benchmark/benchmain/benchmain.h b/test/benchmark/benchmain/benchmain.h
new file mode 100644
index 0000000..f29c548
--- /dev/null
+++ b/test/benchmark/benchmain/benchmain.h
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "flatcc/support/elapsed.h"
+
+#ifdef NDEBUG
+#define COMPILE_TYPE "(optimized)"
+#else
+#define COMPILE_TYPE "(debug)"
+#endif
+
+int main(int argc, char *argv[])
+{
+ /*
+ * The size must be large enough to hold different representations,
+ * including printed json, but we know the printed json is close to
+ * 700 bytes.
+ */
+ const int bufsize = BENCHMARK_BUFSIZ, rep = 1000000;
+ void *buf;
+ size_t size, old_size;
+ double t1, t2, t3;
+    /* Use volatile to prevent over optimization. */
+ volatile int64_t total = 0;
+ int i, ret = 0;
+ DECLARE_BENCHMARK(BM);
+
+ buf = malloc(bufsize);
+
+ /* Warmup to preallocate internal buffers. */
+ size = bufsize;
+ old_size = size;
+ encode(BM, buf, &size);
+ t1 = elapsed_realtime();
+ for (i = 0; i < rep; ++i) {
+ size = bufsize;
+ ret |= encode(BM, buf, &size);
+ assert(ret == 0);
+ if (i > 0 && size != old_size) {
+ printf("abort on inconsistent encoding size\n");
+ goto done;
+ }
+ old_size = size;
+ }
+ t2 = elapsed_realtime();
+ for (i = 0; i < rep; ++i) {
+ total = decode(BM, buf, size, total);
+ }
+ t3 = elapsed_realtime();
+ if (total != -8725036910085654784LL) {
+ printf("ABORT ON CHECKSUM FAILURE\n");
+ goto done;
+ }
+ printf("----\n");
+ show_benchmark(BENCH_TITLE " encode " COMPILE_TYPE, t1, t2, size, rep, "1M");
+ printf("\n");
+ show_benchmark(BENCH_TITLE " decode/traverse " COMPILE_TYPE, t2, t3, size, rep, "1M");
+ printf("----\n");
+ ret = 0;
+done:
+ if (buf) {
+ free(buf);
+ }
+ CLEAR_BENCHMARK(BM);
+ return 0;
+}
diff --git a/test/benchmark/benchout-osx.txt b/test/benchmark/benchout-osx.txt
new file mode 100644
index 0000000..ab0ec63
--- /dev/null
+++ b/test/benchmark/benchout-osx.txt
@@ -0,0 +1,169 @@
+running all benchmarks (raw, flatc C++, flatcc C)
+building and benchmarking raw structs
+running flatbench for raw C structs (debug)
+----
+operation: flatbench for raw C structs encode (debug)
+elapsed time: 0.106 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 2956.926 (MB/s)
+throughput in ops per sec: 9477325.499
+throughput in 1M ops per sec: 9.477
+time per op: 105.515 (ns)
+
+operation: flatbench for raw C structs decode/traverse (debug)
+elapsed time: 0.074 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 4222.379 (MB/s)
+throughput in ops per sec: 13533264.765
+throughput in 1M ops per sec: 13.533
+time per op: 73.892 (ns)
+----
+running flatbench for raw C structs (optimized)
+----
+operation: flatbench for raw C structs encode (optimized)
+elapsed time: 0.052 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 5991.474 (MB/s)
+throughput in ops per sec: 19203441.257
+throughput in 1M ops per sec: 19.203
+time per op: 52.074 (ns)
+
+operation: flatbench for raw C structs decode/traverse (optimized)
+elapsed time: 0.012 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 26342.452 (MB/s)
+throughput in ops per sec: 84430935.495
+throughput in 1M ops per sec: 84.431
+time per op: 11.844 (ns)
+----
+building and benchmarking flatc generated C++
+running flatbench flatc for C++ (debug)
+----
+operation: flatc for C++ encode (debug)
+elapsed time: 5.338 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 64.444 (MB/s)
+throughput in ops per sec: 187337.801
+throughput in 1M ops per sec: 0.187
+time per op: 5.338 (us)
+
+operation: flatc for C++ decode/traverse (debug)
+elapsed time: 0.798 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 430.966 (MB/s)
+throughput in ops per sec: 1252809.425
+throughput in 1M ops per sec: 1.253
+time per op: 798.206 (ns)
+----
+running flatbench flatc for C++ (optimized)
+----
+operation: flatc for C++ encode (optimized)
+elapsed time: 0.716 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 480.630 (MB/s)
+throughput in ops per sec: 1397180.769
+throughput in 1M ops per sec: 1.397
+time per op: 715.727 (ns)
+
+operation: flatc for C++ decode/traverse (optimized)
+elapsed time: 0.029 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 12058.751 (MB/s)
+throughput in ops per sec: 35054509.763
+throughput in 1M ops per sec: 35.055
+time per op: 28.527 (ns)
+----
+building and benchmarking flatcc generated C
+[1/1] Linking C executable ../../bin/flatcc_d
+[1/1] Linking C executable ../../bin/flatcc
+running flatbench flatcc for C (debug)
+----
+operation: flatcc for C encode (debug)
+elapsed time: 1.975 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 170.157 (MB/s)
+throughput in ops per sec: 506418.346
+throughput in 1M ops per sec: 0.506
+time per op: 1.975 (us)
+
+operation: flatcc for C decode/traverse (debug)
+elapsed time: 0.566 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 593.408 (MB/s)
+throughput in ops per sec: 1766094.864
+throughput in 1M ops per sec: 1.766
+time per op: 566.221 (ns)
+----
+running flatbench flatcc for C (optimized)
+----
+operation: flatcc for C encode (optimized)
+elapsed time: 0.606 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 554.266 (MB/s)
+throughput in ops per sec: 1649601.539
+throughput in 1M ops per sec: 1.650
+time per op: 606.207 (ns)
+
+operation: flatcc for C decode/traverse (optimized)
+elapsed time: 0.029 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 11740.452 (MB/s)
+throughput in ops per sec: 34941821.867
+throughput in 1M ops per sec: 34.942
+time per op: 28.619 (ns)
+----
+building and benchmarking flatcc json generated C
+[1/1] Linking C executable ../../bin/flatcc_d
+[1/1] Linking C executable ../../bin/flatcc
+running flatbench flatcc json parse and print for C (debug)
+----
+operation: flatcc json parser and printer for C encode (debug)
+elapsed time: 4.633 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 155.855 (MB/s)
+throughput in ops per sec: 215866.116
+throughput in 1M ops per sec: 0.216
+time per op: 4.633 (us)
+
+operation: flatcc json parser and printer for C decode/traverse (debug)
+elapsed time: 6.957 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 103.781 (MB/s)
+throughput in ops per sec: 143740.882
+throughput in 1M ops per sec: 0.144
+time per op: 6.957 (us)
+----
+running flatbench flatcc json parse and print for C (optimized)
+----
+operation: flatcc json parser and printer for C encode (optimized)
+elapsed time: 1.358 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 531.528 (MB/s)
+throughput in ops per sec: 736188.912
+throughput in 1M ops per sec: 0.736
+time per op: 1.358 (us)
+
+operation: flatcc json parser and printer for C decode/traverse (optimized)
+elapsed time: 2.224 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 324.572 (MB/s)
+throughput in ops per sec: 449546.295
+throughput in 1M ops per sec: 0.450
+time per op: 2.224 (us)
+----
diff --git a/test/benchmark/benchout-ubuntu.txt b/test/benchmark/benchout-ubuntu.txt
new file mode 100644
index 0000000..b551901
--- /dev/null
+++ b/test/benchmark/benchout-ubuntu.txt
@@ -0,0 +1,169 @@
+running all benchmarks (raw, flatc C++, flatcc C)
+building and benchmarking raw structs
+running flatbench for raw C structs (debug)
+----
+operation: flatbench for raw C structs encode (debug)
+elapsed time: 0.065 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 4781.609 (MB/s)
+throughput in ops per sec: 15325670.498
+throughput in 1M ops per sec: 15.326
+time per op: 65.250 (ns)
+
+operation: flatbench for raw C structs decode/traverse (debug)
+elapsed time: 0.063 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 4931.325 (MB/s)
+throughput in ops per sec: 15805528.774
+throughput in 1M ops per sec: 15.806
+time per op: 63.269 (ns)
+----
+running flatbench for raw C structs (optimized)
+----
+operation: flatbench for raw C structs encode (optimized)
+elapsed time: 0.030 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 10521.346 (MB/s)
+throughput in ops per sec: 33722263.438
+throughput in 1M ops per sec: 33.722
+time per op: 29.654 (ns)
+
+operation: flatbench for raw C structs decode/traverse (optimized)
+elapsed time: 0.012 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 25409.235 (MB/s)
+throughput in ops per sec: 81439856.666
+throughput in 1M ops per sec: 81.440
+time per op: 12.279 (ns)
+----
+building and benchmarking flatc generated C++
+running flatbench flatc for C++ (debug)
+----
+operation: flatc for C++ encode (debug)
+elapsed time: 5.577 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 61.679 (MB/s)
+throughput in ops per sec: 179300.638
+throughput in 1M ops per sec: 0.179
+time per op: 5.577 (us)
+
+operation: flatc for C++ decode/traverse (debug)
+elapsed time: 0.892 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 385.522 (MB/s)
+throughput in ops per sec: 1120703.084
+throughput in 1M ops per sec: 1.121
+time per op: 892.297 (ns)
+----
+running flatbench flatc for C++ (optimized)
+----
+operation: flatc for C++ encode (optimized)
+elapsed time: 0.516 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 667.104 (MB/s)
+throughput in ops per sec: 1939254.783
+throughput in 1M ops per sec: 1.939
+time per op: 515.662 (ns)
+
+operation: flatc for C++ decode/traverse (optimized)
+elapsed time: 0.030 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 11479.294 (MB/s)
+throughput in ops per sec: 33370040.378
+throughput in 1M ops per sec: 33.370
+time per op: 29.967 (ns)
+----
+building and benchmarking flatcc generated C
+[1/1] Linking C executable ../../bin/flatcc_d
+[1/1] Linking C executable ../../bin/flatcc
+running flatbench flatcc for C (debug)
+----
+operation: flatcc for C encode (debug)
+elapsed time: 1.893 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 177.461 (MB/s)
+throughput in ops per sec: 528159.065
+throughput in 1M ops per sec: 0.528
+time per op: 1.893 (us)
+
+operation: flatcc for C decode/traverse (debug)
+elapsed time: 0.643 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 522.374 (MB/s)
+throughput in ops per sec: 1554685.277
+throughput in 1M ops per sec: 1.555
+time per op: 643.217 (ns)
+----
+running flatbench flatcc for C (optimized)
+----
+operation: flatcc for C encode (optimized)
+elapsed time: 0.531 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 632.498 (MB/s)
+throughput in ops per sec: 1882434.440
+throughput in 1M ops per sec: 1.882
+time per op: 531.227 (ns)
+
+operation: flatcc for C decode/traverse (optimized)
+elapsed time: 0.028 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 12200.879 (MB/s)
+throughput in ops per sec: 36312139.148
+throughput in 1M ops per sec: 36.312
+time per op: 27.539 (ns)
+----
+building and benchmarking flatcc json generated C
+[1/1] Linking C executable ../../bin/flatcc_d
+[1/1] Linking C executable ../../bin/flatcc
+running flatbench flatcc json parse and print for C (debug)
+----
+operation: flatcc json parser and printer for C encode (debug)
+elapsed time: 3.931 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 183.674 (MB/s)
+throughput in ops per sec: 254396.609
+throughput in 1M ops per sec: 0.254
+time per op: 3.931 (us)
+
+operation: flatcc json parser and printer for C decode/traverse (debug)
+elapsed time: 6.874 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 105.031 (MB/s)
+throughput in ops per sec: 145472.171
+throughput in 1M ops per sec: 0.145
+time per op: 6.874 (us)
+----
+running flatbench flatcc json parse and print for C (optimized)
+----
+operation: flatcc json parser and printer for C encode (optimized)
+elapsed time: 1.210 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 596.609 (MB/s)
+throughput in ops per sec: 826328.137
+throughput in 1M ops per sec: 0.826
+time per op: 1.210 (us)
+
+operation: flatcc json parser and printer for C decode/traverse (optimized)
+elapsed time: 1.772 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 407.372 (MB/s)
+throughput in ops per sec: 564227.736
+throughput in 1M ops per sec: 0.564
+time per op: 1.772 (us)
+----
diff --git a/test/benchmark/benchraw/benchraw.c b/test/benchmark/benchraw/benchraw.c
new file mode 100644
index 0000000..fd6a9ea
--- /dev/null
+++ b/test/benchmark/benchraw/benchraw.c
@@ -0,0 +1,117 @@
+#define BENCH_TITLE "flatbench for raw C structs"
+
+#define BENCHMARK_BUFSIZ 1000
+#define DECLARE_BENCHMARK(BM)\
+ void *BM = 0
+#define CLEAR_BENCHMARK(BM)
+
+#include <string.h>
+#include <stdint.h>
+
+#define STRING_LEN 32
+#define VEC_LEN 3
+#define fb_bool uint8_t
+
+enum Enum { Apples, Pears, Bananas };
+
+struct Foo {
+ int64_t id;
+ short count;
+ char prefix;
+ int length;
+};
+
+struct Bar {
+ struct Foo parent;
+ int time;
+ float ratio;
+ unsigned short size;
+};
+
+struct FooBar {
+ struct Bar sibling;
+ int name_len;
+ char name[STRING_LEN];
+ double rating;
+ unsigned char postfix;
+};
+
+struct FooBarContainer {
+ struct FooBar list[VEC_LEN];
+ fb_bool initialized;
+ enum Enum fruit;
+ int location_len;
+ char location[STRING_LEN];
+};
+
+int encode(void *bench, void *buffer, size_t *size)
+{
+ int i;
+ struct FooBarContainer fbc;
+ struct FooBar *foobar;
+ struct Foo *foo;
+ struct Bar *bar;
+
+ (void)bench;
+
+ strcpy(fbc.location, "https://www.example.com/myurl/");
+ fbc.location_len = strlen(fbc.location);
+ fbc.fruit = Bananas;
+ fbc.initialized = 1;
+ for (i = 0; i < VEC_LEN; ++i) {
+ foobar = &fbc.list[i];
+ foobar->rating = 3.1415432432445543543 + i;
+ foobar->postfix = '!' + i;
+ strcpy(foobar->name, "Hello, World!");
+ foobar->name_len = strlen(foobar->name);
+ bar = &foobar->sibling;
+ bar->ratio = 3.14159f + i;
+ bar->size = 10000 + i;
+ bar->time = 123456 + i;
+ foo = &bar->parent;
+ foo->id = 0xABADCAFEABADCAFE + i;
+ foo->count = 10000 + i;
+ foo->length = 1000000 + i;
+ foo->prefix = '@' + i;
+ }
+ if (*size < sizeof(struct FooBarContainer)) {
+ return -1;
+ }
+ *size = sizeof(struct FooBarContainer);
+ memcpy(buffer, &fbc, *size);
+ return 0;
+}
+
+int64_t decode(void *bench, void *buffer, size_t size, int64_t sum)
+{
+ int i;
+ struct FooBarContainer *foobarcontainer;
+ struct FooBar *foobar;
+ struct Foo *foo;
+ struct Bar *bar;
+
+ (void)bench;
+
+ foobarcontainer = buffer;
+ sum += foobarcontainer->initialized;
+ sum += foobarcontainer->location_len;
+ sum += foobarcontainer->fruit;
+ for (i = 0; i < VEC_LEN; ++i) {
+ foobar = &foobarcontainer->list[i];
+ sum += foobar->name_len;
+ sum += foobar->postfix;
+ sum += (int64_t)foobar->rating;
+ bar = &foobar->sibling;
+ sum += (int64_t)bar->ratio;
+ sum += bar->size;
+ sum += bar->time;
+ foo = &bar->parent;
+ sum += foo->count;
+ sum += foo->id;
+ sum += foo->length;
+ sum += foo->prefix;
+ }
+ return sum + 2 * sum;
+}
+
+#include "benchmain.h"
diff --git a/test/benchmark/benchraw/run.sh b/test/benchmark/benchraw/run.sh
new file mode 100755
index 0000000..13e3333
--- /dev/null
+++ b/test/benchmark/benchraw/run.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../../..
+ROOT=`pwd`
+TMP=build/tmp/test/benchmark/benchraw
+INC=$ROOT/include
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+
+CC=${CC:-cc}
+cp -r test/benchmark/benchmain/* ${TMP}
+cp -r test/benchmark/benchraw/* ${TMP}
+
+cd ${TMP}
+$CC -g benchraw.c -o benchraw_d -I $INC
+$CC -O3 -DNDEBUG benchraw.c -o benchraw -I $INC
+echo "running flatbench for raw C structs (debug)"
+./benchraw_d
+echo "running flatbench for raw C structs (optimized)"
+./benchraw
diff --git a/test/benchmark/schema/flatbench.fbs b/test/benchmark/schema/flatbench.fbs
new file mode 100644
index 0000000..34bd2df
--- /dev/null
+++ b/test/benchmark/schema/flatbench.fbs
@@ -0,0 +1,37 @@
+// trying to represent a typical mix of datatypes:
+// 1 array of 3 elements, each element: 1 string, 3 nested objects, 9 scalars
+// root element has the array, additional string and an enum
+
+namespace benchfb;
+
+enum Enum : short { Apples, Pears, Bananas }
+
+struct Foo {
+ id:ulong;
+ count:short;
+ prefix:byte;
+ length:uint;
+}
+
+struct Bar {
+ parent:Foo;
+ time:int;
+ ratio:float;
+ size:ushort;
+}
+
+table FooBar {
+ sibling:Bar;
+ name:string;
+ rating:double;
+ postfix:ubyte;
+}
+
+table FooBarContainer {
+ list:[FooBar]; // 3 copies of the above
+ initialized:bool;
+ fruit:Enum;
+ location:string;
+}
+
+root_type FooBarContainer;